; Dataset-extraction artifact (markdown table header), commented out so the
; file remains assemblable:
;   text stringlengths 1 1.05M |
;   |---|
; ---------------------------------------------------------------------------
; Stub table for the two-byte opcode range 0F F0 .. 0F FF
; (lddqu, psllw, pslld, psllq, pmuludq, pmaddwd, psadbw, maskmov,
;  psubb/w/d/q, paddb/w/d, and the undefined slot 0F FF).
; All 16 exported labels alias the SAME address: the two-byte self-loop
; emitted at the bottom.  Jumping to any of them therefore spins forever.
; NOTE(review): presumably these are placeholder handlers in a per-opcode
; dispatch table -- confirm against the code that consumes these symbols.
; Syntax: NASM, x86-64.
; ---------------------------------------------------------------------------
default rel
section .text
; Exported entry points; each name encodes "<opcode>_<mnemonic>".
global _0x0ff0_lddqu
global _0x0ff1_psllw
global _0x0ff2_pslld
global _0x0ff3_psllq
global _0x0ff4_pmuludq
global _0x0ff5_pmaddwd
global _0x0ff6_psadbw
global _0x0ff7_maskmov
global _0x0ff8_psubb
global _0x0ff9_psubw
global _0x0ffa_psubd
global _0x0ffb_psubq
global _0x0ffc_paddb
global _0x0ffd_paddw
global _0x0ffe_paddd
global _0x0fff
%include "extern_for_inst.asm" ; shared extern declarations used by the stub files
; Every label below falls through to the shared infinite-loop stub.
_0x0ff0_lddqu:
_0x0ff1_psllw:
_0x0ff2_pslld:
_0x0ff3_psllq:
_0x0ff4_pmuludq:
_0x0ff5_pmaddwd:
_0x0ff6_psadbw:
_0x0ff7_maskmov:
_0x0ff8_psubb:
_0x0ff9_psubw:
_0x0ffa_psubd:
_0x0ffb_psubq:
_0x0ffc_paddb:
_0x0ffd_paddw:
_0x0ffe_paddd:
_0x0fff:
db 0xeb,0xfe ; EB FE = `jmp short $` (rel8 = -2): hang here on purpose
; Dataset-extraction artifact (row separator between two concatenated
; source files): |
SECTION .text
GLOBAL square_p384
square_p384:
sub rsp, 0x4a8 ; last 0x30 (6) for Caller - save regs
mov [ rsp + 0x478 ], rbx; saving to stack
mov [ rsp + 0x480 ], rbp; saving to stack
mov [ rsp + 0x488 ], r12; saving to stack
mov [ rsp + 0x490 ], r13; saving to stack
mov [ rsp + 0x498 ], r14; saving to stack
mov [ rsp + 0x4a0 ], r15; saving to stack
mov rax, [ rsi + 0x0 ]; load m64 x6 to register64
mov rdx, rax; x6 to rdx
mulx rax, r10, [ rsi + 0x0 ]; x18, x17<- x6 * arg1[0]
mov r11, 0x100000001 ; moving imm to reg
xchg rdx, r11; 0x100000001, swapping with x6, which is currently in rdx
mulx rbx, rbp, r10; _, x30<- x17 * 0x100000001
xchg rdx, r11; x6, swapping with 0x100000001, which is currently in rdx
mulx rbx, r12, [ rsi + 0x10 ]; x14, x13<- x6 * arg1[2]
mulx r13, r14, [ rsi + 0x8 ]; x16, x15<- x6 * arg1[1]
mov r15, [ rsi + 0x8 ]; load m64 x1 to register64
mov rcx, 0xffffffff ; moving imm to reg
xchg rdx, rcx; 0xffffffff, swapping with x6, which is currently in rdx
mulx r8, r9, rbp; x43, x42<- x30 * 0xffffffff
add r14, rax; could be done better, if r0 has been u8 as well
mov rax, 0xffffffff00000000 ; moving imm to reg
xchg rdx, rax; 0xffffffff00000000, swapping with 0xffffffff, which is currently in rdx
mulx r11, rax, rbp; x41, x40<- x30 * 0xffffffff00000000
xchg rdx, r15; x1, swapping with 0xffffffff00000000, which is currently in rdx
mov [ rsp + 0x0 ], rdi; spilling out1 to mem
mulx r15, rdi, [ rsi + 0x8 ]; x78, x77<- x1 * arg1[1]
mov [ rsp + 0x8 ], r15; spilling x78 to mem
mov r15, -0x2 ; moving imm to reg
inc r15; OF<-0x0, preserve CF (debug: 6; load -2, increase it, save as -1)
adox rax, r8
mulx r8, r15, [ rsi + 0x0 ]; x80, x79<- x1 * arg1[0]
mov [ rsp + 0x10 ], rbx; spilling x14 to mem
setc bl; spill CF x20 to reg (rbx)
clc;
adcx r9, r10
adcx rax, r14
seto r9b; spill OF x45 to reg (r9)
mov r10, -0x2 ; moving imm to reg
inc r10; OF<-0x0, preserve CF (debug: 6; load -2, increase it, save as -1)
adox rdi, r8
setc r14b; spill CF x58 to reg (r14)
clc;
adcx r15, rax
seto r8b; spill OF x82 to reg (r8)
inc r10; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov rax, -0x1 ; moving imm to reg
movzx rbx, bl
adox rbx, rax; loading flag
adox r13, r12
mov r12, [ rsi + 0x10 ]; load m64 x2 to register64
mulx rbx, r10, [ rsi + 0x10 ]; x76, x75<- x1 * arg1[2]
mov rax, 0xfffffffffffffffe ; moving imm to reg
xchg rdx, rbp; x30, swapping with x1, which is currently in rdx
mov [ rsp + 0x18 ], rbx; spilling x76 to mem
mov [ rsp + 0x20 ], r10; spilling x75 to mem
mulx rbx, r10, rax; x39, x38<- x30 * 0xfffffffffffffffe
mov rax, rdx; preserving value of x30 into a new reg
mov rdx, [ rsi + 0x0 ]; saving arg1[0] in rdx.
mov byte [ rsp + 0x28 ], r8b; spilling byte x82 to mem
mov [ rsp + 0x30 ], rbx; spilling x39 to mem
mulx r8, rbx, r12; x157, x156<- x2 * arg1[0]
mov rdx, 0x100000001 ; moving imm to reg
mov [ rsp + 0x38 ], r8; spilling x157 to mem
mov [ rsp + 0x40 ], rcx; spilling x6 to mem
mulx r8, rcx, r15; _, x106<- x92 * 0x100000001
setc r8b; spill CF x93 to reg (r8)
clc;
mov rdx, -0x1 ; moving imm to reg
movzx r9, r9b
adcx r9, rdx; loading flag
adcx r11, r10
mov r9, 0xffffffff00000000 ; moving imm to reg
mov rdx, rcx; x106 to rdx
mulx rcx, r10, r9; x117, x116<- x106 * 0xffffffff00000000
mov r9, 0xffffffff ; moving imm to reg
mov [ rsp + 0x48 ], rcx; spilling x117 to mem
mov [ rsp + 0x50 ], rbx; spilling x156 to mem
mulx rcx, rbx, r9; x119, x118<- x106 * 0xffffffff
setc r9b; spill CF x47 to reg (r9)
clc;
adcx r10, rcx
mov rcx, 0xffffffffffffffff ; moving imm to reg
xchg rdx, rcx; 0xffffffffffffffff, swapping with x106, which is currently in rdx
mov byte [ rsp + 0x58 ], r9b; spilling byte x47 to mem
mov [ rsp + 0x60 ], r10; spilling x120 to mem
mulx r9, r10, rax; x37, x36<- x30 * 0xffffffffffffffff
seto dl; spill OF x22 to reg (rdx)
mov [ rsp + 0x68 ], r9; spilling x37 to mem
mov r9, 0x0 ; moving imm to reg
dec r9; OF<-0x0, preserve CF (debug: state 4 (thanks Paul))
movzx r14, r14b
adox r14, r9; loading flag
adox r13, r11
setc r14b; spill CF x121 to reg (r14)
clc;
movzx r8, r8b
adcx r8, r9; loading flag
adcx r13, rdi
setc dil; spill CF x95 to reg (rdi)
clc;
adcx rbx, r15
mov rbx, [ rsp + 0x60 ]; x133, copying x120 here, cause x120 is needed in a reg for other than x133, namely all: , x133--x134, size: 1
adcx rbx, r13
setc r15b; spill CF x134 to reg (r15)
clc;
adcx rbx, [ rsp + 0x50 ]
mov r8b, dl; preserving value of x22 into a new reg
mov rdx, [ rsi + 0x18 ]; saving arg1[3] in rdx.
mulx r11, r13, [ rsp + 0x40 ]; x12, x11<- x6 * arg1[3]
mov rdx, [ rsi + 0x8 ]; arg1[1] to rdx
mov [ rsp + 0x70 ], r11; spilling x12 to mem
mulx r9, r11, r12; x155, x154<- x2 * arg1[1]
mov rdx, 0x100000001 ; moving imm to reg
mov [ rsp + 0x78 ], r9; spilling x155 to mem
mov byte [ rsp + 0x80 ], r15b; spilling byte x134 to mem
mulx r9, r15, rbx; _, x183<- x169 * 0x100000001
mov r9, 0xffffffff ; moving imm to reg
xchg rdx, r15; x183, swapping with 0x100000001, which is currently in rdx
mov [ rsp + 0x88 ], r11; spilling x154 to mem
mulx r15, r11, r9; x196, x195<- x183 * 0xffffffff
setc r9b; spill CF x170 to reg (r9)
mov [ rsp + 0x90 ], r15; spilling x196 to mem
movzx r15, byte [ rsp + 0x58 ]; load byte memx47 to register64
clc;
mov byte [ rsp + 0x98 ], r14b; spilling byte x121 to mem
mov r14, -0x1 ; moving imm to reg
adcx r15, r14; loading flag
adcx r10, [ rsp + 0x30 ]
setc r15b; spill CF x49 to reg (r15)
clc;
movzx r8, r8b
adcx r8, r14; loading flag
adcx r13, [ rsp + 0x10 ]
mov r8, [ rsp + 0x20 ]; load m64 x75 to register64
setc r14b; spill CF x24 to reg (r14)
mov byte [ rsp + 0xa0 ], r15b; spilling byte x49 to mem
movzx r15, byte [ rsp + 0x28 ]; load byte memx82 to register64
clc;
mov byte [ rsp + 0xa8 ], r9b; spilling byte x170 to mem
mov r9, -0x1 ; moving imm to reg
adcx r15, r9; loading flag
adcx r8, [ rsp + 0x8 ]
adox r10, r13
seto r15b; spill OF x62 to reg (r15)
inc r9; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
adox r11, rbx
mov r11, [ rsi + 0x18 ]; load m64 x3 to register64
setc bl; spill CF x84 to reg (rbx)
clc;
mov r13, -0x1 ; moving imm to reg
movzx rdi, dil
adcx rdi, r13; loading flag
adcx r10, r8
mov rdi, 0xfffffffffffffffe ; moving imm to reg
xchg rdx, rdi; 0xfffffffffffffffe, swapping with x183, which is currently in rdx
mulx r8, r9, rcx; x115, x114<- x106 * 0xfffffffffffffffe
setc r13b; spill CF x97 to reg (r13)
movzx rdx, byte [ rsp + 0x98 ]; load byte memx121 to register64
clc;
mov byte [ rsp + 0xb0 ], r15b; spilling byte x62 to mem
mov r15, -0x1 ; moving imm to reg
adcx rdx, r15; loading flag
adcx r9, [ rsp + 0x48 ]
mov rdx, [ rsi + 0x0 ]; arg1[0] to rdx
mov byte [ rsp + 0xb8 ], r13b; spilling byte x97 to mem
mulx r15, r13, r11; x234, x233<- x3 * arg1[0]
mov rdx, [ rsp + 0x88 ]; load m64 x154 to register64
mov byte [ rsp + 0xc0 ], r14b; spilling byte x24 to mem
seto r14b; spill OF x209 to reg (r14)
mov byte [ rsp + 0xc8 ], bl; spilling byte x84 to mem
mov rbx, -0x2 ; moving imm to reg
inc rbx; OF<-0x0, preserve CF (debug: 6; load -2, increase it, save as -1)
adox rdx, [ rsp + 0x38 ]
setc bl; spill CF x123 to reg (rbx)
mov [ rsp + 0xd0 ], r8; spilling x115 to mem
movzx r8, byte [ rsp + 0x80 ]; load byte memx134 to register64
clc;
mov [ rsp + 0xd8 ], r15; spilling x234 to mem
mov r15, -0x1 ; moving imm to reg
adcx r8, r15; loading flag
adcx r10, r9
mov r8, 0xffffffff00000000 ; moving imm to reg
xchg rdx, r8; 0xffffffff00000000, swapping with x158, which is currently in rdx
mulx r9, r15, rdi; x194, x193<- x183 * 0xffffffff00000000
setc dl; spill CF x136 to reg (rdx)
mov byte [ rsp + 0xe0 ], bl; spilling byte x123 to mem
movzx rbx, byte [ rsp + 0xa8 ]; load byte memx170 to register64
clc;
mov [ rsp + 0xe8 ], rbp; spilling x1 to mem
mov rbp, -0x1 ; moving imm to reg
adcx rbx, rbp; loading flag
adcx r10, r8
setc bl; spill CF x172 to reg (rbx)
clc;
adcx r15, [ rsp + 0x90 ]
setc r8b; spill CF x198 to reg (r8)
clc;
movzx r14, r14b
adcx r14, rbp; loading flag
adcx r10, r15
setc r14b; spill CF x211 to reg (r14)
clc;
adcx r13, r10
mov r15, 0x100000001 ; moving imm to reg
xchg rdx, r13; x246, swapping with x136, which is currently in rdx
mulx r10, rbp, r15; _, x260<- x246 * 0x100000001
mov r10, 0xfffffffffffffffe ; moving imm to reg
xchg rdx, rbp; x260, swapping with x246, which is currently in rdx
mov byte [ rsp + 0xf0 ], r14b; spilling byte x211 to mem
mulx r15, r14, r10; x269, x268<- x260 * 0xfffffffffffffffe
mov r10, 0xffffffffffffffff ; moving imm to reg
mov byte [ rsp + 0xf8 ], bl; spilling byte x172 to mem
mov byte [ rsp + 0x100 ], r13b; spilling byte x136 to mem
mulx rbx, r13, r10; x263, x262<- x260 * 0xffffffffffffffff
mov [ rsp + 0x108 ], r9; spilling x194 to mem
mov byte [ rsp + 0x110 ], r8b; spilling byte x198 to mem
mulx r9, r8, r10; x267, x266<- x260 * 0xffffffffffffffff
mov r10, 0xffffffff ; moving imm to reg
mov [ rsp + 0x118 ], rbx; spilling x263 to mem
mov [ rsp + 0x120 ], r13; spilling x262 to mem
mulx rbx, r13, r10; x273, x272<- x260 * 0xffffffff
mov r10, 0xffffffffffffffff ; moving imm to reg
mov [ rsp + 0x128 ], r13; spilling x272 to mem
mov [ rsp + 0x130 ], r9; spilling x267 to mem
mulx r13, r9, r10; x265, x264<- x260 * 0xffffffffffffffff
mov r10, 0xffffffff00000000 ; moving imm to reg
mov [ rsp + 0x138 ], r13; spilling x265 to mem
mulx rdx, r13, r10; x271, x270<- x260 * 0xffffffff00000000
setc r10b; spill CF x247 to reg (r10)
clc;
adcx r13, rbx
adcx r14, rdx
mov rbx, [ rsi + 0x28 ]; load m64 x5 to register64
adcx r8, r15
mov r15, [ rsp + 0x130 ]; x280, copying x267 here, cause x267 is needed in a reg for other than x280, namely all: , x280--x281, size: 1
adcx r15, r9
mov r9, [ rsp + 0x120 ]; load m64 x262 to register64
mov rdx, [ rsp + 0x138 ]; x282, copying x265 here, cause x265 is needed in a reg for other than x282, namely all: , x282--x283, size: 1
adcx rdx, r9
mov r9, rdx; preserving value of x282 into a new reg
mov rdx, [ rsi + 0x28 ]; saving arg1[5] in rdx.
mov [ rsp + 0x140 ], r15; spilling x280 to mem
mov [ rsp + 0x148 ], r8; spilling x278 to mem
mulx r15, r8, rbx; x378, x377<- x5 * arg1[5]
mov rdx, [ rsi + 0x20 ]; arg1[4] to rdx
mov [ rsp + 0x150 ], r9; spilling x282 to mem
mov [ rsp + 0x158 ], r14; spilling x276 to mem
mulx r9, r14, r11; x226, x225<- x3 * arg1[4]
mov rdx, rbx; x5 to rdx
mov [ rsp + 0x160 ], r13; spilling x274 to mem
mulx rbx, r13, [ rsi + 0x0 ]; x388, x387<- x5 * arg1[0]
mov [ rsp + 0x168 ], r13; spilling x387 to mem
mov byte [ rsp + 0x170 ], r10b; spilling byte x247 to mem
mulx r13, r10, [ rsi + 0x18 ]; x382, x381<- x5 * arg1[3]
mov [ rsp + 0x178 ], r9; spilling x226 to mem
mov r9, rdx; preserving value of x5 into a new reg
mov rdx, [ rsi + 0x20 ]; saving arg1[4] in rdx.
mov [ rsp + 0x180 ], r14; spilling x225 to mem
mov [ rsp + 0x188 ], rax; spilling x30 to mem
mulx r14, rax, r12; x149, x148<- x2 * arg1[4]
mov rdx, [ rsi + 0x18 ]; arg1[3] to rdx
mov [ rsp + 0x190 ], r14; spilling x149 to mem
mov [ rsp + 0x198 ], r15; spilling x378 to mem
mulx r14, r15, r12; x151, x150<- x2 * arg1[3]
mov rdx, [ rsi + 0x8 ]; arg1[1] to rdx
mov [ rsp + 0x1a0 ], r8; spilling x377 to mem
mov [ rsp + 0x1a8 ], rax; spilling x148 to mem
mulx r8, rax, r9; x386, x385<- x5 * arg1[1]
mov rdx, [ rsi + 0x10 ]; arg1[2] to rdx
mov [ rsp + 0x1b0 ], r14; spilling x151 to mem
mov [ rsp + 0x1b8 ], r13; spilling x382 to mem
mulx r14, r13, r12; x153, x152<- x2 * arg1[2]
mov rdx, r9; x5 to rdx
mov [ rsp + 0x1c0 ], r15; spilling x150 to mem
mulx r9, r15, [ rsi + 0x10 ]; x384, x383<- x5 * arg1[2]
mov [ rsp + 0x1c8 ], r14; spilling x153 to mem
setc r14b; spill CF x283 to reg (r14)
clc;
adcx rax, rbx
adcx r15, r8
adcx r10, r9
movzx rbx, r14b; x284, copying x283 here, cause x283 is needed in a reg for other than x284, namely all: , x284, size: 1
mov r8, [ rsp + 0x118 ]; load m64 x263 to register64
lea rbx, [ rbx + r8 ]; r8/64 + m8
mulx rdx, r8, [ rsi + 0x20 ]; x380, x379<- x5 * arg1[4]
mov r14, rdx; preserving value of x380 into a new reg
mov rdx, [ rsi + 0x28 ]; saving arg1[5] in rdx.
mulx r12, r9, r12; x147, x146<- x2 * arg1[5]
mov rdx, [ rsp + 0x78 ]; x160, copying x155 here, cause x155 is needed in a reg for other than x160, namely all: , x160--x161, size: 1
adox rdx, r13
mov r13, [ rsp + 0x1c8 ]; load m64 x153 to register64
mov [ rsp + 0x1d0 ], r10; spilling x393 to mem
mov r10, [ rsp + 0x1c0 ]; x162, copying x150 here, cause x150 is needed in a reg for other than x162, namely all: , x162--x163, size: 1
adox r10, r13
mov r13, [ rsp + 0x1b8 ]; x395, copying x382 here, cause x382 is needed in a reg for other than x395, namely all: , x395--x396, size: 1
adcx r13, r8
mov r8, rdx; preserving value of x160 into a new reg
mov rdx, [ rsi + 0x8 ]; saving arg1[1] in rdx.
mov [ rsp + 0x1d8 ], r13; spilling x395 to mem
mov [ rsp + 0x1e0 ], rbx; spilling x284 to mem
mulx r13, rbx, r11; x232, x231<- x3 * arg1[1]
mov rdx, [ rsp + 0x1a8 ]; load m64 x148 to register64
mov [ rsp + 0x1e8 ], r15; spilling x391 to mem
mov r15, [ rsp + 0x1b0 ]; x164, copying x151 here, cause x151 is needed in a reg for other than x164, namely all: , x164--x165, size: 1
adox r15, rdx
mov rdx, [ rsp + 0x1a0 ]; x397, copying x377 here, cause x377 is needed in a reg for other than x397, namely all: , x397--x398, size: 1
adcx rdx, r14
mov r14, rdx; preserving value of x397 into a new reg
mov rdx, [ rsi + 0x10 ]; saving arg1[2] in rdx.
mov [ rsp + 0x1f0 ], rax; spilling x389 to mem
mov [ rsp + 0x1f8 ], r15; spilling x164 to mem
mulx rax, r15, r11; x230, x229<- x3 * arg1[2]
mov rdx, [ rsp + 0x198 ]; x399, copying x378 here, cause x378 is needed in a reg for other than x399, namely all: , x399, size: 1
mov [ rsp + 0x200 ], r14; spilling x397 to mem
mov r14, 0x0 ; moving imm to reg
adcx rdx, r14
mov r14, 0xffffffffffffffff ; moving imm to reg
xchg rdx, rcx; x106, swapping with x399, which is currently in rdx
mov [ rsp + 0x208 ], rcx; spilling x399 to mem
mov [ rsp + 0x210 ], r10; spilling x162 to mem
mulx rcx, r10, r14; x113, x112<- x106 * 0xffffffffffffffff
xchg rdx, r11; x3, swapping with x106, which is currently in rdx
mov [ rsp + 0x218 ], rcx; spilling x113 to mem
mulx r14, rcx, [ rsi + 0x28 ]; x224, x223<- x3 * arg1[5]
mov [ rsp + 0x220 ], r12; spilling x147 to mem
mov r12, 0xffffffffffffffff ; moving imm to reg
xchg rdx, r12; 0xffffffffffffffff, swapping with x3, which is currently in rdx
mov [ rsp + 0x228 ], r8; spilling x160 to mem
mov [ rsp + 0x230 ], r10; spilling x112 to mem
mulx r8, r10, [ rsp + 0x188 ]; x35, x34<- x30 * 0xffffffffffffffff
clc;
adcx rbx, [ rsp + 0xd8 ]
adcx r15, r13
xchg rdx, r12; x3, swapping with 0xffffffffffffffff, which is currently in rdx
mulx rdx, r13, [ rsi + 0x18 ]; x228, x227<- x3 * arg1[3]
adcx r13, rax
mov rax, [ rsp + 0x190 ]; x166, copying x149 here, cause x149 is needed in a reg for other than x166, namely all: , x166--x167, size: 1
adox rax, r9
seto r9b; spill OF x167 to reg (r9)
mov r12, -0x2 ; moving imm to reg
inc r12; OF<-0x0, preserve CF (debug: 6; load -2, increase it, save as -1)
adox rbp, [ rsp + 0x128 ]
mov rbp, [ rsp + 0x180 ]; x241, copying x225 here, cause x225 is needed in a reg for other than x241, namely all: , x241--x242, size: 1
adcx rbp, rdx
mov rdx, [ rsi + 0x20 ]; arg1[4] to rdx
mov [ rsp + 0x238 ], rbp; spilling x241 to mem
mulx r12, rbp, [ rsp + 0x40 ]; x10, x9<- x6 * arg1[4]
mov rdx, 0xfffffffffffffffe ; moving imm to reg
mov [ rsp + 0x240 ], rax; spilling x166 to mem
mov [ rsp + 0x248 ], r13; spilling x239 to mem
mulx rax, r13, rdi; x192, x191<- x183 * 0xfffffffffffffffe
mov rdx, [ rsp + 0x178 ]; x243, copying x226 here, cause x226 is needed in a reg for other than x243, namely all: , x243--x244, size: 1
adcx rdx, rcx
seto cl; spill OF x286 to reg (rcx)
mov [ rsp + 0x250 ], rdx; spilling x243 to mem
movzx rdx, byte [ rsp + 0x110 ]; load byte memx198 to register64
mov [ rsp + 0x258 ], r15; spilling x237 to mem
mov r15, -0x1 ; moving imm to reg
inc r15; OF<-0x0, preserve CF (debug: state 5 (thanks Paul))
mov r15, -0x1 ; moving imm to reg
adox rdx, r15; loading flag
adox r13, [ rsp + 0x108 ]
mov rdx, [ rsi + 0x18 ]; arg1[3] to rdx
mov [ rsp + 0x260 ], rax; spilling x192 to mem
mulx r15, rax, [ rsp + 0xe8 ]; x74, x73<- x1 * arg1[3]
mov rdx, [ rsi + 0x20 ]; load m64 x4 to register64
mov byte [ rsp + 0x268 ], cl; spilling byte x286 to mem
mov rcx, 0x0 ; moving imm to reg
adcx r14, rcx
mov rcx, [ rsp + 0xd0 ]; load m64 x115 to register64
mov [ rsp + 0x270 ], r14; spilling x245 to mem
movzx r14, byte [ rsp + 0xe0 ]; load byte memx123 to register64
clc;
mov [ rsp + 0x278 ], r12; spilling x10 to mem
mov r12, -0x1 ; moving imm to reg
adcx r14, r12; loading flag
adcx rcx, [ rsp + 0x230 ]
seto r14b; spill OF x200 to reg (r14)
movzx r12, byte [ rsp + 0xa0 ]; load byte memx49 to register64
mov [ rsp + 0x280 ], rdx; spilling x4 to mem
mov rdx, -0x1 ; moving imm to reg
inc rdx; OF<-0x0, preserve CF (debug: state 5 (thanks Paul))
mov rdx, -0x1 ; moving imm to reg
adox r12, rdx; loading flag
adox r10, [ rsp + 0x68 ]
setc r12b; spill CF x125 to reg (r12)
movzx rdx, byte [ rsp + 0xc8 ]; load byte memx84 to register64
clc;
mov byte [ rsp + 0x288 ], r14b; spilling byte x200 to mem
mov r14, -0x1 ; moving imm to reg
adcx rdx, r14; loading flag
adcx rax, [ rsp + 0x18 ]
setc dl; spill CF x86 to reg (rdx)
movzx r14, byte [ rsp + 0xc0 ]; load byte memx24 to register64
clc;
mov byte [ rsp + 0x290 ], r12b; spilling byte x125 to mem
mov r12, -0x1 ; moving imm to reg
adcx r14, r12; loading flag
adcx rbp, [ rsp + 0x70 ]
setc r14b; spill CF x26 to reg (r14)
movzx r12, byte [ rsp + 0xb0 ]; load byte memx62 to register64
clc;
mov [ rsp + 0x298 ], rbx; spilling x235 to mem
mov rbx, -0x1 ; moving imm to reg
adcx r12, rbx; loading flag
adcx rbp, r10
mov r12b, dl; preserving value of x86 into a new reg
mov rdx, [ rsp + 0x40 ]; saving x6 in rdx.
mulx rdx, r10, [ rsi + 0x28 ]; x8, x7<- x6 * arg1[5]
setc bl; spill CF x64 to reg (rbx)
mov [ rsp + 0x2a0 ], rdx; spilling x8 to mem
movzx rdx, byte [ rsp + 0xb8 ]; load byte memx97 to register64
clc;
mov [ rsp + 0x2a8 ], r10; spilling x7 to mem
mov r10, -0x1 ; moving imm to reg
adcx rdx, r10; loading flag
adcx rbp, rax
mov rdx, 0xffffffffffffffff ; moving imm to reg
mulx rax, r10, [ rsp + 0x188 ]; x33, x32<- x30 * 0xffffffffffffffff
setc dl; spill CF x99 to reg (rdx)
mov [ rsp + 0x2b0 ], rax; spilling x33 to mem
movzx rax, byte [ rsp + 0x100 ]; load byte memx136 to register64
clc;
mov byte [ rsp + 0x2b8 ], bl; spilling byte x64 to mem
mov rbx, -0x1 ; moving imm to reg
adcx rax, rbx; loading flag
adcx rbp, rcx
setc al; spill CF x138 to reg (rax)
movzx rcx, byte [ rsp + 0xf8 ]; load byte memx172 to register64
clc;
adcx rcx, rbx; loading flag
adcx rbp, [ rsp + 0x228 ]
adox r10, r8
setc cl; spill CF x174 to reg (rcx)
movzx r8, byte [ rsp + 0xf0 ]; load byte memx211 to register64
clc;
adcx r8, rbx; loading flag
adcx rbp, r13
movzx r8, r9b; x168, copying x167 here, cause x167 is needed in a reg for other than x168, namely all: , x168, size: 1
mov r13, [ rsp + 0x220 ]; load m64 x147 to register64
lea r8, [ r8 + r13 ]; r8/64 + m8
mov r13b, dl; preserving value of x99 into a new reg
mov rdx, [ rsi + 0x20 ]; saving arg1[4] in rdx.
mulx r9, rbx, [ rsp + 0xe8 ]; x72, x71<- x1 * arg1[4]
mov rdx, 0xffffffffffffffff ; moving imm to reg
mov [ rsp + 0x2c0 ], r8; spilling x168 to mem
mov [ rsp + 0x2c8 ], r9; spilling x72 to mem
mulx r8, r9, r11; x111, x110<- x106 * 0xffffffffffffffff
setc dl; spill CF x213 to reg (rdx)
clc;
mov [ rsp + 0x2d0 ], r8; spilling x111 to mem
mov r8, -0x1 ; moving imm to reg
movzx r12, r12b
adcx r12, r8; loading flag
adcx r15, rbx
setc r12b; spill CF x88 to reg (r12)
movzx rbx, byte [ rsp + 0x170 ]; load byte memx247 to register64
clc;
adcx rbx, r8; loading flag
adcx rbp, [ rsp + 0x298 ]
mov bl, dl; preserving value of x213 into a new reg
mov rdx, [ rsp + 0x280 ]; saving x4 in rdx.
mov byte [ rsp + 0x2d8 ], r12b; spilling byte x88 to mem
mulx r8, r12, [ rsi + 0x8 ]; x309, x308<- x4 * arg1[1]
mov [ rsp + 0x2e0 ], r8; spilling x309 to mem
mov r8, [ rsp + 0x2a8 ]; load m64 x7 to register64
mov byte [ rsp + 0x2e8 ], bl; spilling byte x213 to mem
setc bl; spill CF x249 to reg (rbx)
clc;
mov [ rsp + 0x2f0 ], r12; spilling x308 to mem
mov r12, -0x1 ; moving imm to reg
movzx r14, r14b
adcx r14, r12; loading flag
adcx r8, [ rsp + 0x278 ]
setc r14b; spill CF x28 to reg (r14)
movzx r12, byte [ rsp + 0x2b8 ]; load byte memx64 to register64
clc;
mov byte [ rsp + 0x2f8 ], bl; spilling byte x249 to mem
mov rbx, -0x1 ; moving imm to reg
adcx r12, rbx; loading flag
adcx r8, r10
mulx r12, r10, [ rsi + 0x0 ]; x311, x310<- x4 * arg1[0]
setc bl; spill CF x66 to reg (rbx)
mov byte [ rsp + 0x300 ], r14b; spilling byte x28 to mem
movzx r14, byte [ rsp + 0x268 ]; load byte memx286 to register64
clc;
mov [ rsp + 0x308 ], r12; spilling x311 to mem
mov r12, -0x1 ; moving imm to reg
adcx r14, r12; loading flag
adcx rbp, [ rsp + 0x160 ]
seto r14b; spill OF x53 to reg (r14)
inc r12; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
adox r10, rbp
mov rbp, 0xffffffffffffffff ; moving imm to reg
xchg rdx, rdi; x183, swapping with x4, which is currently in rdx
mov byte [ rsp + 0x310 ], bl; spilling byte x66 to mem
mulx r12, rbx, rbp; x190, x189<- x183 * 0xffffffffffffffff
mov rbp, 0x100000001 ; moving imm to reg
xchg rdx, r10; x323, swapping with x183, which is currently in rdx
mov [ rsp + 0x318 ], r12; spilling x190 to mem
mov byte [ rsp + 0x320 ], r14b; spilling byte x53 to mem
mulx r12, r14, rbp; _, x337<- x323 * 0x100000001
mov r12, 0xffffffff00000000 ; moving imm to reg
xchg rdx, r12; 0xffffffff00000000, swapping with x323, which is currently in rdx
mov byte [ rsp + 0x328 ], cl; spilling byte x174 to mem
mulx rbp, rcx, r14; x348, x347<- x337 * 0xffffffff00000000
mov rdx, 0xffffffff ; moving imm to reg
mov [ rsp + 0x330 ], rbp; spilling x348 to mem
mov [ rsp + 0x338 ], rcx; spilling x347 to mem
mulx rbp, rcx, r14; x350, x349<- x337 * 0xffffffff
setc dl; spill CF x288 to reg (rdx)
clc;
adcx rcx, r12
setc cl; spill CF x363 to reg (rcx)
movzx r12, byte [ rsp + 0x290 ]; load byte memx125 to register64
clc;
mov byte [ rsp + 0x340 ], dl; spilling byte x288 to mem
mov rdx, -0x1 ; moving imm to reg
adcx r12, rdx; loading flag
adcx r9, [ rsp + 0x218 ]
setc r12b; spill CF x127 to reg (r12)
clc;
movzx r13, r13b
adcx r13, rdx; loading flag
adcx r8, r15
setc r13b; spill CF x101 to reg (r13)
movzx r15, byte [ rsp + 0x288 ]; load byte memx200 to register64
clc;
adcx r15, rdx; loading flag
adcx rbx, [ rsp + 0x260 ]
setc r15b; spill CF x202 to reg (r15)
clc;
movzx rax, al
adcx rax, rdx; loading flag
adcx r8, r9
seto al; spill OF x324 to reg (rax)
movzx r9, byte [ rsp + 0x328 ]; load byte memx174 to register64
inc rdx; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov rdx, -0x1 ; moving imm to reg
adox r9, rdx; loading flag
adox r8, [ rsp + 0x210 ]
mov r9, [ rsp + 0x308 ]; load m64 x311 to register64
setc dl; spill CF x140 to reg (rdx)
clc;
adcx r9, [ rsp + 0x2f0 ]
mov byte [ rsp + 0x348 ], dl; spilling byte x140 to mem
setc dl; spill CF x313 to reg (rdx)
clc;
adcx rbp, [ rsp + 0x338 ]
mov byte [ rsp + 0x350 ], r12b; spilling byte x127 to mem
seto r12b; spill OF x176 to reg (r12)
mov byte [ rsp + 0x358 ], r15b; spilling byte x202 to mem
movzx r15, byte [ rsp + 0x2e8 ]; load byte memx213 to register64
mov byte [ rsp + 0x360 ], r13b; spilling byte x101 to mem
mov r13, 0x0 ; moving imm to reg
dec r13; OF<-0x0, preserve CF (debug: state 4 (thanks Paul))
adox r15, r13; loading flag
adox r8, rbx
setc r15b; spill CF x352 to reg (r15)
movzx rbx, byte [ rsp + 0x2f8 ]; load byte memx249 to register64
clc;
adcx rbx, r13; loading flag
adcx r8, [ rsp + 0x258 ]
setc bl; spill CF x251 to reg (rbx)
movzx r13, byte [ rsp + 0x340 ]; load byte memx288 to register64
clc;
mov byte [ rsp + 0x368 ], r15b; spilling byte x352 to mem
mov r15, -0x1 ; moving imm to reg
adcx r13, r15; loading flag
adcx r8, [ rsp + 0x158 ]
seto r13b; spill OF x215 to reg (r13)
inc r15; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov r15, -0x1 ; moving imm to reg
movzx rax, al
adox rax, r15; loading flag
adox r8, r9
seto al; spill OF x326 to reg (rax)
inc r15; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov r9, -0x1 ; moving imm to reg
movzx rcx, cl
adox rcx, r9; loading flag
adox r8, rbp
setc cl; spill CF x290 to reg (rcx)
clc;
adcx r8, [ rsp + 0x168 ]
mov rbp, 0x100000001 ; moving imm to reg
xchg rdx, rbp; 0x100000001, swapping with x313, which is currently in rdx
mulx r15, r9, r8; _, x414<- x400 * 0x100000001
mov r15, 0xffffffff00000000 ; moving imm to reg
xchg rdx, r9; x414, swapping with 0x100000001, which is currently in rdx
mov byte [ rsp + 0x370 ], al; spilling byte x326 to mem
mulx r9, rax, r15; x425, x424<- x414 * 0xffffffff00000000
mov r15, 0xffffffff ; moving imm to reg
mov byte [ rsp + 0x378 ], cl; spilling byte x290 to mem
mov byte [ rsp + 0x380 ], bl; spilling byte x251 to mem
mulx rcx, rbx, r15; x427, x426<- x414 * 0xffffffff
mov r15, rdx; preserving value of x414 into a new reg
mov rdx, [ rsi + 0x20 ]; saving arg1[4] in rdx.
mov byte [ rsp + 0x388 ], r13b; spilling byte x215 to mem
mov [ rsp + 0x390 ], rbx; spilling x426 to mem
mulx r13, rbx, rdi; x303, x302<- x4 * arg1[4]
mov rdx, [ rsi + 0x10 ]; arg1[2] to rdx
mov byte [ rsp + 0x398 ], r12b; spilling byte x176 to mem
mov [ rsp + 0x3a0 ], r13; spilling x303 to mem
mulx r12, r13, rdi; x307, x306<- x4 * arg1[2]
seto dl; spill OF x365 to reg (rdx)
mov [ rsp + 0x3a8 ], rbx; spilling x302 to mem
mov rbx, -0x2 ; moving imm to reg
inc rbx; OF<-0x0, preserve CF (debug: 6; load -2, increase it, save as -1)
adox rax, rcx
movzx rcx, byte [ rsp + 0x320 ]; x54, copying x53 here, cause x53 is needed in a reg for other than x54, namely all: , x54, size: 1
mov rbx, [ rsp + 0x2b0 ]; load m64 x33 to register64
lea rcx, [ rcx + rbx ]; r8/64 + m8
mov rbx, 0xffffffffffffffff ; moving imm to reg
xchg rdx, r15; x414, swapping with x365, which is currently in rdx
mov [ rsp + 0x3b0 ], rax; spilling x428 to mem
mov byte [ rsp + 0x3b8 ], r15b; spilling byte x365 to mem
mulx rax, r15, rbx; x417, x416<- x414 * 0xffffffffffffffff
mov rbx, 0xfffffffffffffffe ; moving imm to reg
mov [ rsp + 0x3c0 ], rcx; spilling x54 to mem
mov [ rsp + 0x3c8 ], rax; spilling x417 to mem
mulx rcx, rax, rbx; x423, x422<- x414 * 0xfffffffffffffffe
mov rbx, rdx; preserving value of x414 into a new reg
mov rdx, [ rsi + 0x28 ]; saving arg1[5] in rdx.
mov [ rsp + 0x3d0 ], r12; spilling x307 to mem
mov [ rsp + 0x3d8 ], r15; spilling x416 to mem
mulx r12, r15, rdi; x301, x300<- x4 * arg1[5]
adox rax, r9
mov rdx, 0xffffffffffffffff ; moving imm to reg
mov [ rsp + 0x3e0 ], rax; spilling x430 to mem
mulx r9, rax, rbx; x419, x418<- x414 * 0xffffffffffffffff
mov [ rsp + 0x3e8 ], r12; spilling x301 to mem
mulx rbx, r12, rbx; x421, x420<- x414 * 0xffffffffffffffff
adox r12, rcx
xchg rdx, rdi; x4, swapping with 0xffffffffffffffff, which is currently in rdx
mulx rdx, rcx, [ rsi + 0x18 ]; x305, x304<- x4 * arg1[3]
adox rax, rbx
setc bl; spill CF x401 to reg (rbx)
clc;
mov rdi, -0x1 ; moving imm to reg
movzx rbp, bpl
adcx rbp, rdi; loading flag
adcx r13, [ rsp + 0x2e0 ]
mov rbp, [ rsp + 0x3d8 ]; x436, copying x416 here, cause x416 is needed in a reg for other than x436, namely all: , x436--x437, size: 1
adox rbp, r9
mov r9, rdx; preserving value of x305 into a new reg
mov rdx, [ rsi + 0x28 ]; saving arg1[5] in rdx.
mov [ rsp + 0x3f0 ], rbp; spilling x436 to mem
mulx rdi, rbp, [ rsp + 0xe8 ]; x70, x69<- x1 * arg1[5]
mov rdx, [ rsp + 0x3d0 ]; x316, copying x307 here, cause x307 is needed in a reg for other than x316, namely all: , x316--x317, size: 1
adcx rdx, rcx
mov rcx, 0xffffffffffffffff ; moving imm to reg
xchg rdx, r10; x183, swapping with x316, which is currently in rdx
mov [ rsp + 0x3f8 ], rax; spilling x434 to mem
mov [ rsp + 0x400 ], r12; spilling x432 to mem
mulx rax, r12, rcx; x186, x185<- x183 * 0xffffffffffffffff
movzx rcx, byte [ rsp + 0x300 ]; x29, copying x28 here, cause x28 is needed in a reg for other than x29, namely all: , x29, size: 1
mov [ rsp + 0x408 ], r10; spilling x316 to mem
mov r10, [ rsp + 0x2a0 ]; load m64 x8 to register64
lea rcx, [ rcx + r10 ]; r8/64 + m8
mov r10, [ rsp + 0x3a8 ]; x318, copying x302 here, cause x302 is needed in a reg for other than x318, namely all: , x318--x319, size: 1
adcx r10, r9
mov r9, [ rsp + 0x3a0 ]; x320, copying x303 here, cause x303 is needed in a reg for other than x320, namely all: , x320--x321, size: 1
adcx r9, r15
mov r15, [ rsp + 0x3c8 ]; x438, copying x417 here, cause x417 is needed in a reg for other than x438, namely all: , x438, size: 1
mov [ rsp + 0x410 ], r9; spilling x320 to mem
mov r9, 0x0 ; moving imm to reg
adox r15, r9
mov r9, [ rsp + 0x3e8 ]; x322, copying x301 here, cause x301 is needed in a reg for other than x322, namely all: , x322, size: 1
adc r9, 0x0
add byte [ rsp + 0x310 ], 0xFF; load flag from rm/8 into CF, clears other flag. NODE, if operand1 is not a byte reg, this fails.
setc [ rsp + 0x310 ]; since that has deps, resore it whereever it was
adcx rcx, [ rsp + 0x3c0 ]
mov [ rsp + 0x418 ], r15; spilling x438 to mem
mov r15, 0xffffffffffffffff ; moving imm to reg
mov [ rsp + 0x420 ], r9; spilling x322 to mem
mulx rdx, r9, r15; x188, x187<- x183 * 0xffffffffffffffff
xchg rdx, r11; x106, swapping with x188, which is currently in rdx
mov [ rsp + 0x428 ], r10; spilling x318 to mem
mulx rdx, r10, r15; x109, x108<- x106 * 0xffffffffffffffff
movzx r15, byte [ rsp + 0x2d8 ]; load byte memx88 to register64
mov [ rsp + 0x430 ], rax; spilling x186 to mem
mov rax, -0x1 ; moving imm to reg
adox r15, rax; loading flag
adox rbp, [ rsp + 0x2c8 ]
seto r15b; spill OF x90 to reg (r15)
movzx rax, byte [ rsp + 0x360 ]; load byte memx101 to register64
mov byte [ rsp + 0x438 ], bl; spilling byte x401 to mem
mov rbx, 0x0 ; moving imm to reg
dec rbx; OF<-0x0, preserve CF (debug: state 4 (thanks Paul))
adox rax, rbx; loading flag
adox rcx, rbp
setc al; spill CF x68 to reg (rax)
movzx rbp, byte [ rsp + 0x358 ]; load byte memx202 to register64
clc;
adcx rbp, rbx; loading flag
adcx r9, [ rsp + 0x318 ]
seto bpl; spill OF x103 to reg (rbp)
movzx rbx, byte [ rsp + 0x350 ]; load byte memx127 to register64
mov byte [ rsp + 0x440 ], al; spilling byte x68 to mem
mov rax, -0x1 ; moving imm to reg
inc rax; OF<-0x0, preserve CF (debug: state 5 (thanks Paul))
mov rax, -0x1 ; moving imm to reg
adox rbx, rax; loading flag
adox r10, [ rsp + 0x2d0 ]
setc bl; spill CF x204 to reg (rbx)
movzx rax, byte [ rsp + 0x348 ]; load byte memx140 to register64
clc;
mov byte [ rsp + 0x448 ], bpl; spilling byte x103 to mem
mov rbp, -0x1 ; moving imm to reg
adcx rax, rbp; loading flag
adcx rcx, r10
seto al; spill OF x129 to reg (rax)
movzx r10, byte [ rsp + 0x398 ]; load byte memx176 to register64
inc rbp; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov rbp, -0x1 ; moving imm to reg
adox r10, rbp; loading flag
adox rcx, [ rsp + 0x1f8 ]
setc r10b; spill CF x142 to reg (r10)
clc;
adcx r8, [ rsp + 0x390 ]
movzx r8, r15b; x91, copying x90 here, cause x90 is needed in a reg for other than x91, namely all: , x91, size: 1
lea r8, [ r8 + rdi ]
seto dil; spill OF x178 to reg (rdi)
inc rbp; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov r15, -0x1 ; moving imm to reg
movzx rbx, bl
adox rbx, r15; loading flag
adox r11, r12
seto r12b; spill OF x206 to reg (r12)
movzx rbx, byte [ rsp + 0x388 ]; load byte memx215 to register64
inc r15; OF<-0x0, preserve CF (debug: state 1(-0x1) (thanks Paul))
mov rbp, -0x1 ; moving imm to reg
adox rbx, rbp; loading flag
adox rcx, r9
movzx rbx, al; x130, copying x129 here, cause x129 is needed in a reg for other than x130, namely all: , x130, size: 1
lea rbx, [ rbx + rdx ]
setc dl; spill CF x440 to reg (rdx)
movzx r9, byte [ rsp + 0x380 ]; load byte memx251 to register64
clc;
adcx r9, rbp; loading flag
adcx rcx, [ rsp + 0x248 ]
setc r9b; spill CF x253 to reg (r9)
movzx rax, byte [ rsp + 0x378 ]; load byte memx290 to register64
clc;
adcx rax, rbp; loading flag
adcx rcx, [ rsp + 0x148 ]
setc al; spill CF x292 to reg (rax)
movzx r15, byte [ rsp + 0x370 ]; load byte memx326 to register64
clc;
adcx r15, rbp; loading flag
adcx rcx, r13
setc r15b; spill CF x328 to reg (r15)
movzx r13, byte [ rsp + 0x448 ]; load byte memx103 to register64
clc;
mov byte [ rsp + 0x450 ], dl; spilling byte x440 to mem
movzx rdx, byte [ rsp + 0x440 ]; load byte memx68 to register64
adcx r13, rbp; loading flag
adcx r8, rdx
mov rdx, 0xfffffffffffffffe ; moving imm to reg
mulx r13, rbp, r14; x346, x345<- x337 * 0xfffffffffffffffe
setc dl; spill CF x105 to reg (rdx)
clc;
mov [ rsp + 0x458 ], r13; spilling x346 to mem
mov r13, -0x1 ; moving imm to reg
movzx r10, r10b
adcx r10, r13; loading flag
adcx r8, rbx
movzx r10, dl; x145, copying x105 here, cause x105 is needed in a reg for other than x145, namely all: , x145, size: 1
mov rbx, 0x0 ; moving imm to reg
adcx r10, rbx
movzx rdx, byte [ rsp + 0x368 ]; load byte memx352 to register64
clc;
adcx rdx, r13; loading flag
adcx rbp, [ rsp + 0x330 ]
mov rdx, 0xffffffffffffffff ; moving imm to reg
mulx rbx, r13, r14; x344, x343<- x337 * 0xffffffffffffffff
seto dl; spill OF x217 to reg (rdx)
mov [ rsp + 0x460 ], rbx; spilling x344 to mem
mov rbx, 0x0 ; moving imm to reg
dec rbx; OF<-0x0, preserve CF (debug: state 4 (thanks Paul))
movzx rdi, dil
adox rdi, rbx; loading flag
adox r8, [ rsp + 0x240 ]
setc dil; spill CF x354 to reg (rdi)
movzx rbx, byte [ rsp + 0x3b8 ]; load byte memx365 to register64
clc;
mov [ rsp + 0x468 ], r10; spilling x145 to mem
mov r10, -0x1 ; moving imm to reg
adcx rbx, r10; loading flag
adcx rcx, rbp
setc bl; spill CF x367 to reg (rbx)
clc;
movzx rdx, dl
adcx rdx, r10; loading flag
adcx r8, r11
seto r11b; spill OF x180 to reg (r11)
inc r10; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov rdx, -0x1 ; moving imm to reg
movzx r9, r9b
adox r9, rdx; loading flag
adox r8, [ rsp + 0x238 ]
seto r9b; spill OF x255 to reg (r9)
inc rdx; OF<-0x0, preserve CF (debug: state 1(-0x1) (thanks Paul))
mov r10, -0x1 ; moving imm to reg
movzx rax, al
adox rax, r10; loading flag
adox r8, [ rsp + 0x140 ]
setc al; spill CF x219 to reg (rax)
movzx rbp, byte [ rsp + 0x438 ]; load byte memx401 to register64
clc;
adcx rbp, r10; loading flag
adcx rcx, [ rsp + 0x1f0 ]
movzx rbp, r12b; x207, copying x206 here, cause x206 is needed in a reg for other than x207, namely all: , x207, size: 1
mov rdx, [ rsp + 0x430 ]; load m64 x186 to register64
lea rbp, [ rbp + rdx ]; r8/64 + m8
setc dl; spill CF x403 to reg (rdx)
clc;
movzx r15, r15b
adcx r15, r10; loading flag
adcx r8, [ rsp + 0x408 ]
seto r12b; spill OF x294 to reg (r12)
inc r10; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov r15, -0x1 ; moving imm to reg
movzx rdi, dil
adox rdi, r15; loading flag
adox r13, [ rsp + 0x458 ]
setc dil; spill CF x330 to reg (rdi)
clc;
movzx rbx, bl
adcx rbx, r15; loading flag
adcx r8, r13
setc bl; spill CF x369 to reg (rbx)
clc;
movzx rdx, dl
adcx rdx, r15; loading flag
adcx r8, [ rsp + 0x1e8 ]
mov rdx, [ rsp + 0x2c0 ]; load m64 x168 to register64
setc r13b; spill CF x405 to reg (r13)
clc;
movzx r11, r11b
adcx r11, r15; loading flag
adcx rdx, [ rsp + 0x468 ]
setc r11b; spill CF x182 to reg (r11)
clc;
movzx rax, al
adcx rax, r15; loading flag
adcx rdx, rbp
setc al; spill CF x221 to reg (rax)
movzx rbp, byte [ rsp + 0x450 ]; load byte memx440 to register64
clc;
adcx rbp, r15; loading flag
adcx rcx, [ rsp + 0x3b0 ]
mov rbp, 0xffffffffffffffff ; moving imm to reg
xchg rdx, r14; x337, swapping with x220, which is currently in rdx
mulx r10, r15, rbp; x342, x341<- x337 * 0xffffffffffffffff
setc bpl; spill CF x442 to reg (rbp)
clc;
mov [ rsp + 0x470 ], rcx; spilling x441 to mem
mov rcx, -0x1 ; moving imm to reg
movzx r9, r9b
adcx r9, rcx; loading flag
adcx r14, [ rsp + 0x250 ]
movzx r9, al; x222, copying x221 here, cause x221 is needed in a reg for other than x222, namely all: , x222, size: 1
movzx r11, r11b
lea r9, [ r9 + r11 ]
mov r11, [ rsp + 0x460 ]; x357, copying x344 here, cause x344 is needed in a reg for other than x357, namely all: , x357--x358, size: 1
adox r11, r15
mov rax, [ rsp + 0x270 ]; x258, copying x245 here, cause x245 is needed in a reg for other than x258, namely all: , x258--x259, size: 1
adcx rax, r9
mov r15, 0xffffffffffffffff ; moving imm to reg
mulx rdx, r9, r15; x340, x339<- x337 * 0xffffffffffffffff
seto cl; spill OF x358 to reg (rcx)
mov r15, 0x0 ; moving imm to reg
dec r15; OF<-0x0, preserve CF (debug: state 4 (thanks Paul))
movzx r12, r12b
adox r12, r15; loading flag
adox r14, [ rsp + 0x150 ]
mov r12, [ rsp + 0x1e0 ]; x297, copying x284 here, cause x284 is needed in a reg for other than x297, namely all: , x297--x298, size: 1
adox r12, rax
seto al; spill OF x298 to reg (rax)
inc r15; OF<-0x0, preserve CF (debug: state 2 (y: -1, n: 0))
mov r15, -0x1 ; moving imm to reg
movzx rcx, cl
adox rcx, r15; loading flag
adox r10, r9
mov rcx, 0x0 ; moving imm to reg
adox rdx, rcx
movzx r9, al; x299, copying x298 here, cause x298 is needed in a reg for other than x299, namely all: , x299, size: 1
adc r9, 0x0
add dil, 0xFF; load flag from rm/8 into CF, clears other flag. NODE, if operand1 is not a byte reg, this fails.
setc dil; since that has deps, resore it whereever it was
adcx r14, [ rsp + 0x428 ]
mov rdi, [ rsp + 0x410 ]; x333, copying x320 here, cause x320 is needed in a reg for other than x333, namely all: , x333--x334, size: 1
adcx rdi, r12
movzx rbp, bpl
adox rbp, r15; loading flag
adox r8, [ rsp + 0x3e0 ]
mov rbp, [ rsp + 0x420 ]; x335, copying x322 here, cause x322 is needed in a reg for other than x335, namely all: , x335--x336, size: 1
adcx rbp, r9
seto al; spill OF x444 to reg (rax)
dec rcx; OF<-0x0, preserve CF (debug: state 1(0x0) (thanks Paul))
movzx rbx, bl
adox rbx, rcx; loading flag
adox r14, r11
adox r10, rdi
adox rdx, rbp
setc r15b; spill CF x336 to reg (r15)
clc;
movzx r13, r13b
adcx r13, rcx; loading flag
adcx r14, [ rsp + 0x1d0 ]
mov rbx, [ rsp + 0x1d8 ]; x408, copying x395 here, cause x395 is needed in a reg for other than x408, namely all: , x408--x409, size: 1
adcx rbx, r10
movzx r13, r15b; x376, copying x336 here, cause x336 is needed in a reg for other than x376, namely all: , x376, size: 1
mov r11, 0x0 ; moving imm to reg
adox r13, r11
dec r11; OF<-0x0, preserve CF (debug: state 1(0x0) (thanks Paul))
movzx rax, al
adox rax, r11; loading flag
adox r14, [ rsp + 0x400 ]
mov rcx, [ rsp + 0x200 ]; x410, copying x397 here, cause x397 is needed in a reg for other than x410, namely all: , x410--x411, size: 1
adcx rcx, rdx
mov r12, [ rsp + 0x3f8 ]; x447, copying x434 here, cause x434 is needed in a reg for other than x447, namely all: , x447--x448, size: 1
adox r12, rbx
mov r9, [ rsp + 0x208 ]; x412, copying x399 here, cause x399 is needed in a reg for other than x412, namely all: , x412--x413, size: 1
adcx r9, r13
mov rdi, [ rsp + 0x3f0 ]; x449, copying x436 here, cause x436 is needed in a reg for other than x449, namely all: , x449--x450, size: 1
adox rdi, rcx
mov rax, [ rsp + 0x418 ]; x451, copying x438 here, cause x438 is needed in a reg for other than x451, namely all: , x451--x452, size: 1
adox rax, r9
setc r15b; spill CF x413 to reg (r15)
seto bpl; spill OF x452 to reg (rbp)
mov r10, [ rsp + 0x470 ]; x454, copying x441 here, cause x441 is needed in a reg for other than x454, namely all: , x454--x455, x468, size: 2
mov rdx, 0xffffffff ; moving imm to reg
sub r10, rdx
mov rbx, r8; x456, copying x443 here, cause x443 is needed in a reg for other than x456, namely all: , x469, x456--x457, size: 2
mov r13, 0xffffffff00000000 ; moving imm to reg
sbb rbx, r13
mov rcx, r14; x458, copying x445 here, cause x445 is needed in a reg for other than x458, namely all: , x470, x458--x459, size: 2
mov r9, 0xfffffffffffffffe ; moving imm to reg
sbb rcx, r9
mov r11, r12; x460, copying x447 here, cause x447 is needed in a reg for other than x460, namely all: , x460--x461, x471, size: 2
mov rdx, 0xffffffffffffffff ; moving imm to reg
sbb r11, rdx
movzx r13, bpl; x453, copying x452 here, cause x452 is needed in a reg for other than x453, namely all: , x453, size: 1
movzx r15, r15b
lea r13, [ r13 + r15 ]
mov r15, rdi; x462, copying x449 here, cause x449 is needed in a reg for other than x462, namely all: , x472, x462--x463, size: 2
sbb r15, rdx
mov rbp, rax; x464, copying x451 here, cause x451 is needed in a reg for other than x464, namely all: , x473, x464--x465, size: 2
sbb rbp, rdx
sbb r13, 0x00000000
cmovc rbx, r8; if CF, x469<- x443 (nzVar)
cmovc rcx, r14; if CF, x470<- x445 (nzVar)
cmovc r10, [ rsp + 0x470 ]; if CF, x468<- x441 (nzVar)
cmovc r11, r12; if CF, x471<- x447 (nzVar)
mov r13, [ rsp + 0x0 ]; load m64 out1 to register64
mov [ r13 + 0x8 ], rbx; out1[1] = x469
mov [ r13 + 0x0 ], r10; out1[0] = x468
cmovc r15, rdi; if CF, x472<- x449 (nzVar)
mov [ r13 + 0x20 ], r15; out1[4] = x472
cmovc rbp, rax; if CF, x473<- x451 (nzVar)
mov [ r13 + 0x28 ], rbp; out1[5] = x473
mov [ r13 + 0x10 ], rcx; out1[2] = x470
mov [ r13 + 0x18 ], r11; out1[3] = x471
mov rbx, [ rsp + 0x478 ]; restoring from stack
mov rbp, [ rsp + 0x480 ]; restoring from stack
mov r12, [ rsp + 0x488 ]; restoring from stack
mov r13, [ rsp + 0x490 ]; restoring from stack
mov r14, [ rsp + 0x498 ]; restoring from stack
mov r15, [ rsp + 0x4a0 ]; restoring from stack
add rsp, 0x4a8
ret
; cpu AMD Ryzen 9 5950X 16-Core Processor
; clocked at 2200 MHz
; first cyclecount 235.11, best 188.7, lastGood 191.53333333333333
; seed 1292680342693278
; CC / CFLAGS clang / -march=native -mtune=native -O3
; time needed: 4730986 ms / 60000 runs=> 78.84976666666667ms/run
; Time spent for assembling and measuring (initial batch_size=62, initial num_batches=101): 146932 ms
; Ratio (time for assembling + measure)/(total runtime for 60000runs): 0.03105737366375635
; number reverted permutation/ tried permutation: 19603 / 29964 =65.422%
; number reverted decision/ tried decision: 18061 / 30037 =60.129% |
; A315767: Coordination sequence Gal.6.627.6 where G.u.t.v denotes the coordination sequence for a vertex of type v in tiling number t in the Galebach list of u-uniform tilings.
; Submitted by Jamie Morken(s1)
; 1,6,12,18,24,30,34,40,46,52,58,64,70,76,82,88,94,98,104,110,116,122,128,134,140,146,152,158,162,168,174,180,186,192,198,204,210,216,222,226,232,238,244,250,256,262,268,274,280,286
; Input n in $0; output a(n) in $0.
; Computes a(n) = floor((32n+5)/11) + floor((32n-6)/11) + 1
; (checked: n=0 -> 0 + 0 + 1 = 1, n=1 -> 3 + 2 + 1 = 6).
mul $0,16 ; $0 = 16*n
mov $1,$0 ; keep a copy of 16*n in $1
mul $0,2 ; $0 = 32*n
add $0,5 ; $0 = 32*n + 5
div $0,11 ; $0 = floor((32n+5)/11)
mul $1,2 ; $1 = 32*n
sub $1,6 ; $1 = 32*n - 6
div $1,11 ; $1 = floor((32n-6)/11)
add $1,1
add $0,$1 ; sum the two quotients (+1)
|
; SBROM: 64KB PRG-ROM + (16,32,64)KB CHR-ROM
; http://bootgod.dyndns.org:7777/search.php?keywords=SBROM&kwtype=pcb
; Used for only three USA region games?
;------------------------------------------------------------------------------;
; number of 8K CHR banks
; Valid configurations: $02 (16K), $04 (32K), $08 (64K)
CHR_BANKS = $02
; MMC1 mirroring is mapper controlled. This just sets the default.
; If you want one-screen mirroring, you will need to set it via MMC1 writes.
; %0000 = Horizontal
; %0001 = Vertical
MIRRORING = %0001
; Mapper 001 (MMC1 - SBROM) iNES header (16 bytes total)
.byte "NES",$1A ; iNES file signature
.byte $04 ; 4x 16K PRG banks (64K PRG-ROM, per the SBROM spec above)
.byte CHR_BANKS ; 8K CHR banks
.byte $10|MIRRORING ; flags 6: mapper 1 ($1) low nibble in bits 4-7, mirroring in bit 0
.byte $00 ; flags 7: mapper high nibble 0, no NES 2.0 / console flags
.byte $00 ; no PRG RAM
.dsb 7, $00 ; zero-fill the remaining bytes of the 16-byte header
|
version https://git-lfs.github.com/spec/v1
oid sha256:846653215e2f6ca0bc9500cfedb5360f76755a518318e4c5114a42f3d44069ba
size 5795
|
dnl Itanium-2 mpn_gcd_1 -- mpn by 1 gcd.
dnl Contributed to the GNU project by Kevin Ryde, innerloop by Torbjorn
dnl Granlund.
dnl Copyright 2002-2005, 2012, 2013 Free Software Foundation, Inc.
dnl This file is part of the GNU MP Library.
dnl
dnl The GNU MP Library is free software; you can redistribute it and/or modify
dnl it under the terms of either:
dnl
dnl * the GNU Lesser General Public License as published by the Free
dnl Software Foundation; either version 3 of the License, or (at your
dnl option) any later version.
dnl
dnl or
dnl
dnl * the GNU General Public License as published by the Free Software
dnl Foundation; either version 2 of the License, or (at your option) any
dnl later version.
dnl
dnl or both in parallel, as here.
dnl
dnl The GNU MP Library is distributed in the hope that it will be useful, but
dnl WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
dnl for more details.
dnl
dnl You should have received copies of the GNU General Public License and the
dnl GNU Lesser General Public License along with the GNU MP Library. If not,
dnl see https://www.gnu.org/licenses/.
include(`../config.m4')
C cycles/bitpair (1x1 gcd)
C Itanium: ?
C Itanium 2: 5.1
C mpn_gcd_1 (mp_srcptr xp, mp_size_t xsize, mp_limb_t y);
C
C The entry sequence is designed to expect xsize>1 and hence a modexact
C call. This ought to be more common than a 1x1 operation. Our critical
C path is thus stripping factors of 2 from y, calling modexact, then
C stripping factors of 2 from the x remainder returned.
C
C The common factors of 2 between x and y must be determined using the
C original x, not the remainder from the modexact. This is done with
C x_orig which is xp[0]. There's plenty of time to do this while the rest
C of the modexact etc is happening.
C
C It's possible xp[0] is zero. In this case the trailing zeros calculation
C popc((x-1)&~x) gives 63, and that's clearly no less than what y will
C have, making min(x_twos,y_twos) == y_twos.
C
C The main loop consists of transforming x,y to abs(x-y),min(x,y), and then
C stripping factors of 2 from abs(x-y). Those factors of two are
C determined from just y-x, without the abs(), since there's the same
C number of trailing zeros on n or -n in twos complement. That makes the
C dependent chain 8 cycles deep.
C
C The selection of x-y versus y-x for abs(x-y), and the selection of the
C minimum of x and y, is done in parallel with the critical path.
C
C The algorithm takes about 0.68 iterations per bit (two N bit operands) on
C average, hence the final 5.8 cycles/bitpair.
C
C Not done:
C
C An alternate algorithm which didn't strip all twos, but instead applied
C tbit and predicated extr on x, and then y, was attempted. The loop was 6
C cycles, but the algorithm is an average 1.25 iterations per bitpair for a
C total 7.25 c/bp, which is slower than the current approach.
C
C Alternatives:
C
C Perhaps we could do something tricky by extracting a few high bits and a
C few low bits from the operands, and looking up a table which would give a
C set of predicates to control some shifts or subtracts or whatever. That
C could knock off multiple bits per iteration.
C
C The right shifts are a bit of a bottleneck (shr at 2 or 3 cycles, or extr
C only going down I0), perhaps it'd be possible to shift left instead,
C using add. That would mean keeping track of the lowest not-yet-zeroed
C bit, using some sort of mask.
C
C TODO:
C * Once mod_1_N exists in assembly for Itanium, add conditional calls.
C * Call bmod_1 even for n=1 when up[0] >> v0 (like other gcd_1 impls).
C * Probably avoid popcnt also outside of loop, instead use ctz_table.
ASM_START()
.explicit C What does this mean?
C HP's assembler requires these declarations for importing mpn_modexact_1c_odd
.global mpn_modexact_1c_odd
.type mpn_modexact_1c_odd,@function
C ctz_table[n] is the number of trailing zeros on n, or MAXSHIFT if n==0.
deflit(MAXSHIFT, 7)
deflit(MASK, eval((m4_lshift(1,MAXSHIFT))-1))
.section ".rodata"
ALIGN(m4_lshift(1,MAXSHIFT)) C align table to allow using dep
ctz_table:
.byte MAXSHIFT
forloop(i,1,MASK,
` .byte m4_count_trailing_zeros(i)
')
PROLOGUE(mpn_gcd_1)
C Register roles (see the long comment above for the algorithm):
C r32 xp
C r33 xsize
C r34 y
define(x, r8)
define(xp_orig, r32)
define(xsize, r33)
define(y, r34) define(inputs, 3)
define(save_rp, r35)
define(save_pfs, r36)
define(x_orig, r37)
define(x_orig_one, r38)
define(y_twos, r39) define(locals, 5)
define(out_xp, r40)
define(out_xsize, r41)
define(out_divisor, r42)
define(out_carry, r43) define(outputs, 4)
.prologue
{ .mmi;
ifdef(`HAVE_ABI_32',
` addp4 r9 = 0, xp_orig define(xp,r9)', C M0
` define(xp,xp_orig)')
.save ar.pfs, save_pfs
alloc save_pfs = ar.pfs, inputs, locals, outputs, 0 C M2
.save rp, save_rp
mov save_rp = b0 C I0
}{ .body
add r10 = -1, y C M3 y-1
} ;;
{ .mmi; ld8 x = [xp] C M0 x = xp[0] if no modexact
ld8 x_orig = [xp] C M1 orig x for common twos
cmp.ne p6,p0 = 1, xsize C I0
}{ .mmi; andcm y_twos = r10, y C M2 (y-1)&~y
mov out_xp = xp_orig C M3
mov out_xsize = xsize C I1
} ;;
mov out_carry = 0
popcnt y_twos = y_twos C I0 y twos
;;
{ .mmi; add x_orig_one = -1, x_orig C M0 orig x-1
shr.u out_divisor = y, y_twos C I0 y without twos
}{ shr.u y = y, y_twos C I1 y without twos
(p6) br.call.sptk.many b0 = mpn_modexact_1c_odd C if xsize>1
} ;;
C modexact can leave x==0
{ .mmi; cmp.eq p6,p0 = 0, x C M0 if {xp,xsize} % y == 0
andcm x_orig = x_orig_one, x_orig C M1 orig (x-1)&~x
add r9 = -1, x C I0 x-1
} ;;
{ .mmi; andcm r9 = r9, x C M0 (x-1)&~x
mov b0 = save_rp C I0
} ;;
popcnt x_orig = x_orig C I0 orig x twos
popcnt r9 = r9 C I0 x twos
;;
{ cmp.lt p7,p0 = x_orig, y_twos C M0 orig x_twos < y_twos
shr.u x = x, r9 C I0 x odd
} ;;
{ (p7) mov y_twos = x_orig C M0 common twos
add r10 = -1, y C I0 y-1
(p6) br.dpnt.few L(done_y) C B0 x%y==0 then result y
} ;;
C r22 = ctz_table base (PIC via GOT), r25 = mask for L(shift_alot) test
addl r22 = @ltoffx(ctz_table#), r1
mov r25 = m4_lshift(MASK, MAXSHIFT)
;;
ld8.mov r22 = [r22], ctz_table#
br L(ent)
ALIGN(32)
C Main loop: transform x,y -> abs(x-y),min(x,y), then strip twos from x
C using ctz_table (r19 = y-x computed at L(ent); p6 = x>y, p7 = x<y).
L(top): .pred.rel "mutex", p6,p7
.mmi; (p7) mov y = x
(p6) sub x = x, y
dep r21 = r19, r22, 0, MAXSHIFT C concat(table,lowbits)
.mmi; and r20 = MASK, r19
(p7) mov x = r19
nop 0
;;
L(mid):
.mmb; ld1 r16 = [r21]
cmp.eq p10,p0 = 0, r20
(p10) br.spnt.few.clr L(shift_alot)
;;
.mmi; nop 0
nop 0
shr.u x = x, r16
;;
L(ent):
.mmi; sub r19 = y, x
cmp.gtu p6,p7 = x, y
cmp.ne p8,p0 = x, y
.mmb; nop 0
nop 0
(p8) br.sptk.few.clr L(top)
C result is y
L(done_y):
mov ar.pfs = save_pfs C I0
shl r8 = y, y_twos C I common factors of 2
br.ret.sptk.many b0
C MAXSHIFT or more trailing zeros: shift by MAXSHIFT and retry the table.
L(shift_alot):
and r20 = x, r25
shr.u x = x, MAXSHIFT
;;
dep r21 = x, r22, 0, MAXSHIFT
br L(mid)
EPILOGUE()
|
#include "foodspawner.hpp"
#include <random>
#include <QDateTime>
#include "server.hpp"
#include <QRandomGenerator>
std::array<int, 26> FoodSpawner::ENGLISH_LETTER_FREQ = {
817, 149, 278, 425, 1270, 223, 202, 609, 697, 15, 77, 403,
241, 675, 751, 193, 9, 599, 633, 906, 276, 97, 236, 15, 197, 7
};
std::array<int, 26> FoodSpawner::ENGLISH_LETTER_FREQ_PREFIX = {};
// Build the cumulative letter-frequency table once, so pickIndex() can map a
// uniform draw onto English letter frequencies with a binary search.
FoodSpawner::FoodSpawner()
{
    int runningTotal = 0;
    for (int i = 0; i < 26; i++) {
        runningTotal += ENGLISH_LETTER_FREQ[i];
        ENGLISH_LETTER_FREQ_PREFIX[i] = runningTotal;
    }
}
// Pick a letter index (0..25) weighted by ENGLISH_LETTER_FREQ: draw a value
// over the total frequency mass, then locate its bucket in the prefix sums.
uint FoodSpawner::pickIndex()
{
    const uint sample = random(0, ENGLISH_LETTER_FREQ_PREFIX[25]);
    auto bucket = std::upper_bound(ENGLISH_LETTER_FREQ_PREFIX.begin(),
                                   ENGLISH_LETTER_FREQ_PREFIX.end(), sample);
    return std::distance(ENGLISH_LETTER_FREQ_PREFIX.begin(), bucket);
}
void FoodSpawner::init(uint n)
{
auto gridWidth = Server::getServer()->getGridSize().x;
auto gridHeight = Server::getServer()->getGridSize().y;
n = std::min(gridHeight*gridWidth, n);
for (uint i = 0; i < n; i++) {
Point pt{random(0, gridWidth), random(0, gridHeight)};
while (validPositions.find(pt) != validPositions.end()) {
pt = {random(0, gridWidth), random(0, gridHeight)};
}
foodData.emplaceBack(FoodData{pt.x, pt.y, QChar(this->pickIndex()+'A')});
validPositions.insert(pt, i);
}
emit foodDataChanged();
}
void FoodSpawner::init(const QVector<FoodData>& fds)
{
foodData = fds;
uint i = 0;
for (const auto& fd : fds) {
Point pt{fd.x, fd.y};
validPositions.insert(pt, i++);
}
emit foodDataChanged();
}
// Overwrite slot idx with fd, index its board position, and notify listeners.
void FoodSpawner::spawn(uint idx, const FoodData& fd)
{
    foodData[idx] = fd;
    Point where{fd.x, fd.y};
    validPositions.insert(where, idx);
    emit foodDataChanged();
}
// Respawn the food item stored at slot idx. Delegates to the FoodData
// overload; the returned replacement data is discarded.
// NOTE(review): the uint* out-parameter of the other overload is presumably
// defaulted in the header declaration — verify.
void FoodSpawner::respawn(int idx)
{
    this->respawn(foodData[idx]);
}
// Move an existing food item (identified by old's position) to a fresh random
// cell with a fresh random letter. Returns the replacement data, or a zeroed
// FoodData if old's position was not tracked. If fillIdx is non-null it
// receives the slot index that was respawned.
FoodData FoodSpawner::respawn(const FoodData& old, uint* fillIdx)
{
    auto itr = validPositions.find({old.x, old.y});
    FoodData res{0, 0, QChar(0)};
    if (itr != validPositions.end()) {
        Point pt;
        auto gridWidth = Server::getServer()->getGridSize().x;
        auto gridHeight = Server::getServer()->getGridSize().y;
        auto idx = itr.value();
        if (fillIdx) *fillIdx = idx;
        // Drop the old position before spawn() registers the new one.
        validPositions.erase(itr);
        // NOTE(review): unlike init(), the new cell is not rejection-sampled,
        // so it may collide with an already-occupied position — confirm intended.
        pt.x = random(0, gridWidth);
        pt.y = random(0, gridHeight);
        // NOTE(review): letter is uniform random(0,26) here, whereas init()
        // uses the frequency-weighted pickIndex() — possibly inconsistent.
        this->spawn(idx, FoodData{pt.x, pt.y, QChar(random(0,26)+'A')});
        res.x = pt.x; res.y = pt.y; res.letter = foodData[idx].letter;
    }
    return res;
}
// Drop the position -> slot-index entry for grid cell pt.
// Only the lookup is removed; foodData itself is not modified here.
void FoodSpawner::destroy(Point pt)
{
    validPositions.remove(pt);
}
// Look up the food item at grid cell pt. Returns a zeroed FoodData when the
// cell is untracked or its stored index is out of range for foodData.
FoodData FoodSpawner::get(const Point& pt)
{
    auto itr = validPositions.find(pt);
    FoodData res{0, 0, QChar(0)};
    // Single bounds check; the previous code repeated `idx < foodData.size()`
    // inside the branch, making the inner test unreachable-false.
    if (itr != validPositions.end() && itr.value() < foodData.size()) {
        res = foodData[itr.value()];
    }
    return res;
}
|
; A198308: Moore lower bound on the order of an (8,g)-cage.
; 9,16,65,114,457,800,3201,5602,22409,39216,156865,274514,1098057,1921600,7686401,13451202,53804809,94158416,376633665,659108914,2636435657,4613762400,18455049601,32296336802,129185347209,226074357616,904297430465,1582520503314,6330082013257,11077643523200,44310574092801,77543504662402,310174018649609,542804532636816,2171218130547265,3799631728457714
; Input n in $0; output a(n) in $1. Generated code — structure is:
; outer loop runs n+1 times ($10 counts down from n+1), accumulating one
; term per pass into $9; the final sum is the Moore lower bound a(n).
mov $8,$0
mov $10,$0
add $10,1
lpb $10,1
mov $0,$8
sub $10,1
sub $0,$10
; Inner loop: step $0 down by 2, growing $2 by repeated *7 with +6 offsets
; (exact closed form unverified — generated by the LODA miner).
lpb $0,1
sub $0,2
add $2,6
mul $2,7
lpe
sub $0,$2
add $0,1
mov $1,1
clr $2,6 ; reset scratch cells $2..$7 for the next outer pass
add $0,2
sub $1,5
sub $3,$1
mov $4,$0
div $4,2
sub $3,$4
mov $1,$3
sub $1,2
mul $1,2
add $1,7
add $9,$1 ; accumulate this pass's term
lpe
mov $1,$9 ; result
|
; A074503: a(n) = 1^n + 2^n + 7^n.
; 3,10,54,352,2418,16840,117714,823672,5765058,40354120,282476274,1977328792,13841291298,96889018600,678223089234,4747561542712,33232930635138,232630514118280,1628413598172594,11398895185897432
; Input n in $0; output a(n) = 1^n + 2^n + 7^n in $0
; (computed as 7^n + 2^n + 1; checked: n=0 -> 3, n=1 -> 10).
mov $1,7
pow $1,$0 ; $1 = 7^n
mov $2,2
pow $2,$0 ; $2 = 2^n
add $1,$2 ; $1 = 7^n + 2^n
mov $0,$1
add $0,1 ; + 1^n
|
db 0 ; species ID placeholder
db 105, 105, 75, 50, 65, 100
; hp atk def spd sat sdf
db POISON, POISON ; type (both slots identical = mono-Poison)
db 75 ; catch rate
db 157 ; base exp
db NO_ITEM, NUGGET ; items (second slot presumably the rarer wild item — verify table format)
db GENDER_F50 ; gender ratio (50% female)
db 20 ; step cycles to hatch
INCBIN "gfx/pokemon/muk/front.dimensions"
db GROWTH_MEDIUM_FAST ; growth rate
dn EGG_INDETERMINATE, EGG_INDETERMINATE ; egg groups (two nibbles packed into one byte)
db 70 ; happiness
; tm/hm learnset (bitmask emitted by the tmhm macro)
tmhm FOCUS_PUNCH, TOXIC, HIDDEN_POWER, SUNNY_DAY, TAUNT, HYPER_BEAM, PROTECT, RAIN_DANCE, GIGA_DRAIN, FRUSTRATION, THUNDERBOLT, THUNDER, RETURN, DIG, SHADOW_BALL, BRICK_BREAK, DOUBLE_TEAM, SHOCK_WAVE, FLAMETHROWER, SLUDGE_BOMB, FIRE_BLAST, ROCK_TOMB, TORMENT, FACADE, SECRET_POWER, REST, ATTRACT, THIEF, FOCUS_BLAST, FLING, ENDURE, EXPLOSION, PAYBACK, GIGA_IMPACT, CAPTIVATE, DARK_PULSE, ROCK_SLIDE, SLEEP_TALK, NATURAL_GIFT, POISON_JAB, SWAGGER, SUBSTITUTE, STRENGTH, ROCK_SMASH, FIRE_PUNCH, GUNK_SHOT, ICE_PUNCH, MUD_SLAP, SNORE, THUNDERPUNCH
; end
|
#ifndef _VKTAPIVERSIONCHECK_HPP
#define _VKTAPIVERSIONCHECK_HPP
/*-------------------------------------------------------------------------
* Vulkan Conformance Tests
* ------------------------
*
* Copyright (c) 2017 Khronos Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief API Version Check test - prints out version info
*//*--------------------------------------------------------------------*/
// Forward declarations only; full definitions live in the tcu framework headers.
namespace tcu
{
class TestCaseGroup;
class TestContext;
}
namespace vkt
{
namespace api
{
// Factory for the API version sanity-check test group (see \brief above).
tcu::TestCaseGroup* createVersionSanityCheckTests (tcu::TestContext& testCtx);
} // api
} // vkt
#endif // _VKTAPIVERSIONCHECK_HPP
|
// Copyright (c) 2016-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <amount.h>
#include <policy/feerate.h>
#include <test/test_sin.h>
#include <boost/test/unit_test.hpp>
BOOST_FIXTURE_TEST_SUITE(amount_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(MoneyRangeTest)
{
    // Valid amounts are non-negative and at most MAX_MONEY; both
    // out-of-range directions must be rejected.
    BOOST_CHECK_EQUAL(MoneyRange(CAmount(-1)), false);
    BOOST_CHECK_EQUAL(MoneyRange(MAX_MONEY + CAmount(1)), false);
    BOOST_CHECK_EQUAL(MoneyRange(CAmount(1)), true);
}
// CFeeRate(k) is k satoshis per 1000 bytes; GetFee(size) scales by size.
BOOST_AUTO_TEST_CASE(GetFeeTest)
{
    CFeeRate feeRate, altFeeRate;
    feeRate = CFeeRate(0);
    // Must always return 0
    BOOST_CHECK_EQUAL(feeRate.GetFee(0), 0);
    BOOST_CHECK_EQUAL(feeRate.GetFee(1e5), 0);
    feeRate = CFeeRate(1000);
    // Must always just return the arg (1000 per kB == 1 per byte)
    BOOST_CHECK_EQUAL(feeRate.GetFee(0), 0);
    BOOST_CHECK_EQUAL(feeRate.GetFee(1), 1);
    BOOST_CHECK_EQUAL(feeRate.GetFee(121), 121);
    BOOST_CHECK_EQUAL(feeRate.GetFee(999), 999);
    BOOST_CHECK_EQUAL(feeRate.GetFee(1e3), 1e3);
    BOOST_CHECK_EQUAL(feeRate.GetFee(9e3), 9e3);
    feeRate = CFeeRate(-1000);
    // Must always just return -1 * arg
    BOOST_CHECK_EQUAL(feeRate.GetFee(0), 0);
    BOOST_CHECK_EQUAL(feeRate.GetFee(1), -1);
    BOOST_CHECK_EQUAL(feeRate.GetFee(121), -121);
    BOOST_CHECK_EQUAL(feeRate.GetFee(999), -999);
    BOOST_CHECK_EQUAL(feeRate.GetFee(1e3), -1e3);
    BOOST_CHECK_EQUAL(feeRate.GetFee(9e3), -9e3);
    feeRate = CFeeRate(123);
    // Truncates the result, if not integer
    BOOST_CHECK_EQUAL(feeRate.GetFee(0), 0);
    BOOST_CHECK_EQUAL(feeRate.GetFee(8), 1); // Special case: returns 1 instead of 0
    BOOST_CHECK_EQUAL(feeRate.GetFee(9), 1);
    BOOST_CHECK_EQUAL(feeRate.GetFee(121), 14);
    BOOST_CHECK_EQUAL(feeRate.GetFee(122), 15);
    BOOST_CHECK_EQUAL(feeRate.GetFee(999), 122);
    BOOST_CHECK_EQUAL(feeRate.GetFee(1e3), 123);
    BOOST_CHECK_EQUAL(feeRate.GetFee(9e3), 1107);
    feeRate = CFeeRate(-123);
    // Truncates the result, if not integer
    BOOST_CHECK_EQUAL(feeRate.GetFee(0), 0);
    BOOST_CHECK_EQUAL(feeRate.GetFee(8), -1); // Special case: returns -1 instead of 0
    BOOST_CHECK_EQUAL(feeRate.GetFee(9), -1);
    // check alternate constructor
    feeRate = CFeeRate(1000);
    altFeeRate = CFeeRate(feeRate);
    BOOST_CHECK_EQUAL(feeRate.GetFee(100), altFeeRate.GetFee(100));
    // Check full constructor
    // default value
    BOOST_CHECK(CFeeRate(CAmount(-1), 1000) == CFeeRate(-1));
    BOOST_CHECK(CFeeRate(CAmount(0), 1000) == CFeeRate(0));
    BOOST_CHECK(CFeeRate(CAmount(1), 1000) == CFeeRate(1));
    // lost precision (can only resolve satoshis per kB)
    BOOST_CHECK(CFeeRate(CAmount(1), 1001) == CFeeRate(0));
    BOOST_CHECK(CFeeRate(CAmount(2), 1001) == CFeeRate(1));
    // some more integer checks
    BOOST_CHECK(CFeeRate(CAmount(26), 789) == CFeeRate(32));
    BOOST_CHECK(CFeeRate(CAmount(27), 789) == CFeeRate(34));
    // Maximum size in bytes, should not crash on Bitcoin, will crash on SIN because of supply and fee rate changes.
    //CFeeRate(MAX_MONEY, std::numeric_limits<size_t>::max() >> 1).GetFeePerK();
}
// Ordering and compound-assignment operators on CFeeRate.
BOOST_AUTO_TEST_CASE(BinaryOperatorTest)
{
    CFeeRate a, b;
    a = CFeeRate(1);
    b = CFeeRate(2);
    BOOST_CHECK(a < b);
    BOOST_CHECK(b > a);
    BOOST_CHECK(a == a);
    BOOST_CHECK(a <= b);
    BOOST_CHECK(a <= a);
    BOOST_CHECK(b >= a);
    BOOST_CHECK(b >= b);
    // After doubling, a (now 0.00000002 SIN/kB) should equal b
    a += a;
    BOOST_CHECK(a == b);
}
BOOST_AUTO_TEST_CASE(ToStringTest)
{
    CFeeRate feeRate;
    feeRate = CFeeRate(1);
    // 1 satoshi per kB formats with 8 decimal places and the SIN/kB unit
    BOOST_CHECK_EQUAL(feeRate.ToString(), "0.00000001 SIN/kB");
}
BOOST_AUTO_TEST_SUITE_END()
|
.global s_prepare_buffers
// Generated buffer-priming routine (see the <gen_prepare_buffer> trace at the
// end of the file). Touches a fixed sequence of *_ht buffers via stores,
// loads and rep-moves; the nops and flag-only ALU ops are deliberate padding.
s_prepare_buffers:
// Save every register this routine clobbers.
push %r15
push %r8
push %r9
push %rax
push %rbx
push %rcx
push %rdi
push %rsi
// STOR: 16 bytes into addresses_A_ht+0xe41c.
lea addresses_A_ht+0xe41c, %r15
sub %r8, %r8
mov $0x6162636465666768, %rbx
movq %rbx, %xmm4
movups %xmm4, (%r15)
nop
nop
nop
nop
and $60566, %rax
// REPM: 81 dwords addresses_UC_ht+0x85bc -> addresses_UC_ht+0x1ddbc.
lea addresses_UC_ht+0x85bc, %rsi
lea addresses_UC_ht+0x1ddbc, %rdi
nop
nop
nop
xor $54287, %rax
mov $81, %rcx
rep movsl
nop
nop
nop
nop
inc %rcx
// STOR: 1 byte into addresses_D_ht+0x63c.
lea addresses_D_ht+0x63c, %rsi
nop
nop
nop
nop
and $20493, %rax
movb $0x61, (%rsi)
cmp %rcx, %rcx
// LOAD: 2 bytes from addresses_UC_ht+0x123bc.
lea addresses_UC_ht+0x123bc, %rcx
nop
nop
sub %r15, %r15
mov (%rcx), %r8w
nop
nop
nop
nop
nop
cmp $14497, %r15
// REPM: 70 dwords addresses_A_ht+0x1d3bc -> addresses_WC_ht+0x16cbc.
lea addresses_A_ht+0x1d3bc, %rsi
lea addresses_WC_ht+0x16cbc, %rdi
nop
nop
nop
nop
nop
dec %r9
mov $70, %rcx
rep movsl
nop
nop
nop
nop
add %rbx, %rbx
// LOAD: aligned 32-byte AVX load from addresses_D_ht+0x3cbc (address
// rounded down to a 64-byte boundary so vmovaps cannot fault on alignment).
lea addresses_D_ht+0x3cbc, %rdi
xor $57931, %r8
and $0xffffffffffffffc0, %rdi
vmovaps (%rdi), %ymm3
vextracti128 $0, %ymm3, %xmm3
vpextrq $0, %xmm3, %rax
nop
nop
nop
nop
nop
dec %r15
// REPM: 52 bytes addresses_normal_ht+0xa5bc -> addresses_A_ht+0xc3bc.
lea addresses_normal_ht+0xa5bc, %rsi
lea addresses_A_ht+0xc3bc, %rdi
nop
nop
cmp $39666, %r15
mov $52, %rcx
rep movsb
nop
nop
inc %rsi
// LOAD: 8 bytes from addresses_WC_ht+0x1ace4.
lea addresses_WC_ht+0x1ace4, %rcx
nop
nop
xor $52210, %rbx
mov (%rcx), %rax
nop
nop
nop
nop
xor $21340, %r15
// REPM: 35 dwords addresses_normal_ht+0x2c5c -> addresses_UC_ht+0x11b5c.
lea addresses_normal_ht+0x2c5c, %rsi
lea addresses_UC_ht+0x11b5c, %rdi
nop
nop
nop
nop
add %r9, %r9
mov $35, %rcx
rep movsl
nop
nop
dec %rsi
// STOR: 8 bytes into addresses_A_ht+0x1af94.
lea addresses_A_ht+0x1af94, %rbx
cmp $57770, %rdi
mov $0x6162636465666768, %r15
movq %r15, (%rbx)
nop
nop
nop
sub %rax, %rax
// Restore saved registers (reverse order) and return.
pop %rsi
pop %rdi
pop %rcx
pop %rbx
pop %rax
pop %r9
pop %r8
pop %r15
ret
.global s_faulty_load
// Generated faulty-load gadget (see the <gen_faulty_load> trace below):
// one store, one load whose value must not architecturally escape, and a
// probe of the `oracles` array indexed by the loaded byte.
s_faulty_load:
push %r13
push %r14
push %r15
push %r8
push %rax
push %rbp
push %rdi
// Store: 32-byte AVX store to addresses_A+0x3abc (low 8 bytes set via movq).
lea addresses_A+0x3abc, %r8
nop
nop
nop
nop
nop
add %rbp, %rbp
mov $0x5152535455565758, %r15
movq %r15, %xmm4
vmovups %ymm4, (%r8)
nop
nop
nop
add $23295, %rax
// Faulty Load: 4 bytes from WC memory at addresses_WC+0x1cbbc.
lea addresses_WC+0x1cbbc, %rdi
nop
nop
nop
nop
inc %r14
mov (%rdi), %ebp
// Encode the low byte of the loaded value as a 4 KiB-stride touch into the
// `oracles` probe array (mask to 0xff, shift left 12 = *4096, then load).
lea oracles, %rdi
and $0xff, %rbp
shlq $12, %rbp
mov (%rdi,%rbp,1), %rbp
pop %rdi
pop %rbp
pop %rax
pop %r8
pop %r15
pop %r14
pop %r13
ret
/*
<gen_faulty_load>
[REF]
{'src': {'type': 'addresses_WC', 'same': False, 'size': 2, 'congruent': 0, 'NT': True, 'AVXalign': False}, 'OP': 'LOAD'}
{'dst': {'type': 'addresses_A', 'same': False, 'size': 32, 'congruent': 4, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
[Faulty Load]
{'src': {'type': 'addresses_WC', 'same': True, 'size': 4, 'congruent': 0, 'NT': False, 'AVXalign': True}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'dst': {'type': 'addresses_A_ht', 'same': False, 'size': 16, 'congruent': 5, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_UC_ht', 'congruent': 8, 'same': False}, 'dst': {'type': 'addresses_UC_ht', 'congruent': 9, 'same': False}, 'OP': 'REPM'}
{'dst': {'type': 'addresses_D_ht', 'same': False, 'size': 1, 'congruent': 5, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'src': {'type': 'addresses_UC_ht', 'same': False, 'size': 2, 'congruent': 6, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_A_ht', 'congruent': 11, 'same': False}, 'dst': {'type': 'addresses_WC_ht', 'congruent': 6, 'same': False}, 'OP': 'REPM'}
{'src': {'type': 'addresses_D_ht', 'same': False, 'size': 32, 'congruent': 8, 'NT': False, 'AVXalign': True}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_normal_ht', 'congruent': 8, 'same': False}, 'dst': {'type': 'addresses_A_ht', 'congruent': 11, 'same': False}, 'OP': 'REPM'}
{'src': {'type': 'addresses_WC_ht', 'same': False, 'size': 8, 'congruent': 3, 'NT': False, 'AVXalign': False}, 'OP': 'LOAD'}
{'src': {'type': 'addresses_normal_ht', 'congruent': 4, 'same': False}, 'dst': {'type': 'addresses_UC_ht', 'congruent': 4, 'same': True}, 'OP': 'REPM'}
{'dst': {'type': 'addresses_A_ht', 'same': False, 'size': 8, 'congruent': 3, 'NT': False, 'AVXalign': False}, 'OP': 'STOR'}
{'38': 21829}
38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 
38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38 38
*/
|
;----------------------------------------------------------------------
; stat.asm - user-callable entry point to _stat() function.
;----------------------------------------------------------------------
;
; C/C++ Run Time Library - Version 10.0
;
; Copyright (c) 1996, 2000 by Inprise Corporation
; All Rights Reserved.
;
; $Revision: 9.0 $
include rules.asi
include entry.inc
; Entry@ (from entry.inc) emits the public entry stub: public name `stat`
; forwarding to the implementation `_stat`, _RTLENTRY calling convention,
; 4 bytes of arguments — see entry.inc for the macro's exact expansion.
Entry@ stat, _stat, _RTLENTRY, 4
end
|
; A269716: Number of active (ON,black) cells at stage 2^n-1 of the two-dimensional cellular automaton defined by "Rule 22", based on the 5-celled von Neumann neighborhood.
; Submitted by Christian Krause
; 1,5,20,88,368,1504,6080,24448,98048,392704,1571840,6289408,25161728,100655104,402636800,1610579968
; LODA program. Input n in $0, output in $0.
mov $3,2
pow $3,$0        ; $3 = 2^n
add $0,1
div $0,$3        ; $0 = floor((n+1) / 2^n)  (1 for n<=1, else 0)
mov $1,2
mul $1,$3        ; $1 = 2^(n+1)
add $0,$1        ; $0 = 2^(n+1) + correction term
mov $2,$1
sub $1,2
sub $1,$3        ; $1 = 2^(n+1) - 2 - 2^n = 2^n - 2
add $1,$0        ; $1 = 3*2^n - 2 + correction
add $2,$3        ; $2 = 2^(n+1) + 2^n = 3*2^n
mul $1,$2        ; product is divisible by 6 (verified for n=1: 30/6=5, n=2: 120/6=20)
mov $0,$1
div $0,6
|
; DEROM: 64KB PRG-ROM + (32,64)KB CHR-ROM
; http://bootgod.dyndns.org:7777/search.php?keywords=DEROM&kwtype=pcb
; DEROM uses Nintendo's clone of a Tengen 800002.
;------------------------------------------------------------------------------;
; number of 8K CHR banks
; Valid values: $04 (32K), $08 (64K)
CHR_BANKS = $04
; DEROM mirroring is like MMC3; mapper controlled.
; %0000 = Horizontal
; %0001 = Vertical
MIRRORING = %0001
; Mapper 206 (DEROM) iNES header
; mapper number 206 = $CE: low nibble $E in flags 6, high nibble $C in flags 7
.byte "NES",$1A
.byte $04 ; 4x 16K PRG banks
.byte CHR_BANKS ; 8K CHR-ROM banks
.byte $E0|MIRRORING ; flags 6
.byte $C0 ; flags 7
.byte $00 ; no PRG RAM
.dsb 7, $00 ; clear the remaining bytes
|
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_vect_mad_sse(len, vec, vec_i, mul_array, src, dest);
;;;
%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, win64
%define arg0 rcx
%define arg0.w ecx
%define arg1 rdx
%define arg2 r8
%define arg3 r9
%define arg4 r12
%define arg5 r15
%define tmp r11
%define return rax
%define return.w eax
%define PS 8
%define stack_size 16*3 + 3*8
%define arg(x) [rsp + stack_size + PS + PS*x]
%define func(x) proc_frame x
%macro FUNC_SAVE 0
sub rsp, stack_size
movdqa [rsp+16*0],xmm6
movdqa [rsp+16*1],xmm7
movdqa [rsp+16*2],xmm8
save_reg r12, 3*16 + 0*8
save_reg r15, 3*16 + 1*8
end_prolog
mov arg4, arg(4)
mov arg5, arg(5)
%endmacro
%macro FUNC_RESTORE 0
movdqa xmm6, [rsp+16*0]
movdqa xmm7, [rsp+16*1]
movdqa xmm8, [rsp+16*2]
mov r12, [rsp + 3*16 + 0*8]
mov r15, [rsp + 3*16 + 1*8]
add rsp, stack_size
%endmacro
%elifidn __OUTPUT_FORMAT__, elf64
%define arg0 rdi
%define arg0.w edi
%define arg1 rsi
%define arg2 rdx
%define arg3 rcx
%define arg4 r8
%define arg5 r9
%define tmp r11
%define return rax
%define return.w eax
%define func(x) x:
%define FUNC_SAVE
%define FUNC_RESTORE
%endif
;;; gf_vect_mad_sse(len, vec, vec_i, mul_array, src, dest)
%define len arg0
%define len.w arg0.w
%define vec arg1
%define vec_i arg2
%define mul_array arg3
%define src arg4
%define dest arg5
%define pos return
%define pos.w return.w
%ifndef EC_ALIGNED_ADDR
;;; Use Un-aligned load/store
%define XLDR movdqu
%define XSTR movdqu
%else
;;; Use non-temporal load/store
%ifdef NO_NT_LDST
%define XLDR movdqa
%define XSTR movdqa
%else
%define XLDR movntdqa
%define XSTR movntdq
%endif
%endif
default rel
[bits 64]
section .text
%define xmask0f xmm8
%define xgft_lo xmm7
%define xgft_hi xmm6
%define x0 xmm0
%define xtmpa xmm1
%define xtmph xmm2
%define xtmpl xmm3
%define xd xmm4
%define xtmpd xmm5
align 16
;-----------------------------------------------------------------------
; gf_vect_mad_sse(len, vec, vec_i, mul_array, src, dest)
; GF(2^8) multiply-accumulate: dest ^= gf_mul(src, C) over `len` bytes,
; 16 bytes per iteration via split-nibble PSHUFB table lookups.
; Tail handling: the final partial block is processed by re-running the
; loop body overlapped onto the last 16 bytes (xtmpd holds the backup).
; Requires len >= 16; returns 0 on success, 1 if len < 16.
;-----------------------------------------------------------------------
global gf_vect_mad_sse:function
func(gf_vect_mad_sse)
FUNC_SAVE
sub len, 16
jl .return_fail
xor pos, pos
movdqa xmask0f, [mask0f] ;Load mask of lower nibble in each byte
sal vec_i, 5 ;Multiply by 32
movdqu xgft_lo, [vec_i+mul_array] ;Load array Cx{00}, Cx{01}, Cx{02}, ...
movdqu xgft_hi, [vec_i+mul_array+16] ; " Cx{00}, Cx{10}, Cx{20}, ... , Cx{f0}
XLDR xtmpd, [dest+len] ;backup the last 16 bytes in dest
.loop16:
XLDR xd, [dest+pos] ;Get next dest vector
.loop16_overlap:
XLDR x0, [src+pos] ;Get next source vector
movdqa xtmph, xgft_hi ;Reload const array registers
movdqa xtmpl, xgft_lo
movdqa xtmpa, x0 ;Keep unshifted copy of src
psraw x0, 4 ;Shift to put high nibble into bits 4-0
pand x0, xmask0f ;Mask high src nibble in bits 4-0
pand xtmpa, xmask0f ;Mask low src nibble in bits 4-0
pshufb xtmph, x0 ;Lookup mul table of high nibble
pshufb xtmpl, xtmpa ;Lookup mul table of low nibble
pxor xtmph, xtmpl ;GF add high and low partials
pxor xd, xtmph ;dest ^= partial product (the "mad" accumulate)
XSTR [dest+pos], xd ;Store result
add pos, 16 ;Loop on 16 bytes at a time
cmp pos, len
jle .loop16
lea tmp, [len + 16] ;tmp = original length
cmp pos, tmp
je .return_pass ;length was a multiple of 16 - done
;; Tail len
mov pos, len ;Overlapped offset length-16
movdqa xd, xtmpd ;Restore xd
jmp .loop16_overlap ;Do one more overlap pass
.return_pass:
mov return, 0
FUNC_RESTORE
ret
.return_fail:
mov return, 1
FUNC_RESTORE
ret
endproc_frame
section .data
align 16
; 0x0f in every byte: selects the low nibble of each byte
mask0f: ddq 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f
;;; func core, ver, snum
slversion gf_vect_mad_sse, 00, 01, 0200
|
; Z88 Small C+ Run time Library
SECTION code_crt0_sccz80
PUBLIC l_ne
;
; DE != HL
; set carry if true
; Not-equal: carry set (and HL nonzero) when DE != HL;
; carry clear and HL = 0 when equal.
.l_ne
ld a,l
sub e ; l = l - e
ld l,a
ld a,h
sbc d ; h = h - d - borrow  =>  HL = HL - DE
ld h,a
or l ; Z set iff HL == DE (h|l == 0)
inc hl ; make HL nonzero as a truth value (flags unaffected by inc hl)
scf ; carry = "true"
ret nz ; not equal: return carry set
xor a ; equal: clear carry and A
ld l,a
ld h,a ; HL = 0 ("false")
ret
|
; Multiboot2 header (see the Multiboot2 specification):
; magic, architecture, header length, checksum, then a terminating end tag.
section .multiboot_header
header_start:
dd 0xe85250d6 ; magic number
dd 0 ; architecture 0 = protected-mode i386
dd header_end - header_start
; checksum
; chosen so magic + arch + length + checksum == 0 (mod 2^32)
dd 0x100000000 - (0xe85250d6 + 0 + (header_end - header_start))
; required end tag
dw 0 ; tag type 0 = end
dw 0 ; flags
dd 8 ; tag size
header_end:
|
; Corpus of x86-64 ADD encodings — covers reg/reg, imm8, imm32,
; memory source/destination, and byte-sized forms.
; Presumably an assembler/disassembler test input — TODO confirm.
bits 64
add rdi, r10
add al, 10
add ax, 10
add rdx, rcx
add rax, 32
add [rbp-40], rax ;
add rdi, [rbp-72]
add eax, dword [rdi+44]
add dword [rdi+44], 1
add r10, rcx
add byte [rcx], al
|
; Delay loop for a custom/educational ISA (LI/INUV/CMP/LXY/JNE mnemonics —
; instruction set not visible here; register semantics assumed, TODO confirm).
; Appears to loop until A wraps to 255, then halts with D = 255.
LI A, 0
LI B, 0
LI U, 0
LI V, 0
LI D, 1
delay256_0:
INUV
LI B, 1
ADD A ; A += B (i.e. A += 1) -- assumed, verify against ISA
LI B, 255
CMP ; compare A with B (255) -- assumed
LXY delay256_0
JNE ; loop until A == 255
NOP
NOP
LI D, 255
HLT
|
; clear-instructions-macros.asm
; Convenience "clear register" macros for 6809/6309 (lwasm syntax):
; clru/clrx/clry/clrq zero the named register, mirroring the native clra/clrb.
*pragmapush list ; Save state of list pragma
pragma nolist ; Turn off assembly listing and exclude from symbol list
ifndef CLEAR_INSTRUCTIONS_MACROS ; Load defines only once
clru macro
ldu #0
endm
clrx macro
ldx #0
endm
clry macro
ifpragma 6809
ldy #0 ; 6809: no zero register, load immediate
else
tfr 0,y ; 6309: transfer from the zero register (smaller/faster)
endc
endm
ifpragma no6309conv
; clrq only exists when 6309 convenience conversion is off:
; clear the 32-bit Q register as D (high word) + W (low word)
clrq macro
clrd
clrw
endm
endc
CLEAR_INSTRUCTIONS_MACROS equ 1 ; Set flag for defines being loaded
endc
*pragmapop list ; restore assembly listing to previous state
|
; Lunar Launcher
; (c) 2018 Michael Bayer
;
;
; TODO
;
processor 6502
include "vcs.h"
include "macro.h"
;----------------------------
; Constants
;----------------------------
; ntsc constants
VBLANK_WAIT = 42
; Game constants
COL_BG_SCORE = $0
COL_BG_RESOURCES = $0
COL_BG_LIGHT = $b6
COL_BG_DARK = $c4
COL_PF_HIGHLIGHT = $1e
COL_ZOMBIE_SKIN_1 = $0a ; grey
COL_ZOMBIE_SKIN_2 = $b2 ; green
COL_ZOMBIE_SKIN_3 = $52 ; purple
COL_ZOMBIE_SHIRT_1 = $16
COL_ZOMBIE_SHIRT_2 = $26
COL_ZOMBIE_SHIRT_3 = $36
COL_ZOMBIE_PANTS = $84
COL_ZOMBIE_SHOES = $e2
COL_ZOMBIE_HAIR = $e2
COL_SHOOTER = $ba
SCANLINES_RESOURCE = 16
SCANLINES_LANE = 25
SCANLINES_SCORE = 16
ZOMBIE_X_VEL_INIT = 10
;--- end Constants
;----------------------------
; Macros
;----------------------------
;###################################################
;
; M_ACTION_LANE 0 LIGHT
; M_HIGHLIGHT {lane}: writes the playfield highlight pattern for the
; currently selected lane ({1}) to all three PF registers.
MAC M_HIGHLIGHT
ldy Vb_lane_select
lda PF_HIGHLIGHT_{1},y ; all-ones row iff Vb_lane_select == {1}
sta PF0
sta PF1
sta PF2
ENDM
;###################################################
;
; M_SPRITEPTR 0, shooter, PLANT
; M_SPRITEPTR {lane}, {kind}, {gfx}: points Vptr_{kind} at the {gfx}
; sprite data when lane {1} has any {kind}s, otherwise at NOSPRITE.
; Leaves Y = Vb_{2}s_lane_{1} for the NUSIZ lookup that follows.
MAC M_SPRITEPTR
; init shooter pointer
ldy Vb_{2}s_lane_{1} ; +3
bne .load{2} ; +2/+3
lda #<(NOSPRITE) ; count == 0: show the empty sprite
sta Vptr_{2} ;
lda #>(NOSPRITE) ;
sta Vptr_{2}+1 ;
jmp .{2}Done ;
.load{2}
lda #<({3}) ; count != 0: show the real graphic
sta Vptr_{2} ;
lda #>({3}) ;
sta Vptr_{2}+1 ;
.{2}Done
ENDM
;###################################################
;
; M_ACTION_LANE 0 LIGHT
MAC M_ACTION_LANE
; +++ start scanline 1 (52)
; init BGCOLOR
lda #COL_BG_{2}
sta COLUBK ; (5)
; highlight
M_HIGHLIGHT {1} ; (21)
; init sunflower pointers
ldy Vb_sunflowers_lane_{1}
lda SunflowerP0LoTbl,y
sta Vptr_sunflower_pf0
lda SunflowerP1LoTbl,y
sta Vptr_sunflower_pf1 ; (38)
BREAK{1}:
; init shooter pointer
M_SPRITEPTR {1}, shooter, PLANT ; (56)
; number of shooters
lda Nusiz0Tbl,y
sta NUSIZ0 ; (63)
; re-set pf registers
SLEEP 2
lda #0 ; +e
sta PF2 ; +3
sta PF1 ; +3
; +++ end scanline 1 (52)
sta WSYNC
; clear PF2 *after* WSYNC to prevent artifacts
; from premature clearing
sta PF0 ; +3
; init zombie pointer
M_SPRITEPTR {1}, zombie, ZOMBIE ; (16)
; number of zombies
lda Nusiz1Tbl,y
sta NUSIZ1 ; (23)
; first thing in bzoneRepos is a WSYNC
; so there's about 25 cycles of space left
; at this point
; zombie positioning
lda Vw_zombies_xpos{1}
ldx #1 ; (28)
; +++ bzoneRepos ends scanline 2 (53)
jsr bzoneRepos
; +++ end scanline 3 (54)
sta WSYNC
sta HMOVE
; init scanline counter
ldy #SCANLINES_LANE
.actionLaneLoop{1}
; PF0 (sunflower)
;lda PF_SUNFLOWER,y
lda (Vptr_sunflower_pf0),y
sta PF0
lda (Vptr_sunflower_pf1),y
sta PF1
; P0 (shooter)
; grafix
lda (Vptr_shooter),y
sta GRP0
; color
;lda SHOOTER_COLORS,y
;sta COLUP0
; P1 (zombie)
; grafix
lda (Vptr_zombie),y
sta GRP1
; color
lda (Vptr_zombie_colors),y
sta COLUP1
SLEEP 12
; re-set PF0/1
lda #0
sta PF0
sta PF1
sta WSYNC
dey
bne .actionLaneLoop{1}
; re-set graphics registers
lda #0
sta GRP0
sta GRP1
; 2 empty lines
sta WSYNC
sta WSYNC
; highlight
M_HIGHLIGHT {1}
sta WSYNC
ENDM
;###################################################
; adds two two-byte variables
; M_WORD_ADD <target> <operand>
; {1} += {2}, 16-bit little-endian ({1} = lo byte, {1}+1 = hi byte).
; Standard 6502 multi-byte add: clear carry, low byte first so the
; carry ripples into the high byte.
MAC M_WORD_ADD
clc
lda {1}
adc {2}
sta {1}
lda {1}+1
adc {2}+1 ; consumes carry from the low-byte add
sta {1}+1
ENDM
;###################################################
; subtracts a two-byte variable from another one
; M_WORD_SUB <target> <operand>
; {1} -= {2}, 16-bit little-endian ({1} = lo byte, {1}+1 = hi byte).
; NOTE: on the 6502 the carry flag is an inverted borrow, so a multi-byte
; subtraction must start with SEC (not CLC — CLC injects a spurious extra
; borrow), and must process the LOW byte first so the borrow can ripple
; into the high byte. The previous version did both backwards.
MAC M_WORD_SUB
sec ; carry set = "no borrow yet"
lda {1}
sbc {2} ; low byte first
sta {1}
lda {1}+1
sbc {2}+1 ; high byte consumes the borrow from the low byte
sta {1}+1
ENDM
;###################################################
; adds a constant to a two-byte variable
; M_ADD_CONSTANT <target> <constant>
; {1} += {2}, 16-bit little-endian; #< / #> select the constant's
; lo / hi byte. Low byte first so the carry propagates correctly.
MAC M_ADD_CONSTANT
clc
lda {1}
adc #<{2}
sta {1}
lda {1}+1
adc #>{2} ; consumes carry from the low-byte add
sta {1}+1
ENDM
;###################################################
; subtracts a constant from a two-byte variable
; M_SUB_CONSTANT <target> <constant>
; {1} -= {2}, 16-bit little-endian; #< / #> select the constant's
; lo / hi byte.
; NOTE: fixed two defects: 6502 subtraction needs SEC before SBC (carry is
; an inverted borrow; CLC caused an off-by-one), and the low byte must be
; subtracted first so the borrow ripples into the high byte.
; (Header comment also previously mis-named this macro M_ADD_CONSTANT.)
MAC M_SUB_CONSTANT
sec ; carry set = "no borrow yet"
lda {1}
sbc #<{2} ; low byte first
sta {1}
lda {1}+1
sbc #>{2} ; high byte consumes the borrow
sta {1}+1
ENDM
;----------------------------
; Variables
;----------------------------
SEG.U variables
ORG $80
;Vb_tmp00 ds 1
; shadow registers
Vb_SWCHA_Shadow ds 1
; lane select
Vb_lane_select ds 1
; frame counter
Vw_frame_counter ds 2
; sunflowers
Vb_sunflowers_lane_0 ds 1
Vb_sunflowers_lane_1 ds 1
Vb_sunflowers_lane_2 ds 1
Vb_sunflowers_lane_3 ds 1
Vb_sunflowers_lane_4 ds 1
; shooters
Vb_shooters_lane_0 ds 1
Vb_shooters_lane_1 ds 1
Vb_shooters_lane_2 ds 1
Vb_shooters_lane_3 ds 1
Vb_shooters_lane_4 ds 1
; zombies
Vb_zombies_lane_0 ds 1
Vb_zombies_lane_1 ds 1
Vb_zombies_lane_2 ds 1
Vb_zombies_lane_3 ds 1
Vb_zombies_lane_4 ds 1
Vw_zombie_xvel ds 2
Vw_zombies_xpos0 ds 2
Vw_zombies_xpos1 ds 2
Vw_zombies_xpos2 ds 2
Vw_zombies_xpos3 ds 2
Vw_zombies_xpos4 ds 2
; sunflower graphic pointers
Vptr_sunflower_pf0 ds 2
Vptr_sunflower_pf1 ds 2
Vptr_shooter ds 2
Vptr_zombie ds 2
Vptr_zombie_colors ds 2
echo "----",($100 - *) , "bytes of RAM left"
;--- end Variables
SEG code
ORG $F000
echo "---- start code at ",(*)
Reset:
;----------------------------
; Start of program
;----------------------------
CLEAN_START
;----------------------------
; any code to be executed before
; the game actually starts
; goes here
;----------------------------
; set TIA behaviour
; set bg color to black ($0)
lda #$00
sta COLUBK
; set PF color
lda #COL_PF_HIGHLIGHT
sta COLUPF
; set pf behaviour
lda #%00000001
sta CTRLPF
; set player color
;lda #$0F
;sta COLUP0
;sta COLUP1
; set Player size
;lda #7
;sta NUSIZ0
;sta NUSIZ1
; sunflower pointer hi
lda #>(PF_SUNFLOWER)
sta Vptr_sunflower_pf0+1
sta Vptr_sunflower_pf1+1
; initialize zombie x velocity
; Vw_ words in this file are little-endian: lo byte at +0, hi byte at +1
; (see M_WORD_ADD / M_WORD_SUB). The previous code stored the lo byte
; (#<) into Vw_zombie_xvel+1 and the hi byte (#>) into Vw_zombie_xvel,
; i.e. swapped, which made the velocity $0A00 instead of $000A.
lda #<ZOMBIE_X_VEL_INIT
sta Vw_zombie_xvel
lda #>ZOMBIE_X_VEL_INIT
sta Vw_zombie_xvel+1
; TEST VALUES
; initial player pos
lda #1
sta Vb_sunflowers_lane_1
sta Vb_sunflowers_lane_3
sta Vb_shooters_lane_1
sta Vb_shooters_lane_3
sta Vb_zombies_lane_1
lda #2
sta Vb_zombies_lane_2
sta Vb_sunflowers_lane_2
sta Vb_shooters_lane_2
lda #3
sta Vb_zombies_lane_3
sta Vb_shooters_lane_4
;sta Vw_PlayerPosY+1
lda #90
sta Vw_zombies_xpos0
sta Vw_zombies_xpos2
sta Vw_zombies_xpos3
sta Vw_zombies_xpos4
lda #104
sta Vw_zombies_xpos1
;----------------------------
; Main Loop
;----------------------------
; One pass per video frame: vsync/vblank setup, game logic during
; vblank, visible-frame kernel, then overscan.
MainLoop:
jsr VerticalBlank ; start vsync, arm the vblank timer
jsr GameState ; read input, update positions (runs during vblank)
jsr DrawScreen ; visible scanline kernel
jsr OverScan ; burn the overscan period
jmp MainLoop ; loop forever
;----------------------------
; vertical blank init
; initializes TIM64T timer
;----------------------------
; Emits the 3-scanline VSYNC pulse and arms TIM64T with VBLANK_WAIT so
; DrawScreen can later spin on INTIM until the vblank period has elapsed.
VerticalBlank:
ldx #0 ; X cleared (not otherwise used here — TODO confirm needed)
lda #2 ; bit 1 set: value written to VSYNC to start the pulse
sta WSYNC ;
; begin vertical sync
sta VSYNC
; first two lines of vsync
sta WSYNC
sta WSYNC
; use duration 3rd line of VSYNC
; to set the vertical blank timer
lda #VBLANK_WAIT
sta TIM64T
lda #0
; end vsync period
sta WSYNC
sta VSYNC
rts ;--- VerticalBlank
;----------------------------
; calculate game state for this frame
;----------------------------
GameState:
;-- check input
; joystick input
lda SWCHA
; break if nothing has changed
cmp Vb_SWCHA_Shadow
beq NoMovement
;beq NoMovement
; store new SWCHA state
sta Vb_SWCHA_Shadow
; right?
CheckRightPressed:
and #%10000000
; skip to CheckLeftPressed if not equal
bne CheckLeftPressed
; move right
inc Vw_zombies_xpos1
; left?
CheckLeftPressed:
lda Vb_SWCHA_Shadow
and #%01000000
; skip to CheckDownPressed not equal
bne CheckDownPressed
; move left
dec Vw_zombies_xpos1
; down?
CheckDownPressed:
; check if down is pressed
lda Vb_SWCHA_Shadow
and #%00100000
; skip to CheckUpPressed if not pressed
bne CheckUpPressed
; move down
lda #4
cmp Vb_lane_select
beq WrapDownPressed
inc Vb_lane_select
jmp CheckUpPressed
WrapDownPressed:
lda #0
sta Vb_lane_select
; up?
CheckUpPressed:
; check if up is pressed
lda Vb_SWCHA_Shadow
and #%00010000
; skip to NoMovement if not pressed
bne NoMovement
lda #0
cmp Vb_lane_select
beq WrapUpPressed
dec Vb_lane_select
jmp NoMovement
WrapUpPressed:
lda #4
sta Vb_lane_select
NoMovement:
; position shooters once per frame
lda #40
ldx #0
jsr bzoneRepos
; update zombie xpos
M_WORD_SUB Vw_zombies_xpos1, Vw_zombie_xvel
; set zombie colors
; hi byte
lda #>ZOMBIE_COLORS_1
sta Vptr_zombie_colors+1
; lo byte
lda #<ZOMBIE_COLORS_1
sta Vptr_zombie_colors
rts ;--- GameState
;----------------------------
; Draw visible scanlines
; https://alienbill.com/2600/cookbook/subpixel.html
;----------------------------
DrawScreen: SUBROUTINE
lda #0
; wait until vertical blank period is over
.vblankWait:
sta WSYNC
lda INTIM
bne .vblankWait
; y will be our scanline counter
ldy #SCANLINES_RESOURCE
sta WSYNC
sta HMOVE
sta VBLANK ; since A = #0
;------------------------
; Resource lane
;------------------------
lda #COL_BG_RESOURCES
sta COLUBK
.resourceLane
sta WSYNC
dey
bne .resourceLane
; set P0 hmove to 0
lda #0
sta HMP0
; scala, TODO: remove
; lda #%10101010
; sta PF0
; sta PF2
; lsr
; sta PF1
sta WSYNC
;------------------------
; Action lanes
;------------------------
M_ACTION_LANE 0, LIGHT
M_ACTION_LANE 1, DARK
M_ACTION_LANE 2, LIGHT
M_ACTION_LANE 3, DARK
M_ACTION_LANE 4, LIGHT
;------------------------
; Score lane
;------------------------
lda #0
sta PF0
sta PF1
sta PF2
ldy #SCANLINES_SCORE
lda #COL_BG_SCORE
sta COLUBK
.scoreLane:
sta WSYNC
dey
bne .scoreLane
; clear registers to prevent bleeding
lda #2
sta WSYNC ; finish scanline
sta VBLANK ; make TIA output blank
; re-use Y which is still 0
ldy #0
sty PF0
sty PF1
sty PF2
sty GRP0
sty GRP1
sty ENAM0
sty ENAM1
sty ENABL
sty VDELP0
sty VDELP1
rts ; DrawScreen
;----------------------------
; Overscan
;----------------------------
; Burns the overscan period: arms TIM64T with 35 ticks and spins on
; INTIM until it reaches zero. Game logic may be added before the wait.
OverScan:
lda #35
sta TIM64T
; calc stuff here
OverScanLineWait:
lda INTIM
bne OverScanLineWait
; return
rts
;----------------------------
; BattleZone-style sprite repositioning
; see https://alienbill.com/2600/cookbook/subpixel.html
; set A = desired horizontal position, then X to object
; to be positioned (0->4 = P0->BALL)
;----------------------------
; Cycle-exact: the divide-by-15 loop doubles as the coarse-position delay,
; and fineAdjustTable is laid out so the indexed load crosses a page
; boundary (+1 cycle). Caller must follow up with sta WSYNC / sta HMOVE.
bzoneRepos: SUBROUTINE
sta WSYNC ; 00 Sync to start of scanline.
sec ; 02 Set the carry flag so no borrow will be applied during the division.
.divideby15
sbc #15 ; 04 Waste the necessary amount of time dividing X-pos by 15!
bcs .divideby15 ; 06/07 11/16/21/26/31/36/41/46/51/56/61/66
tay ; Y = remainder (-15..-1), indexes the fine-adjust table
lda fineAdjustTable,y ; 13 -> Consume 5 cycles by guaranteeing we cross a page boundary
sta HMP0,x ; fine (HMOVE) offset for object X
sta RESP0,x ; 21/ 26/31/36/41/46/51/56/61/66/71 - Set the rough position.
rts
;----------------------------
; Data
;----------------------------
DATA_Start ALIGN 256
echo "---- start data at ",(*)
PF_HIGHLIGHT_0:
.byte #%11111111
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
PF_HIGHLIGHT_1:
.byte #%00000000
.byte #%11111111
.byte #%00000000
.byte #%00000000
.byte #%00000000
PF_HIGHLIGHT_2:
.byte #%00000000
.byte #%00000000
.byte #%11111111
.byte #%00000000
.byte #%00000000
PF_HIGHLIGHT_3:
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%11111111
.byte #%00000000
PF_HIGHLIGHT_4:
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%11111111
PF_SUNFLOWER:
.byte #%00000000
.byte #%00000000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%01100000
.byte #%00000000
.byte #%00000000
PF_NOSUNFLOWER:
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
SunflowerP0LoTbl:
.byte <(PF_NOSUNFLOWER) ; 0 -> none
.byte <(PF_SUNFLOWER) ; 1 -> yes
.byte <(PF_SUNFLOWER) ; 2 -> yes
SunflowerP1LoTbl:
.byte <(PF_NOSUNFLOWER) ; 0 -> none
.byte <(PF_NOSUNFLOWER) ; 1 -> nope
.byte <(PF_SUNFLOWER) ; 2 -> yes
include "sprites.inc"
ZOMBIE_FrameTblLo:
.byte <(ZOMBIE_F0)
.byte <(ZOMBIE_F1)
PLANT_FrameTblLo:
.byte <(PLANT_F0)
.byte <(PLANT_F1)
NOSPRITE:
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
.byte #%00000000
SHOOTER_COLORS:
SHOOTER_COLORS_1:
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
.byte #COL_SHOOTER
ZOMBIE_COLORS:
ZOMBIE_COLORS_1:
.byte #COL_ZOMBIE_SHOES
.byte #COL_ZOMBIE_SHOES
.byte #COL_ZOMBIE_SHOES
.byte #COL_ZOMBIE_PANTS
.byte #COL_ZOMBIE_PANTS
.byte #COL_ZOMBIE_PANTS
.byte #COL_ZOMBIE_PANTS
.byte #COL_ZOMBIE_PANTS
.byte #COL_ZOMBIE_SHOES
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SHIRT_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_SKIN_1
.byte #COL_ZOMBIE_HAIR
Nusiz0Tbl:
.byte #%00000000 ; 0 shooters
.byte #%00000000 ; 1 shooter
.byte #%00000001 ; 2 shooters
.byte #%00000011 ; 3 shooters
Nusiz1Tbl:
.byte #%00000000 ; 0 zombies
.byte #%00000000 ; 1 zombies
.byte #%00000001 ; 2 zombies
.byte #%00000011 ; 3 zombies
;-----------------------------
; This table converts the "remainder" of the division by 15 (-1 to -15) to the correct
; fine adjustment value. This table is on a page boundary to guarantee the processor
; will cross a page boundary and waste a cycle in order to be at the precise position
; for a RESP0,x write
ORG $FE00
fineAdjustBegin
DC.B %01110000 ; Left 7
DC.B %01100000 ; Left 6
DC.B %01010000 ; Left 5
DC.B %01000000 ; Left 4
DC.B %00110000 ; Left 3
DC.B %00100000 ; Left 2
DC.B %00010000 ; Left 1
DC.B %00000000 ; No movement.
DC.B %11110000 ; Right 1
DC.B %11100000 ; Right 2
DC.B %11010000 ; Right 3
DC.B %11000000 ; Right 4
DC.B %10110000 ; Right 5
DC.B %10100000 ; Right 6
DC.B %10010000 ; Right 7
fineAdjustTable EQU fineAdjustBegin - %11110001 ; NOTE: %11110001 = -15
;----------------------------
; Reset/Break
;----------------------------
ORG $FFFC
; set Reset pointer (at $FFFC and $FFFD) to Reset label
.word Reset
; set BRK pointer (at $FFFE and $FFFF) to Reset label
.word Reset
|
; A231672: a(n) = Sum_{i=0..n} digsum_6(i), where digsum_6(i) = A053827(i).
; 0,1,3,6,10,15,16,18,21,25,30,36,38,41,45,50,56,63,66,70,75,81,88,96,100,105,111,118,126,135,140,146,153,161,170,180,181,183,186,190,195,201,203,206,210,215,221,228,231,235,240,246,253,261,265,270,276,283,291,300,305,311,318,326,335,345,351,358,366,375,385,396,398,401,405,410,416,423,426,430,435,441,448,456,460,465,471,478,486,495,500,506,513,521,530,540,546,553,561,570,580,591,598,606,615,625,636,648,651,655,660,666,673,681,685,690,696,703,711,720,725,731,738,746,755,765,771,778,786,795,805,816,823,831,840,850,861,873,881,890,900,911,923,936,940,945,951,958,966,975,980,986,993,1001,1010,1020,1026,1033,1041,1050,1060,1071,1078,1086,1095,1105,1116,1128,1136,1145,1155,1166,1178,1191,1200,1210,1221,1233,1246,1260,1265,1271,1278,1286,1295,1305,1311,1318,1326,1335,1345,1356,1363,1371,1380,1390,1401,1413,1421,1430,1440,1451,1463,1476,1485,1495,1506,1518,1531,1545,1555,1566,1578,1591,1605,1620,1621,1623,1626,1630,1635,1641,1643,1646,1650,1655,1661,1668,1671,1675,1680,1686,1693,1701,1705,1710,1716,1723,1731,1740,1745,1751,1758,1766,1775,1785,1791,1798,1806,1815
; LODA program (generated). Outer loop runs i = 0..n; the inner loop
; appears to compute digsum_6(i) via repeated div-by-6 and accumulate
; into $8 — register roles inferred, TODO confirm against LODA semantics.
mov $7,$0 ; $7 = n (preserved across outer iterations)
mov $9,$0 ; $9 = outer loop counter
lpb $9,1
mov $0,$7
sub $9,1
sub $0,$9 ; $0 = current index i
mov $4,$0
lpb $3,4 ; inner loop: digit-sum of i in base 6
div $4,6
mov $2,$4
mul $2,5
sub $0,$2 ; peel off 5 * floor(i/6^k): leaves the base-6 digit sum
lpe
add $8,$0 ; accumulate partial sums
lpe
mov $1,$8 ; result
|
; Copyright (c) Piotr Durlej
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are met:
;
; 1. Redistributions of source code must retain the above copyright notice,
; this list of conditions and the following disclaimer.
;
; 2. Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in the
; documentation and/or other materials provided with the distribution.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
; POSSIBILITY OF SUCH DAMAGE.
;
; Private stack-switching helpers (16-bit real mode, NASM syntax).
; stk_enter swaps SS:SP to the private stack at 0:(INITSEG<<4);
; stk_leave restores the caller's saved SS:SP. The return address is
; parked in stk_rta across the switch because the stack itself moves.
; NOTE: single static save slots => not reentrant and not interrupt-safe
; between the pop of the return address and the final ret.
new_stack:
dw INITSEG << 4 ; new SP (offset)
dw 0 ; new SS (segment 0)
old_stack:
old_sp: dw 0 ; caller SP saved here by stk_enter
old_ss: dw 0 ; caller SS saved here by stk_enter
stk_rta dw 0 ; parked return address
stk_sax dw 0 ; parked AX (AX is used as scratch for the SS load)
stk_enter:
pop word [cs:stk_rta] ; grab return address off the old stack
mov [cs:stk_sax], ax
mov [cs:old_sp], sp
mov [cs:old_ss], ss
mov ax, [cs:new_stack + 2]
mov ss, ax ; mov ss masks interrupts for the next insn
mov sp, [cs:new_stack ]
mov ax, [cs:stk_sax]
push word [cs:stk_rta] ; replay return address on the new stack
ret
stk_leave:
pop word [cs:stk_rta]
mov [cs:stk_sax], ax
mov ax, [cs:old_stack + 2] ; = old_ss
mov ss, ax
mov sp, [cs:old_stack ] ; = old_sp
mov ax, [cs:stk_sax]
push word [cs:stk_rta]
ret
stk_init:
ret ; nothing to initialize (slots are static)
|
;-----------------------------------------------------
; File name: example.asm
; Study by Jedi Chou, 2020.1.1 20:37
;-----------------------------------------------------
;-----------------------------------------------------
; Define instruction set and program execute model
.386
.model flat
;-----------------------------------------------------
; Declare ExitProcess function and parameters format
ExitProcess proto near32 stdcall, dwExitCode:DWORD
;-----------------------------------------------------
; Define stack length
.stack 4096
;-----------------------------------------------------
; Define two variables at memory
.data
number DWORD -105
sum DWORD ?
.code
_start:
; sum = number + 158 = 53
mov eax, number
add eax, 158
mov sum, eax
; terminate the process with exit code 0
invoke ExitProcess, 0
public _start
end _start
|
; A213367: Numbers that are not squares of primes.
; 1,2,3,5,6,7,8,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70
; LODA program: $0 is the input (and final output) register,
; $1..$4 are scratch.  lpb/lpe delimit a loop that runs while the
; loop register ($1) is nonzero; trn is truncated subtraction
; (clamped at zero).  -- NOTE(review): semantics per the LODA
; language reference; confirm against the loda-lang spec.
mov $1,$0
sub $1,1
mov $2,$1
mov $3,3
lpb $1
add $0,1
add $2,3
mov $4,1
add $4,$3
mul $4,2
trn $2,$4
add $2,1
mov $1,$2
sub $2,3
add $3,4
lpe
add $0,1
|
; ---------------------------------------------------------------
; Boot-sector test exercising the CALL forms: direct far call,
; near indirect call through memory, and far indirect call.
; Loaded at linear 0x7C00; expected eax values are noted inline.
; ---------------------------------------------------------------
BITS 16
org 0x7c00
start:
mov eax, 0
mov ebx, 0x10
mov ecx, 0x11
call 0x7C0:0x2D ; eax: 0x10 after addEbx (0x7C0:0x2D = linear 0x7C2D, an alias of addEbx)
mov edx, 0x12
call [indirect] ; eax: 0x21 after addEcx (near indirect: reads a word offset)
add eax, edx ; eax: 0x33
call far [farAddr] ; eax: 43 after addEbx
; NOTE(review): a 16-bit far indirect call reads offset at farAddr and
; segment at farAddr+2, but farAddr stores segment first -- confirm this
; layout is what the target emulator/CPU under test expects.
jmp 0:0
addEbx:
add eax, ebx
retf ; far return, pairs with the far calls above
addEcx:
add eax, ecx
ret ; near return, pairs with the near indirect call
indirect:
dd addEcx ; dword; only the low word is consumed by `call [indirect]`
farAddr:
dw 0x7C0 ; Seg
dd 0x2D ; Offset |
// Fragment: branch to {la1} if the 16-bit value at {m1} is >= the
// 8-bit value in Y (unsigned).  If the high byte is nonzero the value
// is >= 256 and the branch is taken immediately; otherwise the low
// byte is compared against Y via the $ff zero-page scratch location.
// Clobbers A and $ff.
lda {m1}+1
bne {la1}
sty $ff
lda {m1}
cmp $ff
bcs {la1}
!: |
; $Id: bs3-cmn-SwitchToRing1.asm $
;; @file
; BS3Kit - Bs3SwitchToRing1
;
;
; Copyright (C) 2007-2017 Oracle Corporation
;
; This file is part of VirtualBox Open Source Edition (OSE), as
; available from http://www.virtualbox.org. This file is free software;
; you can redistribute it and/or modify it under the terms of the GNU
; General Public License (GPL) as published by the Free Software
; Foundation, in version 2 as it comes in the "COPYING" file of the
; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
;
; The contents of this file may alternatively be used under the terms
; of the Common Development and Distribution License Version 1.0
; (CDDL) only, as it comes in the "COPYING.CDDL" file of the
; VirtualBox OSE distribution, in which case the provisions of the
; CDDL are applicable instead of those of the GPL.
;
; You may elect to license modified versions of this file under the
; terms and conditions of either the GPL or the CDDL or both.
;
;*********************************************************************************************************************************
;* Header Files *
;*********************************************************************************************************************************
%include "bs3kit-template-header.mac"
;*********************************************************************************************************************************
;* External Symbols *
;*********************************************************************************************************************************
BS3_EXTERN_CMN_FAR Bs3SwitchToRingX
TMPL_BEGIN_TEXT
;;
; @cproto BS3_DECL(void) Bs3SwitchToRing1(void);
;
; @remarks Does not require 20h of parameter scratch space in 64-bit mode.
; @uses No GPRs.
;
BS3_PROC_BEGIN_CMN Bs3SwitchToRing1, BS3_PBC_HYBRID_0_ARGS
%if TMPL_BITS == 64
push rcx ; preserve rcx ("No GPRs" contract above)
sub rsp, 20h ; parameter scratch space for the callee
mov ecx, 1 ; ring number to switch to
mov [rsp], rcx ; also place the argument in the stack slot
call Bs3SwitchToRingX
add rsp, 20h
pop rcx
%else
push 1 ; ring number argument (stack convention)
TONLY16 push cs ; emitted in 16-bit templates only -- caller segment
call Bs3SwitchToRingX
add xSP, xCB ; drop the pushed argument
%endif
BS3_HYBRID_RET
BS3_PROC_END_CMN Bs3SwitchToRing1
|
/*ckwg +5
* Copyright 2012-2014 by Kitware, Inc. All Rights Reserved. Please refer to
* KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
* Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
*/
#include "file_format_xgtf.h"
#include <vul/vul_reg_exp.h>
#include <vgl/vgl_point_2d.h>
#include <tinyxml.h>
#include <boost/lexical_cast.hpp>
#include <track_oracle/utils/tokenizers.h>
#include <track_oracle/utils/logging_map.h>
#include <track_oracle/file_formats/track_xgtf/track_xgtf.h>
#include <track_oracle/aries_interface/aries_interface.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdio>
#include <utility>
#include <vital/logger/logger.h>
static kwiver::vital::logger_handle_t main_logger( kwiver::vital::get_logger( __FILE__ ) );
using std::multimap;
using std::pair;
using std::map;
using std::string;
using std::make_pair;
using std::istringstream;
using std::vector;
namespace // start of anonymous namespace
{
// ACTIVITY: classic VIRAT activity annotations; APPEARANCE: ground
// truth split into static/moving appearance categories.
enum xgtf_style { ACTIVITY, APPEARANCE };
// index into the ARIES activity table
typedef unsigned int activity_type;
//
// redefinition of tinyxml elements for style consistency
//
typedef TiXmlNode xml_node_t;
typedef TiXmlElement xml_element_t;
typedef TiXmlDocument xml_document_t;
typedef TiXmlHandle xml_handle_t;
//
// local structs for viper's activity maps
// (keyed by framespan (first,last); multimap because several
// activities may share a span)
//
typedef multimap< pair<unsigned int, unsigned int>, activity_type > viper_activity_list_t;
typedef multimap< pair<unsigned int, unsigned int>, activity_type >::iterator viper_activity_list_it;
typedef multimap< pair<unsigned int, unsigned int>, activity_type >::const_iterator viper_activity_list_c_it;
//
// pair up the nodes with their "name" attributes -- probably an easier way
// to do this in tinyXML?
//
typedef map< string, xml_element_t* > named_nodes_t;
typedef map< string, xml_element_t* >::const_iterator named_nodes_c_it;
//
// build up a map of the immediate children of this node,
// cast to elements, and uniquely identified by name
//
//
// Walk the immediate children of `node`; each must be an element with
// a unique "name" attribute.  Record name -> element in `named_nodes`.
// Returns false (with a log message) on the first child that violates
// either requirement.
//
bool
build_named_node_map( xml_node_t* node,
                      named_nodes_t& named_nodes )
{
  for ( xml_node_t* child = node->IterateChildren( 0 );
        child != 0;
        child = node->IterateChildren( child ) )
  {
    // must convert to an element...
    xml_element_t* elem = child->ToElement();
    if ( elem == 0 )
    {
      LOG_ERROR( main_logger, "Error reading line " << child->Row()
                 << " could not be converted into an element" );
      return false;
    }

    // ...must carry a "name" attribute...
    const char* name_attr = elem->Attribute( "name" );
    if ( name_attr == 0 )
    {
      LOG_ERROR( main_logger, "Error reading line " << child->Row()
                 << " has no 'name' attribute" );
      return false;
    }

    // ...and the name must be unique among siblings.
    if ( named_nodes.find( name_attr ) != named_nodes.end() )
    {
      LOG_ERROR( main_logger, "Error reading line " << child->Row()
                 << " has duplicate '" << name_attr << "' attributes" );
      return false;
    }

    named_nodes.insert( make_pair( name_attr, elem ) );
  }
  return true;
}
//
// Given a framespan "m:n", e.g. "102:694", parse out the start and end frames
//
//
// Given a framespan "m:n", e.g. "102:694", parse out the start and end
// frames into `span`.  Succeeds only when the string is exactly one
// "first:second" pair (surrounding whitespace allowed); logs and
// returns false otherwise.
//
bool
parse_framespan( const string& framespan_str,
                 pair<unsigned int, unsigned int>& span )
{
  istringstream iss( framespan_str );
  char colon, junk;
  bool okay = false;
  // Require two unsigned values separated by an actual ':' (the old
  // code accepted any single separator character)...
  if ( ( iss >> span.first >> colon >> span.second ) && ( colon == ':' ) )
  {
    // ...followed by nothing but trailing whitespace.  (This replaces
    // the confusing `!(iss >> junk) != 0` double negation.)
    if ( !( iss >> junk ) )
    {
      okay = true;
    }
  }
  if ( !okay )
  {
    LOG_ERROR( main_logger, "Couldn't parse framespan '" << framespan_str << "'" );
  }
  return okay;
}
//
// Given a framespan "m:n", e.g. "102:694", parse out the start and end frames
// This time, allow for multiple sets: "a:b c:d ..."
//
bool
parse_framespan_set( const string& framespan_str,
vector< pair<unsigned int, unsigned int> > & span_set )
{
istringstream iss( framespan_str );
char colon;
bool okay = false;
pair< unsigned int, unsigned int> span;
while ( iss >> span.first >> colon >> span.second )
{
span_set.push_back( span );
okay = true;
}
if ( !okay )
{
LOG_ERROR( main_logger, "Couldn't parse framespan '" << framespan_str << "'" );
}
return okay;
}
//
// Given a bounding box node, e.g.
// <data:bbox framespan="2015:2015" height="22" width="20" x="322" y="263"/>
// ... extract the frame span and the bounding box.
//
//
// Given a bounding box node, e.g.
//   <data:bbox framespan="2015:2015" height="22" width="20" x="322" y="263"/>
// ... extract the frame span and the bounding box.  Returns false
// (with a log message) if any required attribute is missing or
// malformed.
//
bool
extract_bounding_box( xml_node_t* xml_node,
                      pair< unsigned int, unsigned int >& span,
                      vgl_box_2d< double >& box )
{
  xml_element_t* e = xml_node->ToElement();
  if ( !e )
  {
    LOG_ERROR( main_logger, "extract_bounding_box: Couldn't convert xmlnode to element?" );
    return false;
  }

  // Attribute() returns NULL when the attribute is absent; the old
  // code passed that straight into a std::string constructor (UB).
  const char* framespan_attr = e->Attribute( "framespan" );
  if ( !framespan_attr || !parse_framespan( framespan_attr, span ) )
  {
    LOG_ERROR( main_logger, "extract_bounding_box: Couldn't extract framespan?" );
    return false;
  }

  // The four geometry attributes are handled identically; table-drive
  // the extraction instead of repeating the query/log/return block.
  int h, w, x, y;
  const pair< const char*, int* > attrs[] =
  {
    make_pair( "height", &h ),
    make_pair( "width",  &w ),
    make_pair( "x",      &x ),
    make_pair( "y",      &y )
  };
  for ( size_t i = 0; i < sizeof( attrs ) / sizeof( attrs[0] ); ++i )
  {
    if ( e->QueryIntAttribute( attrs[i].first, attrs[i].second ) != TIXML_SUCCESS )
    {
      LOG_ERROR( main_logger, "extract_bounding_box: Couldn't extract "
                 << attrs[i].first << "?" );
      return false;
    }
  }

  box.set_min_point( vgl_point_2d< double >( x, y ) );
  box.set_max_point( vgl_point_2d< double >( x+w, y+h ) );
  return true;
}
//
// given an occlusion node, e.g.
// <data:fvalue framespan="61:277" value="1.0"/>
// ...extract the frame span and the occlusion value.
//
//
// Given an occlusion node, e.g.
//   <data:fvalue framespan="61:277" value="1.0"/>
// ... extract the frame span and the occlusion value.  Returns false
// (with a log message) on any missing or malformed attribute.
//
bool
extract_occlusion( xml_node_t* xml_node,
                   pair< unsigned int, unsigned int >& span,
                   double& occlusion_value )
{
  xml_element_t* e = xml_node->ToElement();
  if ( !e )
  {
    LOG_ERROR( main_logger, "extract_occlusion: Couldn't convert xmlnode to element?" );
    return false;
  }
  // Attribute() returns NULL for a missing attribute; guard before
  // constructing the std::string parameter of parse_framespan (the
  // old code would hit undefined behavior here).
  const char* framespan_attr = e->Attribute( "framespan" );
  if ( !framespan_attr || !parse_framespan( framespan_attr, span ) )
  {
    LOG_ERROR( main_logger, "extract_occlusion: Couldn't extract framespan?" );
    return false;
  }
  if ( e->QueryDoubleAttribute( "value", &occlusion_value ) != TIXML_SUCCESS )
  {
    LOG_ERROR( main_logger, "extract_occlusion: Couldn't extract value?" );
    return false;
  }
  return true;
}
//
// named_nodes is a map of the XML nodes which are children of
// this_track. Extract the "Location" nodes and "Occlusion"
// nodes, and associate the frame ID, bounding box, and optional
// occlusion values with the frames.
//
//
// Populate `xgtf_frame_map` (frame number -> frame handle) from the
// "Location" child of `named_nodes`, creating one frame on
// `this_track` per frame in each bbox's framespan, then overlay
// occlusion values from the optional "Occlusion" child.  Returns
// false (with a log message) on any parse failure.
// NOTE(review): overlapping Location framespans would create
// duplicate frames for the same frame number, with only the last
// handle kept in the map -- confirm annotations never overlap.
//
bool
extract_viper_frame_data( const named_nodes_t& named_nodes,
::kwiver::track_oracle::track_handle_type& this_track,
map< unsigned int, ::kwiver::track_oracle::frame_handle_type>& xgtf_frame_map )
{
static ::kwiver::track_oracle::track_xgtf_type xgtf_schema;
xgtf_frame_map.clear();
// first, pull the bounding boxes from "Location"
named_nodes_c_it probe = named_nodes.find( "Location" );
if ( probe == named_nodes.end() )
{
LOG_ERROR( main_logger, "Couldn't find Location node?" );
return false;
}
vgl_box_2d< double > box;
pair< unsigned int, unsigned int > span;
xml_node_t* xml_location_node = probe->second;
xml_node_t* xml_bbox_node = 0;
while ( ( xml_bbox_node = xml_location_node->IterateChildren( xml_bbox_node ) ) )
{
// extract the bounding box from the XML
if ( !extract_bounding_box( xml_bbox_node, span, box ) )
{
return false;
}
// create a box for each frame in the span
for ( unsigned frame = span.first; frame <= span.second; ++frame )
{
::kwiver::track_oracle::frame_handle_type frame_handle = xgtf_schema( this_track ).create_frame();
xgtf_schema[ frame_handle ].bounding_box() = box;
xgtf_schema[ frame_handle ].frame_number() = frame;
xgtf_frame_map[ frame ] = frame_handle;
}
}
// If "Occlusion" is present, read and associate with frames
probe = named_nodes.find( "Occlusion" );
if ( probe != named_nodes.end() )
{
xml_node_t* xml_occlusion_node = probe->second;
xml_node_t* xml_fval_node = 0;
double occ_value;
while ( ( xml_fval_node = xml_occlusion_node->IterateChildren( xml_fval_node ) ) )
{
// extract span and occlusion
if ( !extract_occlusion( xml_fval_node, span, occ_value ))
{
return false;
}
// associate it with the frames. Only set occlusions where there are
// bounding boxes (assuming the MITRE annotators were... non-specific.)
for ( unsigned int frame = span.first; frame <= span.second; ++frame )
{
map< unsigned, ::kwiver::track_oracle::frame_handle_type >::const_iterator frame_it = xgtf_frame_map.find( frame );
if ( frame_it != xgtf_frame_map.end() )
{
xgtf_schema[ frame_it->second ].occlusion() = occ_value;
}
}
}
}
return true;
}
//
// Assume that all children of the sourcefile which are not "Location" or
// "Occlusion" or any of the other Not-An-Activity-Tags are activities, e.g.
//
// <attribute name="Standing">
// <data:bvalue framespan="61:1715" value="true"/>
// <data:bvalue framespan="1716:2016" value="false"/>
// </attribute>
//
// ... Associate the "true" framespans with their activities. No frame should
// have more than one activity. If a frame has no activity... warn?
// CHECK for more than one activity!
// clip to object span frame values
//
//
// Scan every named child node that is not a known non-activity tag,
// normalize its name to an ARIES activity label (appearance-style
// files map person/vehicle keywords to PERSON_MOVING/VEHICLE_MOVING),
// and for each bvalue with value="true" insert a
// (framespan -> activity index) entry into `m`, clipping the span to
// `object_span`.  Unrecognized or skipped activities are recorded in
// `warnings`.  Returns false only on structural XML errors.
//
bool
extract_viper_activities( const named_nodes_t& named_nodes,
viper_activity_list_t& m,
const pair< unsigned int, unsigned int>& object_span,
unsigned int /*viperID*/,
bool promote_pvmoving,
xgtf_style style,
::kwiver::logging_map_type& warnings )
{
// get the current string-to-index map for VIRAT
size_t PERSON_MOVING_INDEX = ::kwiver::track_oracle::aries_interface::activity_to_index( "PersonMoving" );
size_t VEHICLE_MOVING_INDEX = ::kwiver::track_oracle::aries_interface::activity_to_index( "VehicleMoving" );
// start walking down the nodes
for ( named_nodes_c_it n = named_nodes.begin(); n != named_nodes.end(); ++n )
{
// known non-activity tags are skipped outright
if ( n->first == "Location" ) continue;
if ( n->first == "Occlusion" ) continue;
if ( n->first == "Event-Related Occlusion" ) continue;
if ( n->first == "FOV" ) continue;
if ( n->first == "Wavelength" ) continue;
// assume it's an activity
string activity_name = n->first;
xml_node_t* xml_bvalue_node = 0;
while ( ( xml_bvalue_node = n->second->IterateChildren( xml_bvalue_node ) ) )
{
xml_element_t* e = xml_bvalue_node->ToElement();
if ( !e )
{
LOG_ERROR( main_logger, "extract_viper_activities: couldn't convert " << activity_name
<< " bvalues to element?" );
return false;
}
// can we convert it to an activity name?
// check for appearance-based annotations
if ( style == APPEARANCE )
{
if ( activity_name.find( "suv" ) != string::npos ||
activity_name.find( "pickup" ) != string::npos ||
activity_name.find( "car" ) != string::npos ||
activity_name.find( "vehicle" ) != string::npos )
{
activity_name = "VEHICLE_MOVING";
}
else if ( activity_name.find( "person" ) != string::npos )
{
activity_name = "PERSON_MOVING";
}
}
else
{
// special cases for the, um, idiosyncrasies of other performers
// in choosing labels for annotations
if ( activity_name == "Entering a Vehicle" ) activity_name = "Getting Into a Vehicle";
else if ( activity_name == "Exiting a Vehicle" ) activity_name = "Getting Out of a Vehicle";
else if ( activity_name == "Person Entering a Facility" ) activity_name = "Entering a Facility";
else if ( activity_name == "Person Exiting a Facility" ) activity_name = "Exiting a Facility";
else if ( activity_name == "Environment-Related Occlusion" ) activity_name = "Not Scored";
// adjust for ARIES aliasing (sigh)
if ( activity_name == "Pulling" ) activity_name = "PERSON_PULLING";
else if ( activity_name == "Carrying Together" ) activity_name = "PERSON_CARRYING_TOGETHER";
else if ( activity_name == "Climbing Atop" ) activity_name = "PERSON_CLIMBING_ATOP";
else if ( activity_name == "Driving into a Facility" ) activity_name = "VEHICLE_DRIVING_INTO_A_FACILITY";
else if ( activity_name == "Driving out of a Facility" ) activity_name = "VEHICLE_DRIVING_OUT_OF_A_FACILITY";
else if ( activity_name == "Kicking" ) activity_name = "PERSON_KICKING";
else if ( activity_name == "Laying Wire" ) activity_name = "PERSON_LAYING_WIRE";
else if ( activity_name == "Looping" ) activity_name = "VEHICLE_LOOPING";
else if ( activity_name == "Maintaining Distance" ) activity_name = "VEHICLE_MAINTAINING_DISTANCE";
else if ( activity_name == "Passing" ) activity_name = "VEHICLE_PASSING";
else if ( activity_name == "Pushing" ) activity_name = "PERSON_PUSHING";
else if ( activity_name == "Sitting" ) activity_name = "PERSON_SITTING";
else if ( activity_name == "Throwing" ) activity_name = "PERSON_THROWING";
}
bool skip_flag = false;
if ( activity_name == "No. Vehicle" )
{
skip_flag = true;
}
// For LMCO's VIRAT phase 3 "golden ground truth", there are tags such
// as "Running No Score". We'll skip those AS LONG as the value is false.
{
string no_score_str( " No Score" );
size_t len_an( activity_name.size() ), len_ns( no_score_str.size() );
const string true_str( "true" );
bool ends_in_no_score =
( len_an >= len_ns ) &&
( activity_name.substr( len_an-len_ns, len_ns ) == no_score_str );
// NOTE(review): Attribute("value") yields a C string compared against
// a std::string; a missing "value" attribute would produce a null
// pointer here -- confirm annotations always carry it.
bool is_true = ( e->Attribute( "value" ) == true_str );
if ( ends_in_no_score && ( ! is_true ) )
{
// silently skip
skip_flag = true;
}
}
if ( !skip_flag )
{
// (found?, activity-index) pairs for the raw and promoted activity
pair<bool, unsigned int> act_type( false, 0 );
pair<bool, unsigned int> promote_type( false, 0 );
try
{
act_type.second = static_cast<unsigned int>(
::kwiver::track_oracle::aries_interface::activity_to_index( activity_name ) );
// if we get here, it worked
act_type.first = true;
if ( ::kwiver::track_oracle::aries_interface::promote_to_PERSON_MOVING( act_type.second ) )
{
promote_type.first = true;
promote_type.second = PERSON_MOVING_INDEX;
}
else if ( ::kwiver::track_oracle::aries_interface::promote_to_VEHICLE_MOVING( act_type.second ) )
{
promote_type.first = true;
promote_type.second = VEHICLE_MOVING_INDEX;
}
else
{
warnings.add_msg( activity_name + " not promoted" );
}
}
catch ( ::kwiver::track_oracle::aries_interface_exception& aries_type_exception )
{
warnings.add_msg( string( "unrecognized activity: " ) + activity_name + ": " + aries_type_exception.what() );
if ( activity_name == "Bicycling" )
{
promote_type.first = true;
promote_type.second = PERSON_MOVING_INDEX;
}
}
//
// new promotion logic:
// -- only promote if promote_pvmoving is set
if ( promote_pvmoving )
{
// ...in which case, we REPLACE the activity we're reading
// with PVMoving so as ensure that an xgtf with N tracks
// going in has N tracks going out, rather than 2N tracks
// (the "real" track and its PVMoving "shadow") as previously done.
if ( promote_type.first )
{
act_type = promote_type;
}
}
// do we have to do anything?
if ( !act_type.first )
{
// no... nothing to do
continue;
}
// only take the "true" values
const string true_str( "true" );
if ( e->Attribute( "value" ) == true_str )
{
pair< unsigned int, unsigned int > span;
if ( !parse_framespan( e->Attribute("framespan"), span ) )
{
LOG_ERROR( main_logger, "Couldn't parse framespan for " << activity_name << "?" );
return false;
}
// clip the activity span to the enclosing object span
if ( span.first < object_span.first )
{
span.first = object_span.first;
}
if ( object_span.second < span.second )
{
span.second = object_span.second;
}
// if defined, add an entry for the base activity
if ( act_type.first )
{
m.insert( make_pair( span, act_type.second ) );
}
} // ...if true
} // ... if not skipped
} // ...for all value nodes
} // ...for all named nodes
return true;
}
//
// Dig from a loaded XGTF document down to its <sourcefile> node:
// root -> "data" -> "sourcefile".  Returns 0 (with a log message)
// if any level is missing.  For now, assumes a single sourcefile.
//
xml_node_t*
doc_to_source_node( const string& filename, xml_document_t& doc )
{
  xml_node_t* xml_root = doc.RootElement();
  if ( !xml_root )
  {
    // (fixed typo "skippig" in the emitted message)
    LOG_ERROR( main_logger, "Couldn't load root element from '" << filename << "'; skipping" );
    return 0;
  }
  xml_node_t* xml_data = xml_root->FirstChild( "data" );
  if ( !xml_data )
  {
    LOG_ERROR( main_logger, "Couldn't find the 'data' child in '" << filename << "'; skipping" );
    return 0;
  }
  // ...down to the sourcefile node. For now, assume a single sourcefile
  xml_node_t* xml_source = xml_data->FirstChild( "sourcefile" );
  if ( !xml_source )
  {
    LOG_ERROR( main_logger, "Couldn't find the 'sourcefile' child in '" << filename << "'; skipping" );
    return 0;
  }
  return xml_source;
}
} // anon
namespace kwiver {
namespace track_oracle {
// Copy-assign from the generic options base.  Only the
// promote_pvmoving flag is carried over; assigning a non-xgtf options
// object is logged as slicing and otherwise ignored.
xgtf_reader_opts&
xgtf_reader_opts
::operator=( const file_format_reader_opts_base& rhs_base )
{
  if ( const xgtf_reader_opts* src = dynamic_cast< const xgtf_reader_opts* >( &rhs_base ) )
  {
    this->set_promote_pvmoving( src->promote_pvmoving );
  }
  else
  {
    LOG_WARN(main_logger, "Assigned a non-xgtf options structure to a xgtf options structure: Slicing the class");
  }
  return *this;
}
bool
file_format_xgtf
::inspect_file( const string& fn ) const
{
vector< string > tokens = xml_tokenizer::first_n_tokens( fn, 10 );
for ( size_t i=0; i<tokens.size(); ++i )
{
if ( tokens[i].find( "<viper" ) != string::npos )
{
return true;
}
}
return false;
}
//
// Read all tracks from the XGTF file `fn`, appending one output track
// per (activity, framespan) pair found under each viper <object> node
// to `tracks`.  Returns false on any structural or parse error;
// accumulated per-file warnings are logged once at the end.
//
bool
file_format_xgtf
::read( const string& fn,
track_handle_list_type& tracks ) const
{
logging_map_type warnings( main_logger, KWIVER_LOGGER_SITE );
track_xgtf_type xgtf_schema;
tracks.clear();
// dig through the XML wrappers...
xml_document_t doc( fn.c_str() );
xml_handle_t doc_handle( &doc );
if ( !doc.LoadFile() )
{
LOG_ERROR( main_logger, "TinyXML couldn't load '" << fn << "'; skipping" );
LOG_ERROR( main_logger, "Error description: " << doc.ErrorDesc() );
LOG_ERROR( main_logger, "Error location: row " << doc.ErrorRow() << "; col " << doc.ErrorCol() );
return false;
}
xml_node_t* xml_source = doc_to_source_node( fn, doc );
if ( !xml_source )
{
return false;
}
// does this xml file contain activity or appearance annotations?
xgtf_style style = ACTIVITY;
xml_node_t* xml_viper_object = 0;
while ( ( xml_viper_object = xml_source->IterateChildren( xml_viper_object ) ) )
{
// only interested in Object nodes
const string object_str( "object" );
if ( xml_viper_object->Value() != object_str ) continue;
//
// Consider the VIPER format. Each child of a sourcefile has:
// - attribute ID
// - attribute 'name' (OBJECT or PERSON)
// - attribute framespan, [objM:objN]
// - a set of children tagged 'attribute':
// --- one named 'Location'
// ------ whose children are 'data:bbox' with framespan and bounding box as the attributes
// --- one named 'Occlusion'
// ------ whose children are 'data:fvalue' with framespan and occluded value as the attributes
// --- one for each of the 20 activities
// ------ whose children are 'data:bvalue' with framespan [actM:actN] and true/false as the attributes
//
// At any given frame, exactly one of the bvalues should be true.
//
// We'll split out tracks based on the framespan of the bvalues,
// keeping in mind that each bvalue should be clipped by the object framespan.
//
// =====
// 1) break out the invariant id and 'class' (object/person).
// =====
xml_element_t* xmle = xml_viper_object->ToElement();
if ( !xmle ) return false;
int viperID = -1;
if ( xmle->QueryIntAttribute( "id", &viperID ) != TIXML_SUCCESS ) return false;
string viper_class( xmle->Attribute( "name" ));
if ( viper_class.empty() ) return false;
// special-case for "augmented" performer annotations
if ( viper_class == "PERSON-VEHICLE" ) viper_class = "PERSON";
if ( viper_class == "PERSON-FACILITY" ) viper_class = "PERSON";
// determine if gt is in appearance form divided into static and moving categories
if ( viper_class == "MOVER" ||
viper_class == "STATIC" )
{
style = APPEARANCE;
}
// it contains activities so it should now be either "PERSON" or "VEHICLE"
// ...or, now with AFRL's ESC evaluation XGTF, No_Annotation_Zone or
// Environment_Induced_Movement
else if ( ( viper_class != "PERSON" ) &&
( viper_class != "VEHICLE" ) &&
( viper_class != "No_Annotation_Zone" ) &&
( viper_class != "Environment_Induced_Movement" ) )
{
LOG_ERROR( main_logger, "XGTF reader: file '" << fn << "' row " << xmle->Row()
<< " track " << viperID << ":unknown class '" << viper_class << "'" );
return false;
}
// =====
// 2) need to pull the object framespan for clipping
// =====
//
// framespan is REQUIRED
if ( !xmle->Attribute( "framespan" ) )
{
LOG_WARN( main_logger, "xgtf_reader: viper_class " << viper_class << " has no framespan near row "
<< xmle->Row() << "? Skipping" );
continue;
}
pair<unsigned int, unsigned int> object_span;
// annoyingly, framespans may be multiple sets: "a:b c:d e:f ..."
// coalesce into the first and last elements
vector< pair<unsigned int, unsigned int> > objectspan_set;
if ( !parse_framespan_set( xmle->Attribute( "framespan" ), objectspan_set ) )
{
LOG_ERROR( main_logger, "...while reading '" << fn << "'" );
return false;
}
object_span.first = objectspan_set.front().first;
object_span.second = objectspan_set.back().second;
// =====
// 3) Gather all the children nodes up: one "location", one "occlusion"
// all the activities
// =====
named_nodes_t named_nodes;
if ( !build_named_node_map( xml_viper_object, named_nodes ) )
{
LOG_ERROR( main_logger, "...while reading '" << fn << "'" );
return false;
}
// =====
// 4) From the children nodes of this (track) node, generate the list of frames
// =====
// read in the (possibly multi-activity) xgtf_track
track_handle_type xgtf_track = xgtf_schema.create();
map< unsigned, frame_handle_type > xgtf_frame_map;
if ( !extract_viper_frame_data( named_nodes, xgtf_track, xgtf_frame_map ) )
{
LOG_ERROR( main_logger, "...while reading '" << fn << "'" );
return false;
}
// =====
// 5) Split into per-activity framespans, clipped by the object framespan
// =====
viper_activity_list_t activities;
if ( !extract_viper_activities( named_nodes,
activities,
object_span,
viperID,
this->opts.promote_pvmoving,
style,
warnings ) )
{
LOG_ERROR( main_logger, "...while reading '" << fn << "'" );
return false;
}
// =====
// 6) For each activity, create a track
// =====
::kwiver::track_oracle::track_xgtf_type dst_schema;
for ( viper_activity_list_c_it act_it = activities.begin(); act_it != activities.end(); ++act_it )
{
//
// The VIPER GUI seems prone to accidentally creating single-frame tracks.
// Skip them here.
//
if ( act_it->first.first == act_it->first.second )
{
continue;
}
track_handle_type this_track = xgtf_schema.create();
xgtf_schema( this_track ).activity() = act_it->second;
xgtf_schema( this_track ).activity_probability() = 1.0;
xgtf_schema( this_track ).frame_span() = act_it->first;
// No enum yet for PVO, store the string
xgtf_schema( this_track ).type() = viper_class;
xgtf_schema( this_track ).external_id() = viperID;
// copy over the frame data
for ( unsigned int frame = act_it->first.first; frame <= act_it->first.second; ++frame )
{
// look up the frame in the overall xgtf track
map< unsigned, frame_handle_type >::const_iterator probe = xgtf_frame_map.find( frame );
if ( probe == xgtf_frame_map.end() )
{
const string msg = "frame with no data";
bool emit_warning = warnings.add_msg( msg );
if ( emit_warning )
{
LOG_WARN( main_logger, "xgtf_reader: "
<< "id: " << viperID << " event: " << act_it->second
<< " frame: " << frame << " (framespan " << act_it->first.first
<< ":" << act_it->first.second << ") has no data? "
<< "(warning only printed once) Skipping..." );
}
continue;
}
const frame_handle_type& lookup_frame = probe->second;
frame_handle_type this_frame = xgtf_schema( this_track ).create_frame();
dst_schema[ this_frame ].bounding_box() = xgtf_schema[ lookup_frame ].bounding_box();
dst_schema[ this_frame ].frame_number() = xgtf_schema[ lookup_frame ].frame_number();
// NOTE(review): existence is probed through dst_schema while the value
// is read through xgtf_schema.  If both schema instances are views on
// the same oracle backend this is equivalent, but
// xgtf_schema[ lookup_frame ].occlusion.exists() looks like the
// intended test -- confirm.
if ( dst_schema[ lookup_frame ].occlusion.exists() )
{
dst_schema[ this_frame ].occlusion() = xgtf_schema[ lookup_frame ].occlusion();
}
}
// add the track to the return set
tracks.push_back( this_track );
}
// 7) Delete the original track we saved in track_oracle...
// double-check xgtf(this_track).remove_me();
} // ...for each sourcefile
if ( !warnings.empty() )
{
LOG_INFO( main_logger, "xgtf_reader: Warnings from loading '" << fn << "':");
warnings.dump_msgs();
LOG_INFO( main_logger, "xgtf_reader: end of warnings" );
}
// all done!
return true;
}
} // ...track_oracle
} // ...kwiver
|
.size 8000
.text@48
jp lstatint
.text@100
jp lbegin
.data@143
c0
.text@150
lbegin:
ld a, 00
ldff(ff), a
ld a, 30
ldff(00), a
ld a, 01
ldff(4d), a
stop, 00
ld b, 91
call lwaitly_b
ld a, b1
ldff(40), a
ld a, 07
ldff(4b), a
ld c, 41
ld b, 03
lbegin_waitm3:
ldff a, (c)
and a, b
cmp a, b
jrnz lbegin_waitm3
ld a, 20
ldff(c), a
ld a, 02
ldff(ff), a
ei
.text@1000
lstatint:
nop
.text@101c
ld a, 91
ldff(40), a
ld a, b1
ldff(40), a
.text@1075
ldff a, (c)
and a, b
jp lprint_a
.text@7000
lprint_a:
push af
ld b, 91
call lwaitly_b
xor a, a
ldff(40), a
pop af
ld(9800), a
ld bc, 7a00
ld hl, 8000
ld d, a0
lprint_copytiles:
ld a, (bc)
inc bc
ld(hl++), a
dec d
jrnz lprint_copytiles
ld a, c0
ldff(47), a
ld a, 80
ldff(68), a
ld a, ff
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
xor a, a
ldff(69), a
ldff(69), a
ldff(43), a
ld a, 91
ldff(40), a
lprint_limbo:
jr lprint_limbo
.text@7400
lwaitly_b:
ld c, 44
lwaitly_b_loop:
ldff a, (c)
cmp a, b
jrnz lwaitly_b_loop
ret
.data@7a00
00 00 7f 7f 41 41 41 41
41 41 41 41 41 41 7f 7f
00 00 08 08 08 08 08 08
08 08 08 08 08 08 08 08
00 00 7f 7f 01 01 01 01
7f 7f 40 40 40 40 7f 7f
00 00 7f 7f 01 01 01 01
3f 3f 01 01 01 01 7f 7f
00 00 41 41 41 41 41 41
7f 7f 01 01 01 01 01 01
00 00 7f 7f 40 40 40 40
7e 7e 01 01 01 01 7e 7e
00 00 7f 7f 40 40 40 40
7f 7f 41 41 41 41 7f 7f
00 00 7f 7f 01 01 02 02
04 04 08 08 10 10 10 10
00 00 3e 3e 41 41 41 41
3e 3e 41 41 41 41 3e 3e
00 00 7f 7f 41 41 41 41
7f 7f 01 01 01 01 7f 7f
|
// Fragment: signed 16-bit comparison of the word at ({c1}),Y against
// the constant {c2}; branch to {la1} when value < {c2} (signed).
// Classic cmp/sbc idiom: after the high-byte sbc, N^V holds the signed
// less-than result; the bvc/eor #$80 pair folds V into N so a single
// bmi decides.  Clobbers A and flags.
lda {c1},y
cmp #<{c2}
lda {c1}+1,y
sbc #>{c2}
bvc !+
eor #$80
!:
bmi {la1} |
#include "LED.h"
#include <iostream>
// Construct an LED virtual device bound to `id`, driving GPIO `pin`.
// Creates an output-mode HardwareController for the pin, registers it,
// installs a network handler that toggles the LED on "LED" events,
// and starts the I/O and network dispatcher loops.
// NOTE(review): led_controller is allocated with `new` and no matching
// delete appears in this class as shown -- confirm whether
// register_hardware_controller transfers ownership.
LED::LED(const uint32_t id, const uint32_t pin)
{
this->m_virtual_device_id = id;
this->led_controller = new HardwareController(pin,HardwareController::IO_MODE::OUTPUT);
this->register_hardware_controller(*this->led_controller);
// Toggle this LED whenever a network event named "LED" arrives.
this->register_network_event_handler([](std::string event,VirtualDevice* vd)
{
if(event=="LED") {
LED *led = dynamic_cast<LED *>(vd);
led->toggle();
}
});
this->start_vd_io_dispatcher();
this->start_vd_network_dispatcher();
}
bool LED::toggle()
{
this->status = !this->status;
if(this->status)
{
this->led_controller->set_pin_level(1);
} else {
this->led_controller->set_pin_level(0);
}
return this->status;
}
void LED::turn_on()
{
this->status = true;
this->led_controller->set_pin_level(1);
}
void LED::turn_off()
{
this->status = false;
this->led_controller->set_pin_level(0);
}
|
;--------------------------------------------------------
; File Created by SDCC : free open source ANSI-C Compiler
; Version 4.0.0 #11528 (MINGW64)
;--------------------------------------------------------
; MODULE 07_function
.optsdcc -mgbz80
; Generated using the rgbds tokens.
; We have to define these here as sdcc doesn't make them global by default
GLOBAL __mulschar
GLOBAL __muluchar
GLOBAL __mulint
GLOBAL __divschar
GLOBAL __divuchar
GLOBAL __divsint
GLOBAL __divuint
GLOBAL __modschar
GLOBAL __moduchar
GLOBAL __modsint
GLOBAL __moduint
GLOBAL __mullong
GLOBAL __modslong
GLOBAL __divslong
GLOBAL banked_call
GLOBAL banked_ret
;--------------------------------------------------------
; Public variables in this module
;--------------------------------------------------------
GLOBAL _variable
GLOBAL _list_i_limit
GLOBAL _list_i
;--------------------------------------------------------
; special function registers
;--------------------------------------------------------
;--------------------------------------------------------
; ram data
;--------------------------------------------------------
SECTION "src/07-function.c_DATA",BSS
;--------------------------------------------------------
; absolute external ram data
;--------------------------------------------------------
SECTION "DABS (ABS)",CODE
;--------------------------------------------------------
; global & static initialisations
;--------------------------------------------------------
SECTION "HOME",CODE
SECTION "GSINIT",CODE
SECTION "GSFINAL",CODE
SECTION "GSINIT",CODE
;--------------------------------------------------------
; Home
;--------------------------------------------------------
SECTION "src/07-function.c_HOME",HOME
SECTION "src/07-function.c_HOME",HOME
;--------------------------------------------------------
; code
;--------------------------------------------------------
SECTION "src/07-function.c_CODE",CODE
;src/07-function.c:3: void list_i(void){
; ---------------------------------
; Function list_i
; ---------------------------------
; void list_i(void) -- store 0..9 successively into the global _j.
; SDCC register allocation: A = i (loop counter).
_list_i::
;src/07-function.c:6: i = 0;
xor a, a
;src/07-function.c:8: while (i < 10) {
; The run of labels below is SDCC artifact output (merged basic blocks).
.l00107:
.l00109:
.l00111:
.l00113:
.l00115:
.l00101:
; carry set (i < 10) -> continue loop, else fall out via .l00104
cp a, $0A
jp C, .l00102
jp .l00104
.l00102:
;src/07-function.c:9: j = i;
ld hl, _j
ld [hl], a
;src/07-function.c:10: ++i;
inc a
jp .l00101
.l00103:
.l00104:
;src/07-function.c:12: }
ret
;src/07-function.c:15: void list_i_limit(unsigned char limit){
; ---------------------------------
; Function list_i_limit
; ---------------------------------
; void list_i_limit(unsigned char limit) -- store 0..limit-1 successively
; into the global _j.
; SDCC register allocation: C = i; the byte argument `limit` is read from
; the stack at [sp+2].
_list_i_limit::
;src/07-function.c:18: i = 0;
ld c, $00
;src/07-function.c:20: while (i < limit) {
; The run of labels below is SDCC artifact output (merged basic blocks).
.l00107:
.l00109:
.l00111:
.l00113:
.l00115:
.l00101:
ld a, c
; HL -> the `limit` argument on the stack
ld hl, [sp+2]
; carry set when i < limit -> continue loop
sub a, [hl]
jp C, .l00102
jp .l00104
.l00102:
;src/07-function.c:21: j = i;
ld hl, _j
ld [hl], c
;src/07-function.c:22: ++i;
inc c
jp .l00101
.l00103:
.l00104:
;src/07-function.c:24: }
ret
;src/07-function.c:28: void variable(void)
; ---------------------------------
; Function variable
; ---------------------------------
; void variable(void) -- calls list_i() and list_i_limit(123).
; The single byte argument is passed on the stack: PUSH AF pushes A (123)
; plus the flags byte, and INC SP drops the flags byte so only the
; argument remains; the caller cleans it up with the second INC SP.
_variable::
;src/07-function.c:30: list_i();
call _list_i
;src/07-function.c:31: list_i_limit(123);
ld a, $7B
push af
inc sp
call _list_i_limit
inc sp
;src/07-function.c:33: return;
.l00101:
;src/07-function.c:34: }
ret
SECTION "src/07-function.c_CODE",CODE
SECTION "CABS (ABS)",CODE
|
;/*------------------------------------------------------------*/
;/* filename - sysint.asm */
;/* */
;/* function(s) */
;/* TSystemError member function */
;/*------------------------------------------------------------*/
;
; Turbo Vision - Version 2.0
;
; Copyright (c) 1994 by Borland International
; All Rights Reserved.
;
TITLE SYSINT
IFNDEF __FLAT__
PUBLIC @TSystemError@suspend$qv
PUBLIC @TSystemError@resume$qv
PUBLIC @TSystemError@Int24PMThunk$qv
PUBLIC @TSystemError@setupDPMI$qv
PUBLIC @TSystemError@shutdownDPMI$qv
EXTRN _AuxPrintR : FAR
EXTRN @TSystemError@Int24Regs : INT_REGS
EXTRN @TSystemError@Int24RMThunk : CODEPTR
EXTRN @TSystemError@Int24RMThunkSel : WORD
EXTRN @TSystemError@Int24RMCallback : CODEPTR
EXTRN @TSystemError@ctrlBreakHit : BYTE
EXTRN @TSystemError@saveCtrlBreak : BYTE
EXTRN @TSystemError@sysErrorFunc : CODEPTR
EXTRN @TSystemError@inIDE : BYTE
EXTRN @THardwareInfo@getBiosEquipmentFlag$qi : FAR
EXTRN @THardwareInfo@getBiosSelector$qv : FAR
EXTRN @THardwareInfo@dpmiFlag : BYTE
ENDIF
INCLUDE TV.INC
; JUMPS
IFNDEF __FLAT__
; Keyboard scan codes
scSpaceKey EQU 39H
scInsKey EQU 52H
scDelKey EQU 53H
; Keyboard shift flags
kbShiftKey EQU 03H
kbCtrlKey EQU 04H
kbAltKey EQU 08H
; ROM BIOS workspace
KeyFlags EQU (BYTE PTR 17H)
KeyBufHead EQU (WORD PTR 1AH)
KeyBufTail EQU (WORD PTR 1CH)
KeyBufOrgPtr EQU (WORD PTR 80H)
KeyBufEndPtr EQU (WORD PTR 82H)
; DOS function call classes
cNothing EQU 0 ;No check needed
cName EQU 2 ;Check name at DS:DX
cHandle EQU 4 ;Check handle in BX
cDrive EQU 6 ;Check drive in DL
ENDIF
; Data segment
DATASEG
; Externals
IFNDEF __FLAT__
; Structure definition for calling DPMI function 0300.
INT_REGS STRUC
_di dd ?
_si dd ?
_bp dd ?
dd ?
_bx dd ?
_dx dd ?
_cx dd ?
_ax dd ?
_flags dw ?
_es dw ?
_ds dw ?
_fs dw ?
_gs dw ?
_ip dw ?
_cs dw ?
_sp dw ?
_ss dw ?
INT_REGS ENDS
SaveInt09 DD ? ;Saved INT 09H vector
SaveInt1B DD ? ;Saved INT 1BH vector
SaveInt21 DD ? ;Saved INT 21H vector
SaveInt23 DD ? ;Saved INT 23H vector
SaveInt24 DD ? ;Saved INT 24H vector
SaveInt24R DD ? ;Saved INT 24H realmode vector for DPMI16.
SaveInt10 DD ? ;Saved INT 10H vector
critFlag DW ? ;Critical error code (FF = no error)
critDrive DW ? ;Drive on which critical error occurred.
GInt21Stack DB 0400H DUP (0FFH)
OldSS DW ?
OldSP DW ?
NewSS DW SEG GInt21Stack
NewSP DW OFFSET GInt21Stack+03FEH
SavedFlags DW ?
InGInt21 DB 0
ENDIF
; Code segment
CODESEG
IFNDEF __FLAT__
; Keyboard conversion table
KeyConvertTab LABEL BYTE
DB scSpaceKey,kbAltKey
DW 0200H
DB scInsKey,kbCtrlKey
DW 0400H
DB scInsKey,kbShiftKey
DW 0500H
DB scDelKey,kbCtrlKey
DW 0600H
DB scDelKey,kbShiftKey
DW 0700H
KeyConvertCnt EQU ($-KeyConvertTab)/4
; DOS function call class table
FuncClassTab LABEL BYTE
DB cDrive ;36H - Get disk free space
DB cNothing
DB cNothing
DB cName ;39H - Make directory
DB cName ;3AH - Remove directory
DB cName ;3BH - Change directory
DB cName ;3CH - Create file
DB cName ;3DH - Open file
DB cHandle ;3EH - Close file
DB cHandle ;3FH - Read file
DB cHandle ;40H - Write file
DB cName ;41H - Delete file
DB cHandle ;42H - Seek file
DB cName ;43H - Change file attributes
DB cNothing
DB cNothing
DB cNothing
DB cDrive ;47H - Get current directory
DB cNothing
DB cNothing
DB cNothing
DB cName ;4BH - Load or execute program
DB cNothing
DB cNothing
DB cName ;4EH - Find first
DB cNothing
DB cNothing
DB cNothing
DB cNothing
DB cNothing
DB cNothing
DB cNothing
DB cName ;56H - Rename file
DB cHandle ;57H - Get/Set file date and time
; Function check routines table
FuncCheckTab LABEL ARGINT
Dnear CheckNothing
Dnear CheckName
Dnear CheckHandle
Dnear CheckDrive
DataSel DW @data
ENDIF
IFNDEF __FLAT__
; Install system error handlers
@TSystemError@resume$qv PROC ; 32-bit version is in SYSERR.CPP
PUSH SI
PUSH DI
; Save state of break flag and clear it.
MOV AX,3300H
INT 21H
MOV @TSystemError@saveCtrlBreak,DL
MOV AX,3301H
MOV DL,0
INT 21H
; Save & set Int 9 handler.
MOV AX, 3509H
INT 21H
MOV [WORD PTR SaveInt09], BX
MOV [WORD PTR SaveInt09+2], ES
; If running inside the DOS IDE, do not install Int 9 handler.
CMP @TSystemError@inIDE,0
JNE @@1
PUSH DS
MOV AX, 2509H
MOV DX, OFFSET Int09Handler
PUSH CS
POP DS
INT 21H
POP DS
; Save & set Int 1B handler.
@@1:
MOV AX, 351BH
INT 21H
MOV [WORD PTR SaveInt1B], BX
MOV [WORD PTR SaveInt1B+2], ES
PUSH DS
MOV AX, 251BH
MOV DX, OFFSET Int1BHandler
PUSH CS
POP DS
INT 21H
; Ensure the Ctrl+Break flag is Off by default.
CALL @THardwareInfo@getBiosSelector$qv
MOV DS, AX
AND BYTE PTR DS:[71H], 7FH
POP DS
; Save Int 21 handler.
MOV AX, 3521H
INT 21H
MOV [WORD PTR SaveInt21], BX
MOV [WORD PTR SaveInt21+2], ES
; DX is the offset of the handler that we'll install. If there is exactly
; one floppy drive, we install a special handler that chains to our global
; Int 21 handler. If there is more than one handler, then we simply install
; the global handler. The global handler chains to the old int 21 vector.
MOV DX, OFFSET GInt21Handler
CALL @THardwareInfo@getBiosEquipmentFlag$qi
AND AX,0C1H
DEC AX
JNE @@1A
MOV DX, OFFSET SDInt21Handler
@@1A: PUSH DS
MOV AX, 2521H
PUSH CS
POP DS
INT 21H
POP DS
; Save & set Int 23 handler.
@@2:
MOV AX, 3523H
INT 21H
MOV [WORD PTR SaveInt23], BX
MOV [WORD PTR SaveInt23+2], ES
PUSH DS
MOV AX, 2523H
MOV DX, OFFSET Int23Handler
PUSH CS
POP DS
INT 21H
POP DS
; Save & set Int 24 handler. This sets the protected mode version
; of the handler if we're running under DPMI16.
MOV AX, 3524H
INT 21H
MOV [WORD PTR SaveInt24], BX
MOV [WORD PTR SaveInt24+2], ES
PUSH DS
MOV AX, 2524H
MOV DX, OFFSET Int24Handler
PUSH CS
POP DS
INT 21H
POP DS
; If we're in DPMI16, we also need to set a real mode handler for
; Int 24.
CMP @THardwareInfo@dpmiFlag, 01H
JNE @@no_real_int24
CALL @TSystemError@installRealInt24$qv
@@no_real_int24:
; Save & set the Int 10 handler, check input status (to force an Int 23 if
; a Ctrl-C is in the buffer?), then reinstall the old Int 10 handler.
MOV AX, 3510H
INT 21H
MOV [WORD PTR SaveInt10], BX
MOV [WORD PTR SaveInt10+2], ES
PUSH DS
MOV AX, 2510H
MOV DX, OFFSET Int10Handler
PUSH CS
POP DS
INT 21H
POP DS
MOV AH,0BH
INT 21H
PUSH DS
MOV AX, 2510H
MOV DX, [WORD PTR SaveInt10]
MOV DS, [WORD PTR SaveInt10+2]
INT 21H
POP DS
; Exit...
POP DI
POP SI
RET
@TSystemError@resume$qv endp
; Install the real-mode INT 24H handler when running under DPMI16.
; Uses DPMI "simulate real mode interrupt" (INT 31H, AX=0300H) to run
; INT 21H in real mode twice: first AH=35H to read and save the current
; real-mode vector (into SaveInt24R), then AH=25H to point the vector at
; our real-mode callback (Int24RMCallback, allocated by setupDPMI).
@TSystemError@installRealInt24$qv PROC
USES DI
MOV CX, 19H ; Zero out Int24Regs.
PUSH DS ; Assume direction flag is clear!
POP ES
LEA DI, [@TSystemError@Int24Regs]
XOR AX, AX
REP STOSW
; Real-mode INT 21H, AH=35H: get current real-mode INT 24H vector.
MOV WORD PTR [@TSystemError@Int24Regs._ax], 3524H
MOV AX, 0300H
MOV BX, 0021H
XOR CX, CX
LEA DI, [@TSystemError@Int24Regs]
INT 31H
; Save the returned ES:BX vector for later restoration.
MOV AX, WORD PTR [@TSystemError@Int24Regs._bx]
MOV WORD PTR [SaveInt24R], AX
MOV AX, WORD PTR [@TSystemError@Int24Regs._es]
MOV WORD PTR [SaveInt24R+2], AX
MOV CX, 19H ; Zero out Int24Regs.
LEA DI, [@TSystemError@Int24Regs]
XOR AX, AX
REP STOSW
; Real-mode INT 21H, AH=25H: set INT 24H vector to our RM callback.
MOV WORD PTR [@TSystemError@Int24Regs._ax], 2524H
MOV AX, WORD PTR [@TSystemError@Int24RMCallback]
MOV WORD PTR [@TSystemError@Int24Regs._dx], AX
MOV AX, WORD PTR [@TSystemError@Int24RMCallback+2]
MOV WORD PTR [@TSystemError@Int24Regs._ds], AX
MOV AX, 0300H
MOV BX, 0021H
XOR CX, CX
LEA DI, [@TSystemError@Int24Regs]
INT 31H
RET
@TSystemError@installRealInt24$qv ENDP
; Restore the real-mode INT 24H vector saved by installRealInt24, again
; via DPMI "simulate real mode interrupt" (INT 31H, AX=0300H) driving
; real-mode INT 21H, AH=25H.
@TSystemError@removeRealInt24$qv PROC
USES DI
MOV CX, 19H ; zero out INT_REGS structure.
PUSH DS ; assume direction flag is clear!
POP ES
LEA DI, [@TSystemError@Int24Regs]
XOR AX, AX
REP STOSW
; DS:DX for the simulated call = saved real-mode vector.
MOV BX, WORD PTR [SaveInt24R]
MOV CX, WORD PTR [SaveInt24R+2]
MOV WORD PTR [@TSystemError@Int24Regs._ax], 2524H
MOV WORD PTR [@TSystemError@Int24Regs._dx], BX
MOV WORD PTR [@TSystemError@Int24Regs._ds], CX
MOV AX, 0300H
MOV BX, 0021H
XOR CX, CX
LEA DI, [@TSystemError@Int24Regs]
INT 31H
RET
@TSystemError@removeRealInt24$qv ENDP
; Remove system error handlers
; Remove system error handlers: restore the saved vectors for INT 09H,
; 1BH, 21H, 23H and 24H (each via INT 21H AH=25H with DS:DX = saved
; vector), undo the DPMI16 real-mode INT 24H hook if present, and put
; the DOS Ctrl-Break flag back to its saved state.
@TSystemError@suspend$qv PROC ; 32-bit version is in SYSERR.CPP
PUSH SI
PUSH DI
; Restore handlers for Int 9, 1B, 21, 23, 24.
PUSH DS
MOV AX, 2509H
MOV DX, [WORD PTR SaveInt09]
MOV DS, [WORD PTR SaveInt09+2]
INT 21H
POP DS
PUSH DS
MOV AX, 251BH
MOV DX, [WORD PTR SaveInt1B]
MOV DS, [WORD PTR SaveInt1B+2]
INT 21H
POP DS
PUSH DS
MOV AX, 2521H
MOV DX, [WORD PTR SaveInt21]
MOV DS, [WORD PTR SaveInt21+2]
INT 21H
POP DS
PUSH DS
MOV AX, 2523H
MOV DX, [WORD PTR SaveInt23]
MOV DS, [WORD PTR SaveInt23+2]
INT 21H
POP DS
PUSH DS
MOV AX, 2524H
MOV DX, [WORD PTR SaveInt24]
MOV DS, [WORD PTR SaveInt24+2]
INT 21H
POP DS
; If we're in DPMI16, we also need to remove the real mode handler for
; Int 24.
CMP @THardwareInfo@dpmiFlag, 01H
JNE @@no_real_int24
CALL @TSystemError@removeRealInt24$qv
@@no_real_int24:
; Restore original state of Ctrl-Break flag.
MOV AX,3301H
MOV DL, @TSystemError@saveCtrlBreak
INT 21H
POP DI
POP SI
RET
@TSystemError@suspend$qv endp
ENDIF
IFNDEF __FLAT__
; INT 09H handler signature
DB 'TVI9'
; INT 09H handler
Int09Handler PROC FAR
PUSH ES
PUSH DS
PUSH DI
PUSH AX
; Get key state information before calling old handler to handle key.
; This is so that if the old handler stuffs a key that we want to alter,
; we can!
CALL @THardwareInfo@getBiosSelector$qv
MOV DS, AX
MOV ES, CS:[DataSel]
MOV DI, DS:[KeyBufTail]
IN AL, 60H
MOV AH, DS:[KeyFlags]
PUSHF
CALL ES:[SaveInt09]
; If key is not being released, exit.
TEST AL, 80H
JNE @@9
; Search key conversion table for a match of the scan code and correct shift
; state.
PUSH SI
PUSH CX
MOV SI, OFFSET CS:KeyConvertTab
MOV CX, KeyConvertCnt
@@1: CMP AL, CS:[SI]
JNE @@2
TEST AH, CS:[SI+1]
JNE @@3
@@2: ADD SI, 4
LOOP @@1
JMP SHORT @@8
; Having found match, if the old handler inserted a keystroke (in which
; case KeyBufTail will be different) just overwrite that keystroke in
; the buffer. If not, then we need to increment the keyboard buffer,
; adjusting for wraparound and possible overflow.
@@3: CMP DI, DS:KeyBufTail
JNE @@5
MOV AX, DI
INC AX
INC AX
CMP AX, DS:[KeyBufEndPtr]
JNE @@4
MOV AX, DS:[KeyBufOrgPtr]
@@4: CMP AX, DS:[KeyBufHead]
JE @@8
MOV DS:[KeyBufTail],AX
MOV DI, AX
; Write our "keystroke" into the buffer.
@@5: MOV AX, CS:[SI+2]
MOV [DI], AX
; Exit.
@@8: POP CX
POP SI
CALL @THardwareInfo@getBiosSelector$qv
MOV DS, AX
TEST BYTE PTR DS:[71H], 80H ; Ctrl+Break hit?
JZ @@9
AND BYTE PTR DS:[71H], 7FH
MOV DS, CS:[DataSel]
MOV @TSystemError@ctrlBreakHit, 1
@@9: POP AX
POP DI
POP DS
POP ES
IRET
Int09Handler ENDP
; INT 1BH handler
; INT 1BH (BIOS Ctrl-Break) handler.
; Clears bit 7 of the BIOS break flag (offset 71H in the BIOS data
; segment) and raises Turbo Vision's own ctrlBreakHit flag, which the
; event loop polls.
Int1BHandler PROC FAR
; Clear Bios Ctrl-Break flag and set Turbo Vision's Ctrl-Break flag.
PUSH DS
PUSH AX
CALL @THardwareInfo@getBiosSelector$qv
MOV DS,AX
AND BYTE PTR DS:[71H],7FH
MOV DS, CS:[DataSel]
MOV @TSystemError@ctrlBreakHit,1
POP AX
POP DS
IRET
Int1BHandler ENDP
; INT 21H handler for all systems. This assists in dealing with critical
; errors.
GInt21Handler PROC FAR
PUSH DS
PUSHF ; Check for re-entrance immediately.
MOV DS, CS:[DataSel]
CLI
CMP [InGInt21], 00H
JNE @@jmpToInt21
INC [InGInt21]
POPF
PUSH AX
PUSH BP
MOV BP, SP
MOV DS, CS:[DataSel]
MOV AX, [BP+4] ; Copy DS from old to new stack.
MOV WORD PTR [GInt21Stack+03FEH], AX
MOV AX, [BP+10] ; These are flags prior to INT 21H.
MOV [SavedFlags], AX ; Store the flags.
POP BP
POP AX
MOV [OldSS], SS
MOV [OldSP], SP
MOV SS, [NewSS]
MOV SP, [NewSP]
POP DS
@@entry:
; Save registers for retry.
PUSH AX
PUSH BX
PUSH CX
PUSH DX
PUSH SI
PUSH DI
PUSH ES
PUSH DS
PUSH BP
; Call old Int 21 handler & clear flag for critical error handler
@@callToInt21:
PUSHF
PUSH DS
PUSH AX ; Save AX for later.
PUSH BP
MOV BP, SP
MOV DS, CS:[DataSel]
MOV AX, [SavedFlags]
MOV [BP+6], AX ; Set the flags that the INT 21H call will get.
POP BP
POP AX
POP DS
PUSH CS
SUB SP, 6
PUSH BP
MOV BP, SP
PUSH DS
PUSH SI
MOV DS, CS:[DataSel]
MOV SI, WORD PTR [SaveInt21]
MOV WORD PTR [critFlag], 0FFH
MOV [BP+2], SI
MOV SI, WORD PTR [SaveInt21+2]
MOV [BP+4], SI
MOV SI, offset @@retFromDOS
MOV [BP+6], SI
POP SI
POP DS
POP BP
RETF
; Since this handler is not reentrant, this part of the code is
; used to jump to the old int 21 handler unconditionally if we
; are already active.
@@jmpToInt21:
POPF
POP DS
SUB SP, 4
PUSH BP
MOV BP, SP
PUSH DS
PUSH SI
MOV DS, CS:[DataSel]
MOV SI, [WORD PTR SaveInt21]
MOV [BP+2], SI
MOV SI, [WORD PTR SaveInt21+2]
MOV [BP+4], SI
POP SI
POP DS
POP BP
RETF
@@retFromDOS:
; Check for critical error during call.
PUSHF
PUSH DS
MOV DS, CS:[DataSel]
CMP WORD PTR [critFlag], 0FFH
JNE @@criticalError
@@done:
; Alter saved flags on stack to reflect new flag settings.
; Note that the stack at this point still has the old DS and Flags and
; DS points to the data segment.
PUSH BP
MOV BP, SP
PUSH AX
MOV AX, [BP+4]
MOV [SavedFlags], AX
POP AX
POP BP
; Switch back to old stack. (hence why we didn't have to clear off
; the other stack.)
MOV SS, [OldSS]
MOV SP, [OldSP]
MOV [InGInt21], 00H
PUSH BP
MOV BP, SP
PUSH AX
MOV AX, [SavedFlags]
MOV [BP+8], AX
; Pass back the DS returned by INT 21H. It may have changed.
MOV AX, WORD PTR [GInt21Stack+03EAH]
MOV [BP+2], AX
POP AX
POP BP
POP DS
IRET
@@criticalError:
; There was a critical error so ask the user for a response.
PUSH AX
PUSH BX
PUSH CX
PUSH DX
PUSH SI
PUSH DI
PUSH ES
MOV AX, [critDrive]
MOV DI, [critFlag]
PUSH AX
PUSH DI
MOV AX, SEG @TSystemError@sysErrorFunc
MOV ES, AX
CALL DWORD PTR ES:[@TSystemError@sysErrorFunc]
ADD SP, 4
OR AX, AX
JE @@retry
POP ES
POP DI
POP SI
POP DX
POP CX
POP BX
POP AX
JMP @@done
@@retry:
; User said retry, so restore the registers to entry conditions.
ADD SP, 12H
POP BP
POP DS
POP ES
POP DI
POP SI
POP DX
POP CX
POP BX
POP AX
JMP @@entry
GInt21Handler ENDP
; INT 21H handler for single drive systems.
; INT 21H front end for single-floppy systems.
; For DOS functions 36H..57H, classifies the call via FuncClassTab and
; runs the matching pre-check (FuncCheckTab) so that a required drive
; swap can be prompted for *before* DOS would raise a critical error.
; On check failure (CF set) the call is failed directly: AX=0FFFFH for
; function 36H (get disk free space), AX=5 (access denied) otherwise.
SDInt21Handler PROC FAR
PUSHF
STI
CMP AH,36H
JB @@1
CMP AH,57H
JA @@1
PUSH DX
PUSH BX
; BL = function class for AH (table is indexed from 36H).
MOV BL,AH
XOR BH,BH
MOV BL,CS:FuncClassTab[BX-36H]
CALL CS:FuncCheckTab[BX]
POP BX
POP DX
JC @@2
@@1: POPF
JMP GInt21Handler ; Chain to old handler.
@@2: POPF
STI
CMP AH,36H
MOV AX,0FFFFH
JE @@3
MOV AX,5
@@3: STC
RETF 2 ; discard caller's flags image; return with CF set
SDInt21Handler ENDP
; Check filename
; In: DS:DX -> filename. If the second character is ':', the name carries
; an explicit drive letter: AND 1FH / DEC map 'A'/'a' -> 0, 'B'/'b' -> 1,
; etc., and the absolute drive is validated; otherwise the current drive
; is validated.
CheckName:
MOV BX,DX
MOV DX,[BX] ; DL = first char, DH = second char
AND DL,1FH
DEC DL
CMP DH,':'
JE CheckAbsDrive
JMP SHORT CheckCurDrive
; Check handle
; In: file handle on the caller's stack at SS:[SP+2] (BX pushed by
; SDInt21Handler). Issues IOCTL "get device information" (AX=4400H)
; through GInt21Handler; DL bit 7 set means a character device (nothing
; to check), clear means a file on a block device whose drive number
; (low bits of DL, 0=A) is then validated by CheckAbsDrive.
CheckHandle:
MOV BX,SP
MOV BX,SS:[BX+2]
PUSH AX
PUSH DS
MOV AX,4400H
MOV DS, CS:[DataSel]
PUSHF
CALL GInt21Handler ; fixed mnemonic casing (was "CALl") for consistency
POP DS
POP AX
OR DL,DL
JNS CheckAbsDrive
JMP SHORT CheckNothing
; Check drive
; In DL = 1-based DOS drive number (0 = default drive).
CheckDrive:
DEC DL ; convert to 0-based; 0 (default) wraps negative
JNS CheckAbsDrive
; Check current drive
; Queries the current drive via INT 21H AH=19H (through GInt21Handler).
CheckCurDrive:
PUSH AX
PUSH DS
MOV AH,19H
MOV DS, CS:[DataSel]
PUSHF
CALL GInt21Handler
MOV DL,AL
POP DS
POP AX
; Check absolute drive
; In DL = Drive (0=A, 1=B, etc)
; Out CF = 1 if drive swap failed
; Only floppy drives (0 and 1) need checking. Bits 6-7 of the BIOS
; equipment byte (offset 10H) give the diskette count minus 1; with two
; or more drives no swap prompt is needed. Offset 104H in the BIOS data
; area holds the logical drive currently mapped to the single physical
; floppy; if it differs from DL, prompt via sysErrorFunc (message 21)
; and record the new mapping.
CheckAbsDrive:
CMP DL,2
JAE CheckNothing
PUSH DS
PUSH AX
CALL @THardwareInfo@getBiosSelector$qv
MOV DS,AX
PUSH CX
MOV CL, 6
MOV AL, DS:[10h]
SHR AL, CL
POP CX
CMP AL, 1
JAE @@1
MOV AL, DS:[104H]
CMP AL, 0FFH
JE @@1
CMP DL, AL
JE @@1
PUSH ES
PUSH DS
PUSH DI
PUSH SI
PUSH DX
PUSH CX
MOV DS, CS:[DataSel]
PUSH DX
MOV AX, 21
PUSH AX
MOV AX, SEG @TSystemError@sysErrorFunc
MOV ES, AX
CALL DWORD PTR ES:[@TSystemError@sysErrorFunc]
ADD SP, 4
POP CX
POP DX
POP SI
POP DI
POP DS
POP ES
MOV DS:[104H], DL ; remember the newly-selected logical drive
@@1:
POP AX
POP DS
; No check required
CheckNothing:
CLC
RET
; INT 23H and temporary INT 10H handler
; Both deliberately do nothing: Ctrl-C (INT 23H) is handled through the
; INT 09H/1BH hooks instead, and the INT 10H stub is only installed
; briefly by resume while polling input status.
Int10Handler:
Int23Handler:
IRET
; INT 24H handler
; INT 24H (DOS critical error) handler.
; Fetches the extended error code (INT 21H AH=59H), normalizes it,
; records it in critFlag together with the failing drive in critDrive,
; and returns AL=3 ("fail the call") so DOS hands the error back to the
; application instead of prompting Abort/Retry/Fail. GInt21Handler later
; notices critFlag != 0FFH and invokes sysErrorFunc for the user prompt.
Int24Handler PROC FAR
STI ;Enable interrupts
PUSH DS
PUSH DI
PUSH ES
PUSH AX
PUSH BX
PUSH CX
PUSH DX
PUSH BP
PUSH SI
XOR BX, BX ; Get extended error information
MOV AH, 59H
INT 21H
SUB AX, 13H ; Convert extended error code to 00H-14H
; Anything over 14H will display a generic message
; which isn't likely.
MOV DI, AX ; Save the extended error code
POP SI
POP BP
POP DX
POP CX
POP BX
POP AX
POP ES
; On entry (restored here): AH = error flags, BP:SI -> device header.
CMP DI, 09H ;Printer out of paper
JE @@0
TEST AH, 80H ;0 = disk error
JE @@1
MOV DS, BP
TEST BYTE PTR DS:[SI+5], 80H ;Block device gets error 0DH
JE @@1
@@0: MOV AL, 0FEH ; pseudo drive code meaning "not a disk drive"
@@1: MOV DS, CS:[DataSel]
MOV WORD PTR [critFlag], DI
MOV WORD PTR [critDrive], AX ; AH = 0, AL is drive code.
POP DI
POP DS
MOV AX, 03H ;Fail the error in all cases.
IRET
Int24Handler ENDP
@TSystemError@Int24PMThunk$qv PROC FAR
PUSH SI
PUSH DS
PUSH BP ; Save original BP
XOR AX, AX ; Allocate a descriptor for the device
MOV CX, 1 ; header's segment.
INT 31H
MOV BP, AX ; BP = descriptor for the segment
MOV DX, WORD PTR (INT_REGS PTR ES:[DI]._bp)
MOV CL, 4
SHL DX, CL
XOR CX, CX
MOV BX, BP
MOV AX, 7 ; Set base address (16 * real mode segment addr)
INT 31H
MOV BX, BP
XOR CX, CX
MOV DX, -1
MOV AX, 8 ; Set segment limit (64k)
INT 31H
MOV BX, DI
MOV AX, WORD PTR (INT_REGS PTR ES:[BX]._ax)
MOV DI, WORD PTR (INT_REGS PTR ES:[BX]._di)
MOV SI, WORD PTR (INT_REGS PTR ES:[BX]._si)
PUSHF
CALL Int24Handler
MOV DI, BX
PUSH AX
MOV BX, BP
MOV AX, 1
INT 31h ; Free the descriptor
POP AX
POP BP
POP DS
POP SI
MOV WORD PTR (INT_REGS PTR ES:[DI]._ax), AX
LODSW
MOV WORD PTR (INT_REGS PTR ES:[DI]._ip), AX
LODSW
MOV WORD PTR (INT_REGS PTR ES:[DI]._cs), AX
LODSW
MOV WORD PTR (INT_REGS PTR ES:[DI]._flags), AX
ADD WORD PTR (INT_REGS PTR ES:[DI]._sp), 6
IRET
@TSystemError@Int24PMThunk$qv ENDP
; Allocate a DPMI real-mode callback (INT 31H, AX=0303H) that thunks
; real-mode INT 24H into the protected-mode Int24PMThunk, using
; Int24Regs as the register translation buffer. The CX:DX real-mode
; address returned by DPMI is stored in Int24RMCallback for
; installRealInt24/shutdownDPMI.
@TSystemError@setupDPMI$qv PROC FAR
USES SI, DI
; Allocate real mode callback address.
PUSH DS
MOV AX, 0303H
PUSH DS
POP ES
MOV DI, OFFSET @TSystemError@Int24Regs
MOV SI, SEG @TSystemError@Int24PMThunk$qv
MOV DS, SI
MOV SI, OFFSET @TSystemError@Int24PMThunk$qv
INT 31H
POP DS
MOV WORD PTR [@TSystemError@Int24RMCallback], DX
MOV WORD PTR [@TSystemError@Int24RMCallback+2], CX
RET
@TSystemError@setupDPMI$qv ENDP
; Release the real-mode callback allocated by setupDPMI
; (DPMI INT 31H, AX=0304H, callback address in CX:DX).
@TSystemError@shutdownDPMI$qv PROC FAR
; Free real mode callback thunk.
MOV AX, 0304H
MOV CX, [@TSystemError@Int24RMCallback+2]
MOV DX, [@TSystemError@Int24RMCallback]
INT 31H
RET
@TSystemError@shutdownDPMI$qv ENDP
ENDIF
END
|
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#ifdef __INTEL_COMPILER
#pragma warning disable: 2586
#endif
#include "ie_const_infer_holder.hpp"
#include "ie_mul_const_infer.hpp"
#include "ie_add_const_infer.hpp"
#include "ie_div_const_infer.hpp"
#include "ie_const_const_infer.hpp"
#include "ie_shape_const_infer.hpp"
#include "ie_power_const_infer.hpp"
#include "ie_tile_const_infer.hpp"
#include "ie_reshape_const_infer.hpp"
#include "ie_gather_const_infer.hpp"
#include "ie_split_const_infer.hpp"
#include "ie_concat_const_infer.hpp"
#include "ie_in_place_const_infer.hpp"
#include "ie_strided_slice_const_infer.hpp"
#include "ie_fill_const_infer.hpp"
#include "ie_range_const_infer.hpp"
#include <list>
#include <memory>
#include <string>
namespace InferenceEngine {
namespace ShapeInfer {
// Return the lazily-created, process-wide holder of registered
// const-infer implementations.
ConstInferHolder::ImplsHolder::Ptr ConstInferHolder::GetImplsHolder() {
    static ImplsHolder::Ptr holder;
    if (!holder) {
        holder = std::make_shared<ImplsHolder>();
    }
    return holder;
}
// Register (or replace) a const-infer implementation under `name`.
void ConstInferHolder::AddImpl(const std::string& name, const IConstInferImpl::Ptr& impl) {
    auto holder = GetImplsHolder();
    holder->list[name] = impl;
}
// Enumerate the type names of every registered const-infer implementation.
std::list<std::string> ConstInferHolder::getConstInferTypes() {
    std::list<std::string> names;
    for (const auto& entry : GetImplsHolder()->list) {
        names.push_back(entry.first);
    }
    return names;
}
// Look up the registered const-infer implementation for `type`.
// Returns nullptr when no implementation is registered.
// Uses a single find() instead of the original find() + operator[]
// pair: avoids the redundant second lookup and, unlike operator[],
// can never insert an empty entry into the registry.
IConstInferImpl::Ptr ConstInferHolder::getConstInferImpl(const std::string& type) {
    auto& impls = ConstInferHolder::GetImplsHolder()->list;
    auto it = impls.find(type);
    if (it != impls.end()) {
        return it->second;
    }
    return nullptr;
}
REG_CONST_INFER_FOR_TYPE(MulConstInfer, Mul);
REG_CONST_INFER_FOR_TYPE(AddConstInfer, Add);
REG_CONST_INFER_FOR_TYPE(DivConstInfer, Div);
REG_CONST_INFER_FOR_TYPE(ShapeConstInfer, Shape);
REG_CONST_INFER_FOR_TYPE(ConstConstInfer, Const);
REG_CONST_INFER_FOR_TYPE(PowerConstInfer, Power);
REG_CONST_INFER_FOR_TYPE(TileConstInfer, Tile);
REG_CONST_INFER_FOR_TYPE(ReshapeConstInfer, Reshape);
REG_CONST_INFER_FOR_TYPE(GatherConstInfer, Gather);
REG_CONST_INFER_FOR_TYPE(SplitConstInfer, Split);
REG_CONST_INFER_FOR_TYPE(ConcatConstInfer, Concat);
REG_CONST_INFER_FOR_TYPE(InPlaceConstInfer, Unsqueeze);
REG_CONST_INFER_FOR_TYPE(InPlaceConstInfer, Squeeze);
REG_CONST_INFER_FOR_TYPE(StridedSliceConstInfer, StridedSlice);
REG_CONST_INFER_FOR_TYPE(FillConstInfer, Fill);
REG_CONST_INFER_FOR_TYPE(RangeConstInfer, Range);
} // namespace ShapeInfer
} // namespace InferenceEngine
|
; A256716: a(n) = n*(n+1)*(22*n-19)/6.
; Submitted by Jamie Morken(s1.)
; 0,1,25,94,230,455,791,1260,1884,2685,3685,4906,6370,8099,10115,12440,15096,18105,21489,25270,29470,34111,39215,44804,50900,57525,64701,72450,80794,89755,99355,109616,120560,132209,144585,157710,171606,186295,201799,218140,235340,253421,272405,292314,313170,334995,357811,381640,406504,432425,459425,487526,516750,547119,578655,611380,645316,680485,716909,754610,793610,833931,875595,918624,963040,1008865,1056121,1104830,1155014,1206695,1259895,1314636,1370940,1428829,1488325,1549450,1612226
; Evaluate the cubic by nested accumulation (finite differences):
; each loop pass does $2 += $3, $4 += $2, $1 += $4, so $1 is a
; third-order partial sum. $3 seeds the chain with n on the first pass
; and with the constant 19 on every later pass; after n passes
; $1 = n*(n+1)*(22*n-19)/6 (verified against the terms listed above,
; e.g. n=1 -> 1, n=2 -> 25).
mov $3,$0
lpb $0
sub $0,1
add $2,$3
add $4,$2
add $1,$4
mov $3,19
lpe
mov $0,$1
|
/*
* Copyright (c) 2015 Cryptonomex, Inc., and contributors.
*
* The MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <graphene/chain/database.hpp>
#include <graphene/chain/fba_accumulator_id.hpp>
#include <graphene/chain/account_object.hpp>
#include <graphene/chain/asset_object.hpp>
#include <graphene/chain/balance_object.hpp>
#include <graphene/chain/block_summary_object.hpp>
#include <graphene/chain/budget_record_object.hpp>
#include <graphene/chain/buyback_object.hpp>
#include <graphene/chain/chain_property_object.hpp>
#include <graphene/chain/committee_member_object.hpp>
#include <graphene/chain/confidential_object.hpp>
#include <graphene/chain/fba_object.hpp>
#include <graphene/chain/global_property_object.hpp>
#include <graphene/chain/market_object.hpp>
#include <graphene/chain/operation_history_object.hpp>
#include <graphene/chain/proposal_object.hpp>
#include <graphene/chain/special_authority_object.hpp>
#include <graphene/chain/transaction_object.hpp>
#include <graphene/chain/vesting_balance_object.hpp>
#include <graphene/chain/withdraw_permission_object.hpp>
#include <graphene/chain/witness_object.hpp>
#include <graphene/chain/witness_schedule_object.hpp>
#include <graphene/chain/worker_object.hpp>
#include <graphene/chain/account_evaluator.hpp>
#include <graphene/chain/asset_evaluator.hpp>
#include <graphene/chain/assert_evaluator.hpp>
#include <graphene/chain/balance_evaluator.hpp>
#include <graphene/chain/committee_member_evaluator.hpp>
#include <graphene/chain/confidential_evaluator.hpp>
#include <graphene/chain/custom_evaluator.hpp>
#include <graphene/chain/market_evaluator.hpp>
#include <graphene/chain/proposal_evaluator.hpp>
#include <graphene/chain/transfer_evaluator.hpp>
#include <graphene/chain/vesting_balance_evaluator.hpp>
#include <graphene/chain/withdraw_permission_evaluator.hpp>
#include <graphene/chain/witness_evaluator.hpp>
#include <graphene/chain/worker_evaluator.hpp>
#include <graphene/chain/protocol/fee_schedule.hpp>
#include <fc/smart_ref_impl.hpp>
#include <fc/uint128.hpp>
#include <fc/crypto/digest.hpp>
#include <boost/algorithm/string.hpp>
namespace graphene { namespace chain {
// C++ requires that static class variables declared and initialized
// in headers must also have a definition in a single source file,
// else linker errors will occur [1].
//
// The purpose of this source file is to collect such definitions in
// a single place.
//
// [1] http://stackoverflow.com/questions/8016780/undefined-reference-to-static-constexpr-char
const uint8_t account_object::space_id;
const uint8_t account_object::type_id;
const uint8_t asset_object::space_id;
const uint8_t asset_object::type_id;
const uint8_t block_summary_object::space_id;
const uint8_t block_summary_object::type_id;
const uint8_t call_order_object::space_id;
const uint8_t call_order_object::type_id;
const uint8_t committee_member_object::space_id;
const uint8_t committee_member_object::type_id;
const uint8_t force_settlement_object::space_id;
const uint8_t force_settlement_object::type_id;
const uint8_t global_property_object::space_id;
const uint8_t global_property_object::type_id;
const uint8_t limit_order_object::space_id;
const uint8_t limit_order_object::type_id;
const uint8_t operation_history_object::space_id;
const uint8_t operation_history_object::type_id;
const uint8_t proposal_object::space_id;
const uint8_t proposal_object::type_id;
const uint8_t transaction_object::space_id;
const uint8_t transaction_object::type_id;
const uint8_t vesting_balance_object::space_id;
const uint8_t vesting_balance_object::type_id;
const uint8_t withdraw_permission_object::space_id;
const uint8_t withdraw_permission_object::type_id;
const uint8_t witness_object::space_id;
const uint8_t witness_object::type_id;
const uint8_t worker_object::space_id;
const uint8_t worker_object::type_id;
// Register an evaluator object for every supported operation type.
// The evaluator table is sized to 255 slots up front so that
// register_evaluator<>() can index it directly.
// NOTE(review): whether each evaluator lands in the slot of its
// operation tag or in registration order is determined by
// register_evaluator<>() (declared elsewhere) -- do not reorder these
// calls without confirming.
void database::initialize_evaluators()
{
_operation_evaluators.resize(255);
register_evaluator<account_create_evaluator>();
register_evaluator<account_update_evaluator>();
register_evaluator<account_upgrade_evaluator>();
register_evaluator<account_whitelist_evaluator>();
register_evaluator<committee_member_create_evaluator>();
register_evaluator<committee_member_update_evaluator>();
register_evaluator<committee_member_update_global_parameters_evaluator>();
register_evaluator<custom_evaluator>();
register_evaluator<asset_create_evaluator>();
register_evaluator<asset_issue_evaluator>();
register_evaluator<asset_reserve_evaluator>();
register_evaluator<asset_update_evaluator>();
register_evaluator<asset_update_bitasset_evaluator>();
register_evaluator<asset_update_feed_producers_evaluator>();
register_evaluator<asset_settle_evaluator>();
register_evaluator<asset_global_settle_evaluator>();
register_evaluator<assert_evaluator>();
register_evaluator<limit_order_create_evaluator>();
register_evaluator<limit_order_cancel_evaluator>();
register_evaluator<call_order_update_evaluator>();
register_evaluator<transfer_evaluator>();
register_evaluator<override_transfer_evaluator>();
register_evaluator<asset_fund_fee_pool_evaluator>();
register_evaluator<asset_publish_feeds_evaluator>();
register_evaluator<proposal_create_evaluator>();
register_evaluator<proposal_update_evaluator>();
register_evaluator<proposal_delete_evaluator>();
register_evaluator<vesting_balance_create_evaluator>();
register_evaluator<vesting_balance_withdraw_evaluator>();
register_evaluator<witness_create_evaluator>();
register_evaluator<witness_update_evaluator>();
register_evaluator<withdraw_permission_create_evaluator>();
register_evaluator<withdraw_permission_claim_evaluator>();
register_evaluator<withdraw_permission_update_evaluator>();
register_evaluator<withdraw_permission_delete_evaluator>();
register_evaluator<worker_create_evaluator>();
register_evaluator<balance_claim_evaluator>();
register_evaluator<transfer_to_blind_evaluator>();
register_evaluator<transfer_from_blind_evaluator>();
register_evaluator<blind_transfer_evaluator>();
register_evaluator<asset_claim_fees_evaluator>();
}
// Create every object-database index used by the chain state, after
// clearing any existing indexes, and attach the secondary indexes
// (member/referrer lookups on accounts, approval lookup on proposals).
void database::initialize_indexes()
{
reset_indexes();
_undo_db.set_max_size( GRAPHENE_MIN_UNDO_HISTORY );
//Protocol object indexes
add_index< primary_index<asset_index> >();
add_index< primary_index<force_settlement_index> >();
auto acnt_index = add_index< primary_index<account_index> >();
acnt_index->add_secondary_index<account_member_index>();
acnt_index->add_secondary_index<account_referrer_index>();
add_index< primary_index<committee_member_index> >();
add_index< primary_index<witness_index> >();
add_index< primary_index<limit_order_index > >();
add_index< primary_index<call_order_index > >();
auto prop_index = add_index< primary_index<proposal_index > >();
prop_index->add_secondary_index<required_approval_index>();
add_index< primary_index<withdraw_permission_index > >();
add_index< primary_index<vesting_balance_index> >();
add_index< primary_index<worker_index> >();
add_index< primary_index<balance_index> >();
add_index< primary_index<blinded_balance_index> >();
//Implementation object indexes
add_index< primary_index<transaction_index > >();
add_index< primary_index<account_balance_index > >();
add_index< primary_index<asset_bitasset_data_index > >();
add_index< primary_index<simple_index<global_property_object >> >();
add_index< primary_index<simple_index<dynamic_global_property_object >> >();
add_index< primary_index<simple_index<account_statistics_object >> >();
add_index< primary_index<simple_index<asset_dynamic_data_object >> >();
add_index< primary_index<flat_index< block_summary_object >> >();
add_index< primary_index<simple_index<chain_property_object > > >();
add_index< primary_index<simple_index<witness_schedule_object > > >();
add_index< primary_index<simple_index<budget_record_object > > >();
add_index< primary_index< special_authority_index > >();
add_index< primary_index< buyback_index > >();
add_index< primary_index< simple_index< fba_accumulator_object > > >();
}
/**
 * Build the initial blockchain state from a genesis description.
 *
 * Creates the reserved special accounts and assets, the core asset, global /
 * dynamic / chain properties, then applies the genesis-declared accounts,
 * assets, balances, vesting balances, witnesses, committee members and
 * workers, and finally the FBA fee accumulators. Runs with the undo database
 * disabled and authority checks inhibited; fees are zeroed during
 * initialization and restored at the end.
 *
 * Fix vs. previous revision: the "genesis not balanced" elog interpolated
 * ${aname} without supplying a value for it, so the diagnostic could not
 * name the offending asset; the substitution ("aname", it->symbol) is now
 * provided.
 *
 * @param genesis_state  validated genesis description
 * @throws fc::exception (via FC_ASSERT) on an inconsistent genesis state,
 *         e.g. zero witnesses or a bitasset whose supply does not equal its debt.
 */
void database::init_genesis(const genesis_state_type& genesis_state)
{ try {
    FC_ASSERT( genesis_state.initial_timestamp != time_point_sec(), "Must initialize genesis timestamp." );
    FC_ASSERT( genesis_state.initial_timestamp.sec_since_epoch() % GRAPHENE_DEFAULT_BLOCK_INTERVAL == 0,
               "Genesis timestamp must be divisible by GRAPHENE_DEFAULT_BLOCK_INTERVAL." );
    FC_ASSERT(genesis_state.initial_witness_candidates.size() > 0,
              "Cannot start a chain with zero witnesses.");
    FC_ASSERT(genesis_state.initial_active_witnesses <= genesis_state.initial_witness_candidates.size(),
              "initial_active_witnesses is larger than the number of candidate witnesses.");

    _undo_db.disable();

    // RAII guard: skip authority checks while genesis operations are applied;
    // restores the caller's skip flags when this scope exits (even on throw).
    struct auth_inhibitor {
        auth_inhibitor(database& db) : db(db), old_flags(db.node_properties().skip_flags)
        { db.node_properties().skip_flags |= skip_authority_check; }
        ~auth_inhibitor()
        { db.node_properties().skip_flags = old_flags; }
    private:
        database& db;
        uint32_t old_flags;
    } inhibitor(*this);

    transaction_evaluation_state genesis_eval_state(this);

    // Pre-size the block summary ring buffer: one slot per 16-bit summary id.
    flat_index<block_summary_object>& bsi = get_mutable_index_type< flat_index<block_summary_object> >();
    bsi.resize(0xffff+1);

    // Create blockchain accounts.  The first balance object holds the entire
    // core supply until genesis balances are handed out below.
    create<account_balance_object>([](account_balance_object& b) {
        b.balance = GRAPHENE_MAX_SHARE_SUPPLY;
    });
    const account_object& committee_account =
        create<account_object>( [&](account_object& n) {
            n.membership_expiration_date = time_point_sec::maximum();
            n.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
            n.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
            n.owner.weight_threshold = 1;
            n.active.weight_threshold = 1;
            n.name = "committee-account";
            n.statistics = create<account_statistics_object>( [&](account_statistics_object& s){ s.owner = n.id; }).id;
        });
    FC_ASSERT(committee_account.get_id() == GRAPHENE_COMMITTEE_ACCOUNT);
    FC_ASSERT(create<account_object>([this](account_object& a) {
        a.name = "witness-account";
        a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
        a.owner.weight_threshold = 1;
        a.active.weight_threshold = 1;
        a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_WITNESS_ACCOUNT;
        a.membership_expiration_date = time_point_sec::maximum();
        a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
        a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
    }).get_id() == GRAPHENE_WITNESS_ACCOUNT);
    FC_ASSERT(create<account_object>([this](account_object& a) {
        a.name = "relaxed-committee-account";
        a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
        a.owner.weight_threshold = 1;
        a.active.weight_threshold = 1;
        a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_RELAXED_COMMITTEE_ACCOUNT;
        a.membership_expiration_date = time_point_sec::maximum();
        a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
        a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
    }).get_id() == GRAPHENE_RELAXED_COMMITTEE_ACCOUNT);
    FC_ASSERT(create<account_object>([this](account_object& a) {
        a.name = "null-account";
        a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
        a.owner.weight_threshold = 1;
        a.active.weight_threshold = 1;
        a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_NULL_ACCOUNT;
        a.membership_expiration_date = time_point_sec::maximum();
        a.network_fee_percentage = 0;
        a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT;
    }).get_id() == GRAPHENE_NULL_ACCOUNT);
    // temp-account has zero thresholds: anyone may act for it.
    FC_ASSERT(create<account_object>([this](account_object& a) {
        a.name = "temp-account";
        a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
        a.owner.weight_threshold = 0;
        a.active.weight_threshold = 0;
        a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_TEMP_ACCOUNT;
        a.membership_expiration_date = time_point_sec::maximum();
        a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
        a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
    }).get_id() == GRAPHENE_TEMP_ACCOUNT);
    FC_ASSERT(create<account_object>([this](account_object& a) {
        a.name = "proxy-to-self";
        a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
        a.owner.weight_threshold = 1;
        a.active.weight_threshold = 1;
        a.registrar = a.lifetime_referrer = a.referrer = GRAPHENE_NULL_ACCOUNT;
        a.membership_expiration_date = time_point_sec::maximum();
        a.network_fee_percentage = 0;
        a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT;
    }).get_id() == GRAPHENE_PROXY_TO_SELF_ACCOUNT);

    // Create more special accounts: burn the remaining reserved account ids by
    // creating and immediately removing placeholder objects.
    while( true )
    {
        uint64_t id = get_index<account_object>().get_next_id().instance();
        if( id >= genesis_state.immutable_parameters.num_special_accounts )
            break;
        const account_object& acct = create<account_object>([&](account_object& a) {
            a.name = "special-account-" + std::to_string(id);
            a.statistics = create<account_statistics_object>([&](account_statistics_object& s){s.owner = a.id;}).id;
            a.owner.weight_threshold = 1;
            a.active.weight_threshold = 1;
            a.registrar = a.lifetime_referrer = a.referrer = account_id_type(id);
            a.membership_expiration_date = time_point_sec::maximum();
            a.network_fee_percentage = GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
            a.lifetime_referrer_fee_percentage = GRAPHENE_100_PERCENT - GRAPHENE_DEFAULT_NETWORK_PERCENT_OF_FEE;
        });
        FC_ASSERT( acct.get_id() == account_id_type(id) );
        remove( acct );
    }

    // Create core asset
    const asset_dynamic_data_object& dyn_asset =
        create<asset_dynamic_data_object>([&](asset_dynamic_data_object& a) {
            a.current_supply = GRAPHENE_MAX_SHARE_SUPPLY;
        });
    const asset_object& core_asset =
        create<asset_object>( [&]( asset_object& a ) {
            a.symbol = GRAPHENE_SYMBOL;
            a.options.max_supply = genesis_state.max_core_supply;
            a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS;
            a.options.flags = 0;
            a.options.issuer_permissions = 0;
            a.issuer = GRAPHENE_NULL_ACCOUNT;
            a.options.core_exchange_rate.base.amount = 1;
            a.options.core_exchange_rate.base.asset_id = asset_id_type(0);
            a.options.core_exchange_rate.quote.amount = 1;
            a.options.core_exchange_rate.quote.asset_id = asset_id_type(0);
            a.dynamic_asset_data_id = dyn_asset.id;
        });
    assert( asset_id_type(core_asset.id) == asset().asset_id );
    assert( get_balance(account_id_type(), asset_id_type()) == asset(dyn_asset.current_supply) );

    // Create more special assets (burn the reserved asset ids, as above).
    while( true )
    {
        uint64_t id = get_index<asset_object>().get_next_id().instance();
        if( id >= genesis_state.immutable_parameters.num_special_assets )
            break;
        const asset_dynamic_data_object& dyn_asset =
            create<asset_dynamic_data_object>([&](asset_dynamic_data_object& a) {
                a.current_supply = 0;
            });
        const asset_object& asset_obj = create<asset_object>( [&]( asset_object& a ) {
            a.symbol = "SPECIAL" + std::to_string( id );
            a.options.max_supply = 0;
            a.precision = GRAPHENE_BLOCKCHAIN_PRECISION_DIGITS;
            a.options.flags = 0;
            a.options.issuer_permissions = 0;
            a.issuer = GRAPHENE_NULL_ACCOUNT;
            a.options.core_exchange_rate.base.amount = 1;
            a.options.core_exchange_rate.base.asset_id = asset_id_type(0);
            a.options.core_exchange_rate.quote.amount = 1;
            a.options.core_exchange_rate.quote.asset_id = asset_id_type(0);
            a.dynamic_asset_data_id = dyn_asset.id;
        });
        FC_ASSERT( asset_obj.get_id() == asset_id_type(id) );
        remove( asset_obj );
    }

    chain_id_type chain_id = genesis_state.compute_chain_id();

    // Create global properties
    create<global_property_object>([&](global_property_object& p) {
        p.parameters = genesis_state.initial_parameters;
        // Set fees to zero initially, so that genesis initialization needs not pay them
        // We'll fix it at the end of the function
        p.parameters.current_fees->zero_all_fees();
    });
    create<dynamic_global_property_object>([&](dynamic_global_property_object& p) {
        p.time = genesis_state.initial_timestamp;
        p.dynamic_flags = 0;
        p.witness_budget = 0;
        p.recent_slots_filled = fc::uint128::max_value();
    });

    FC_ASSERT( (genesis_state.immutable_parameters.min_witness_count & 1) == 1, "min_witness_count must be odd" );
    FC_ASSERT( (genesis_state.immutable_parameters.min_committee_member_count & 1) == 1, "min_committee_member_count must be odd" );

    create<chain_property_object>([&](chain_property_object& p)
    {
        p.chain_id = chain_id;
        p.immutable_parameters = genesis_state.immutable_parameters;
    } );
    create<block_summary_object>([&](block_summary_object&) {});

    // Create initial accounts
    for( const auto& account : genesis_state.initial_accounts )
    {
        account_create_operation cop;
        cop.name = account.name;
        cop.registrar = GRAPHENE_TEMP_ACCOUNT;
        cop.owner = authority(1, account.owner_key, 1);
        // A default-constructed active key means "reuse the owner key".
        if( account.active_key == public_key_type() )
        {
            cop.active = cop.owner;
            cop.options.memo_key = account.owner_key;
        }
        else
        {
            cop.active = authority(1, account.active_key, 1);
            cop.options.memo_key = account.active_key;
        }
        account_id_type account_id(apply_operation(genesis_eval_state, cop).get<object_id_type>());
        if( account.is_lifetime_member )
        {
            account_upgrade_operation op;
            op.account_to_upgrade = account_id;
            op.upgrade_to_lifetime_member = true;
            apply_operation(genesis_eval_state, op);
        }
    }

    // Helper function to get account ID by name
    const auto& accounts_by_name = get_index_type<account_index>().indices().get<by_name>();
    auto get_account_id = [&accounts_by_name](const string& name) {
        auto itr = accounts_by_name.find(name);
        FC_ASSERT(itr != accounts_by_name.end(),
                  "Unable to find account '${acct}'. Did you forget to add a record for it to initial_accounts?",
                  ("acct", name));
        return itr->get_id();
    };
    // Helper function to get asset ID by symbol
    const auto& assets_by_symbol = get_index_type<asset_index>().indices().get<by_symbol>();
    const auto get_asset_id = [&assets_by_symbol](const string& symbol) {
        auto itr = assets_by_symbol.find(symbol);
        // TODO: This is temporary for handling BTS snapshot
        if( symbol == "BTS" )
            itr = assets_by_symbol.find(GRAPHENE_SYMBOL);
        FC_ASSERT(itr != assets_by_symbol.end(),
                  "Unable to find asset '${sym}'. Did you forget to add a record for it to initial_assets?",
                  ("sym", symbol));
        return itr->get_id();
    };

    // Tallied per asset while genesis objects are created; reconciled below.
    map<asset_id_type, share_type> total_supplies;
    map<asset_id_type, share_type> total_debts;

    // Create initial assets
    for( const genesis_state_type::initial_asset_type& asset : genesis_state.initial_assets )
    {
        asset_id_type new_asset_id = get_index_type<asset_index>().get_next_id();
        total_supplies[ new_asset_id ] = 0;
        asset_dynamic_data_id_type dynamic_data_id;
        optional<asset_bitasset_data_id_type> bitasset_data_id;
        if( asset.is_bitasset )
        {
            // Each collateral record gets its own synthetic holder account and
            // an open call (margin) position backing the bitasset.
            int collateral_holder_number = 0;
            total_debts[ new_asset_id ] = 0;
            for( const auto& collateral_rec : asset.collateral_records )
            {
                account_create_operation cop;
                cop.name = asset.symbol + "-collateral-holder-" + std::to_string(collateral_holder_number);
                boost::algorithm::to_lower(cop.name);
                cop.registrar = GRAPHENE_TEMP_ACCOUNT;
                cop.owner = authority(1, collateral_rec.owner, 1);
                cop.active = cop.owner;
                account_id_type owner_account_id = apply_operation(genesis_eval_state, cop).get<object_id_type>();
                modify( owner_account_id(*this).statistics(*this), [&]( account_statistics_object& o ) {
                    o.total_core_in_orders = collateral_rec.collateral;
                });
                create<call_order_object>([&](call_order_object& c) {
                    c.borrower = owner_account_id;
                    c.collateral = collateral_rec.collateral;
                    c.debt = collateral_rec.debt;
                    c.call_price = price::call_price(chain::asset(c.debt, new_asset_id),
                                                     chain::asset(c.collateral, core_asset.id),
                                                     GRAPHENE_DEFAULT_MAINTENANCE_COLLATERAL_RATIO);
                });
                total_supplies[ asset_id_type(0) ] += collateral_rec.collateral;
                total_debts[ new_asset_id ] += collateral_rec.debt;
                ++collateral_holder_number;
            }
            bitasset_data_id = create<asset_bitasset_data_object>([&](asset_bitasset_data_object& b) {
                b.options.short_backing_asset = core_asset.id;
                b.options.minimum_feeds = GRAPHENE_DEFAULT_MINIMUM_FEEDS;
            }).id;
        }
        dynamic_data_id = create<asset_dynamic_data_object>([&](asset_dynamic_data_object& d) {
            d.accumulated_fees = asset.accumulated_fees;
        }).id;
        total_supplies[ new_asset_id ] += asset.accumulated_fees;
        create<asset_object>([&](asset_object& a) {
            a.symbol = asset.symbol;
            a.options.description = asset.description;
            a.precision = asset.precision;
            string issuer_name = asset.issuer_name;
            a.issuer = get_account_id(issuer_name);
            a.options.max_supply = asset.max_supply;
            a.options.flags = witness_fed_asset;
            a.options.issuer_permissions = charge_market_fee | override_authority | white_list | transfer_restricted | disable_confidential |
                ( asset.is_bitasset ? disable_force_settle | global_settle | witness_fed_asset | committee_fed_asset : 0 );
            a.dynamic_asset_data_id = dynamic_data_id;
            a.bitasset_data_id = bitasset_data_id;
        });
    }

    // Create initial balances
    share_type total_allocation;
    for( const auto& handout : genesis_state.initial_balances )
    {
        const auto asset_id = get_asset_id(handout.asset_symbol);
        // NOTE(review): trimmed previously-unused lambda captures.
        create<balance_object>([&handout,asset_id](balance_object& b) {
            b.balance = asset(handout.amount, asset_id);
            b.owner = handout.owner;
        });
        total_supplies[ asset_id ] += handout.amount;
    }

    // Create initial vesting balances
    for( const genesis_state_type::initial_vesting_balance_type& vest : genesis_state.initial_vesting_balances )
    {
        const auto asset_id = get_asset_id(vest.asset_symbol);
        create<balance_object>([&](balance_object& b) {
            b.owner = vest.owner;
            b.balance = asset(vest.amount, asset_id);
            linear_vesting_policy policy;
            policy.begin_timestamp = vest.begin_timestamp;
            policy.vesting_cliff_seconds = 0;
            policy.vesting_duration_seconds = vest.vesting_duration_seconds;
            policy.begin_balance = vest.begin_balance;
            b.vesting_policy = std::move(policy);
        });
        total_supplies[ asset_id ] += vest.amount;
    }

    // If any core asset was distributed, zero out the committee account's
    // placeholder balance; otherwise claim the full supply was allocated.
    if( total_supplies[ asset_id_type(0) ] > 0 )
    {
        adjust_balance(GRAPHENE_COMMITTEE_ACCOUNT, -get_balance(GRAPHENE_COMMITTEE_ACCOUNT,{}));
    }
    else
    {
        total_supplies[ asset_id_type(0) ] = GRAPHENE_MAX_SHARE_SUPPLY;
    }

    // Every bitasset's tallied supply must equal its tallied debt.
    const auto& idx = get_index_type<asset_index>().indices().get<by_symbol>();
    auto it = idx.begin();
    bool has_imbalanced_assets = false;
    while( it != idx.end() )
    {
        if( it->bitasset_data_id.valid() )
        {
            auto supply_itr = total_supplies.find( it->id );
            auto debt_itr = total_debts.find( it->id );
            FC_ASSERT( supply_itr != total_supplies.end() );
            FC_ASSERT( debt_itr != total_debts.end() );
            if( supply_itr->second != debt_itr->second )
            {
                has_imbalanced_assets = true;
                // Fixed: supply the ${aname} substitution so the log names the asset.
                elog( "Genesis for asset ${aname} is not balanced\n"
                      " Debt is ${debt}\n"
                      " Supply is ${supply}\n",
                      ("aname", it->symbol)
                      ("debt", debt_itr->second)
                      ("supply", supply_itr->second)
                    );
            }
        }
        ++it;
    }
    FC_ASSERT( !has_imbalanced_assets );

    // Save tallied supplies
    for( const auto& item : total_supplies )
    {
        const auto asset_id = item.first;
        const auto total_supply = item.second;
        modify( get( asset_id ), [ & ]( asset_object& asset ) {
            modify( get( asset.dynamic_asset_data_id ), [ & ]( asset_dynamic_data_object& asset_data ) {
                asset_data.current_supply = total_supply;
            } );
        } );
    }

    // Create special witness account (burns the reserved null-witness id).
    const witness_object& wit = create<witness_object>([&](witness_object& w) {});
    FC_ASSERT( wit.id == GRAPHENE_NULL_WITNESS );
    remove(wit);

    // Create initial witnesses
    std::for_each(genesis_state.initial_witness_candidates.begin(), genesis_state.initial_witness_candidates.end(),
                  [&](const genesis_state_type::initial_witness_type& witness) {
        witness_create_operation op;
        op.witness_account = get_account_id(witness.owner_name);
        op.block_signing_key = witness.block_signing_key;
        apply_operation(genesis_eval_state, op);
    });

    // Create initial committee members
    std::for_each(genesis_state.initial_committee_candidates.begin(), genesis_state.initial_committee_candidates.end(),
                  [&](const genesis_state_type::initial_committee_member_type& member) {
        committee_member_create_operation op;
        op.committee_member_account = get_account_id(member.owner_name);
        apply_operation(genesis_eval_state, op);
    });

    // Create initial workers
    std::for_each(genesis_state.initial_worker_candidates.begin(), genesis_state.initial_worker_candidates.end(),
                  [&](const genesis_state_type::initial_worker_type& worker)
    {
        worker_create_operation op;
        op.owner = get_account_id(worker.owner_name);
        op.work_begin_date = genesis_state.initial_timestamp;
        op.work_end_date = time_point_sec::maximum();
        op.daily_pay = worker.daily_pay;
        op.name = "Genesis-Worker-" + worker.owner_name;
        op.initializer = vesting_balance_worker_initializer{uint16_t(0)};
        apply_operation(genesis_eval_state, std::move(op));
    });

    // Set active witnesses
    modify(get_global_properties(), [&](global_property_object& p) {
        for( uint32_t i = 1; i <= genesis_state.initial_active_witnesses; ++i )
        {
            p.active_witnesses.insert(witness_id_type(i));
        }
    });

    // Enable fees (they were zeroed at the start of initialization).
    modify(get_global_properties(), [&genesis_state](global_property_object& p) {
        p.parameters.current_fees = genesis_state.initial_parameters.current_fees;
    });

    // Create witness scheduler
    create<witness_schedule_object>([&]( witness_schedule_object& wso )
    {
        for( const witness_id_type& wid : get_global_properties().active_witnesses )
            wso.current_shuffled_witnesses.push_back( wid );
    });

    // Create FBA counters — one per designated fee-backed-asset operation; the
    // asserts pin the creation order to the fba_accumulator_id enumeration.
    create<fba_accumulator_object>([&]( fba_accumulator_object& acc )
    {
        FC_ASSERT( acc.id == fba_accumulator_id_type( fba_accumulator_id_transfer_to_blind ) );
        acc.accumulated_fba_fees = 0;
#ifdef GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET
        acc.designated_asset = GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET;
#endif
    });
    create<fba_accumulator_object>([&]( fba_accumulator_object& acc )
    {
        FC_ASSERT( acc.id == fba_accumulator_id_type( fba_accumulator_id_blind_transfer ) );
        acc.accumulated_fba_fees = 0;
#ifdef GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET
        acc.designated_asset = GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET;
#endif
    });
    create<fba_accumulator_object>([&]( fba_accumulator_object& acc )
    {
        FC_ASSERT( acc.id == fba_accumulator_id_type( fba_accumulator_id_transfer_from_blind ) );
        acc.accumulated_fba_fees = 0;
#ifdef GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET
        acc.designated_asset = GRAPHENE_FBA_STEALTH_DESIGNATED_ASSET;
#endif
    });
    FC_ASSERT( get_index<fba_accumulator_object>().get_next_id() == fba_accumulator_id_type( fba_accumulator_id_count ) );

    debug_dump();
    _undo_db.enable();
} FC_CAPTURE_AND_RETHROW() }
} }
|
/*-------------------------------------------------------------------------
* drawElements Quality Program EGL Module
* ---------------------------------------
*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Surface query tests.
*//*--------------------------------------------------------------------*/
#include "teglQuerySurfaceTests.hpp"
#include "teglSimpleConfigCase.hpp"
#include "egluNativeDisplay.hpp"
#include "egluNativeWindow.hpp"
#include "egluNativePixmap.hpp"
#include "egluStrUtil.hpp"
#include "egluUtil.hpp"
#include "tcuTestLog.hpp"
#include "tcuTestContext.hpp"
#include "tcuCommandLine.hpp"
#include "deUniquePtr.hpp"
#include "deRandom.hpp"
#include <string>
#include <vector>
namespace deqp
{
namespace egl
{
using eglu::ConfigInfo;
using tcu::TestLog;
// Write a single "name: value" line for one EGL surface attribute.
static void logSurfaceAttribute (tcu::TestLog& log, EGLint attribute, EGLint value)
{
	const eglu::SurfaceAttribValueFmt	valueFormatter	(attribute, value);
	const char* const					attribName		= eglu::getSurfaceAttribName(attribute);

	log << TestLog::Message << " " << attribName << ": " << valueFormatter << TestLog::EndMessage;
}
// Query and log each attribute in the given list for the surface.
static void logSurfaceAttributes (tcu::TestLog& log, const tcu::egl::Surface& surface, const EGLint* attributes, int num)
{
	const EGLint* const end = attributes + num;

	for (const EGLint* attrIter = attributes; attrIter != end; ++attrIter)
		logSurfaceAttribute(log, *attrIter, surface.getAttribute(*attrIter));
}
// Log the attributes that every EGL surface type exposes.
static void logCommonSurfaceAttributes (tcu::TestLog& log, const tcu::egl::Surface& surface)
{
	static const EGLint s_commonAttribs[] =
	{
		EGL_CONFIG_ID,
		EGL_WIDTH,
		EGL_HEIGHT,
		EGL_HORIZONTAL_RESOLUTION,
		EGL_VERTICAL_RESOLUTION,
		EGL_MULTISAMPLE_RESOLVE,
		EGL_PIXEL_ASPECT_RATIO,
		EGL_RENDER_BUFFER,
		EGL_SWAP_BEHAVIOR,
		EGL_VG_ALPHA_FORMAT,
		EGL_VG_COLORSPACE
	};

	logSurfaceAttributes(log, surface, &s_commonAttribs[0], DE_LENGTH_OF_ARRAY(s_commonAttribs));
}
// Log the attributes that only pbuffer surfaces expose.
static void logPbufferSurfaceAttributes (tcu::TestLog& log, const tcu::egl::Surface& surface)
{
	static const EGLint s_pbufferAttribs[] =
	{
		EGL_LARGEST_PBUFFER,
		EGL_TEXTURE_FORMAT,
		EGL_TEXTURE_TARGET,
		EGL_MIPMAP_TEXTURE,
		EGL_MIPMAP_LEVEL,
	};

	logSurfaceAttributes(log, surface, &s_pbufferAttribs[0], DE_LENGTH_OF_ARRAY(s_pbufferAttribs));
}
// Base class for surface query tests: shared attribute-validation helpers
// used by the window / pixmap / pbuffer test variants below.
class QuerySurfaceCase : public SimpleConfigCase
{
public:
QuerySurfaceCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds);
// Validate attributes common to all surface types against the creating config.
void checkCommonAttributes (const tcu::egl::Surface& surface, const ConfigInfo& info);
// Verify that pbuffer-only attribute queries on a non-pbuffer surface raise
// no error and do not modify the output value.
void checkNonPbufferAttributes (EGLDisplay display, const tcu::egl::Surface& surface);
};
// Forwards construction to SimpleConfigCase; adds no state of its own.
QuerySurfaceCase::QuerySurfaceCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: SimpleConfigCase(eglTestCtx, name, description, configIds)
{
}
// Validate the attributes every surface type must report, per the EGL spec:
// config id, size, resolution, aspect ratio, render buffer, multisample
// resolve, swap behavior, and OpenVG alpha format / color space. Each failed
// check logs a message and marks the test failed; checking continues so all
// problems are reported in one run.
void QuerySurfaceCase::checkCommonAttributes (const tcu::egl::Surface& surface, const ConfigInfo& info)
{
tcu::TestLog& log = m_testCtx.getLog();
// Attributes which are common to all surface types
// Config ID
{
const EGLint id = surface.getAttribute(EGL_CONFIG_ID);
if (id != info.configId)
{
log << TestLog::Message << " Fail, config ID " << id << " does not match the one used to create the surface" << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Config ID mismatch");
}
}
// Width and height
{
const EGLint width = surface.getWidth();
const EGLint height = surface.getHeight();
if (width <= 0 || height <= 0)
{
log << TestLog::Message << " Fail, invalid surface size " << width << "x" << height << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid surface size");
}
}
// Horizontal and vertical resolution
// EGL_UNKNOWN is a legal "not available" value for resolution queries.
{
const EGLint hRes = surface.getAttribute(EGL_HORIZONTAL_RESOLUTION);
const EGLint vRes = surface.getAttribute(EGL_VERTICAL_RESOLUTION);
if ((hRes <= 0 || vRes <= 0) && (hRes != EGL_UNKNOWN && vRes != EGL_UNKNOWN))
{
log << TestLog::Message << " Fail, invalid surface resolution " << hRes << "x" << vRes << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid surface resolution");
}
}
// Pixel aspect ratio
{
const EGLint pixelRatio = surface.getAttribute(EGL_PIXEL_ASPECT_RATIO);
if (pixelRatio <= 0 && pixelRatio != EGL_UNKNOWN)
{
log << TestLog::Message << " Fail, invalid pixel aspect ratio " << surface.getAttribute(EGL_PIXEL_ASPECT_RATIO) << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid pixel aspect ratio");
}
}
// Render buffer
{
const EGLint renderBuffer = surface.getAttribute(EGL_RENDER_BUFFER);
if (renderBuffer != EGL_BACK_BUFFER && renderBuffer != EGL_SINGLE_BUFFER)
{
log << TestLog::Message << " Fail, invalid render buffer value " << renderBuffer << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid render buffer");
}
}
// Multisample resolve
// Box filter is only legal when the config advertises support for it.
{
const EGLint multisampleResolve = surface.getAttribute(EGL_MULTISAMPLE_RESOLVE);
if (multisampleResolve != EGL_MULTISAMPLE_RESOLVE_DEFAULT && multisampleResolve != EGL_MULTISAMPLE_RESOLVE_BOX)
{
log << TestLog::Message << " Fail, invalid multisample resolve value " << multisampleResolve << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid multisample resolve");
}
if (multisampleResolve == EGL_MULTISAMPLE_RESOLVE_BOX && !(info.surfaceType & EGL_MULTISAMPLE_RESOLVE_BOX_BIT))
{
log << TestLog::Message << " Fail, multisample resolve is reported as box filter but configuration does not support it." << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid multisample resolve");
}
}
// Swap behavior
// Preserved buffers are only legal when the config advertises support.
{
const EGLint swapBehavior = surface.getAttribute(EGL_SWAP_BEHAVIOR);
if (swapBehavior != EGL_BUFFER_DESTROYED && swapBehavior != EGL_BUFFER_PRESERVED)
{
log << TestLog::Message << " Fail, invalid swap behavior value " << swapBehavior << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid swap behavior");
}
if (swapBehavior == EGL_BUFFER_PRESERVED && !(info.surfaceType & EGL_SWAP_BEHAVIOR_PRESERVED_BIT))
{
log << TestLog::Message << " Fail, swap behavior is reported as preserve but configuration does not support it." << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid swap behavior");
}
}
// OpenVG alpha format
{
const EGLint vgAlphaFormat = surface.getAttribute(EGL_VG_ALPHA_FORMAT);
if (vgAlphaFormat != EGL_VG_ALPHA_FORMAT_NONPRE && vgAlphaFormat != EGL_VG_ALPHA_FORMAT_PRE)
{
log << TestLog::Message << " Fail, invalid OpenVG alpha format value " << vgAlphaFormat << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid OpenVG alpha format");
}
if (vgAlphaFormat == EGL_VG_ALPHA_FORMAT_PRE && !(info.surfaceType & EGL_VG_ALPHA_FORMAT_PRE_BIT))
{
log << TestLog::Message << " Fail, OpenVG is set to use premultiplied alpha but configuration does not support it." << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid OpenVG alpha format");
}
}
// OpenVG color space
{
const EGLint vgColorspace = surface.getAttribute(EGL_VG_COLORSPACE);
if (vgColorspace != EGL_VG_COLORSPACE_sRGB && vgColorspace != EGL_VG_COLORSPACE_LINEAR)
{
log << TestLog::Message << " Fail, invalid OpenVG color space value " << vgColorspace << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid OpenVG color space");
}
if (vgColorspace == EGL_VG_COLORSPACE_LINEAR && !(info.surfaceType & EGL_VG_COLORSPACE_LINEAR_BIT))
{
log << TestLog::Message << " Fail, OpenVG is set to use a linear color space but configuration does not support it." << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid OpenVG color space");
}
}
}
/**
 * Verify pbuffer-only attribute queries on a non-pbuffer (window/pixmap) surface.
 *
 * Per the EGL spec, querying a pbuffer-specific attribute on a window or
 * pixmap surface must succeed without error and must leave the caller's
 * output value untouched. Both conditions are checked for every attribute.
 *
 * Fix vs. previous revision: the loop-terminating `break` was outside the
 * error check, so only the first attribute was ever queried and the
 * "value not modified" check was unreachable; it now fires only on error.
 */
void QuerySurfaceCase::checkNonPbufferAttributes (EGLDisplay display, const tcu::egl::Surface& surface)
{
	const EGLint	uninitializedMagicValue	= -42;	// sentinel a correct query must not overwrite
	tcu::TestLog&	log						= m_testCtx.getLog();
	EGLint			value					= uninitializedMagicValue;

	static const EGLint pbufferAttribs[] = {
		EGL_LARGEST_PBUFFER,
		EGL_TEXTURE_FORMAT,
		EGL_TEXTURE_TARGET,
		EGL_MIPMAP_TEXTURE,
		EGL_MIPMAP_LEVEL,
	};

	for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(pbufferAttribs); ndx++)
	{
		const EGLint		attribute	= pbufferAttribs[ndx];
		const std::string	name		= eglu::getSurfaceAttribName(pbufferAttribs[ndx]);

		eglQuerySurface(display, surface.getEGLSurface(), attribute, &value);

		{
			const EGLint error = eglGetError();

			if (error != EGL_SUCCESS)
			{
				log << TestLog::Message << " Fail, querying " << name << " from a non-pbuffer surface should not result in an error, received "
					<< eglu::getErrorStr(error) << TestLog::EndMessage;
				m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Illegal error condition");
				break;	// fixed: abort only on error; previously broke unconditionally after the first attribute
			}
		}

		// "For a window or pixmap surface, the contents of value are not modified."
		if (value != uninitializedMagicValue)
		{
			log << TestLog::Message << " Fail, return value contents were modified when querying " << name << " from a non-pbuffer surface." << TestLog::EndMessage;
			m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Illegal modification of return value");
		}
	}
}
class QuerySurfaceSimpleWindowCase : public QuerySurfaceCase
{
public:
QuerySurfaceSimpleWindowCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: QuerySurfaceCase(eglTestCtx, name, description, configIds)
{
}
void executeForConfig (tcu::egl::Display& display, EGLConfig config)
{
tcu::TestLog& log = m_testCtx.getLog();
const int width = 64;
const int height = 64;
ConfigInfo info;
display.describeConfig(config, info);
log << TestLog::Message << "Creating window surface with config ID " << info.configId << TestLog::EndMessage;
TCU_CHECK_EGL();
de::UniquePtr<eglu::NativeWindow> window(m_eglTestCtx.createNativeWindow(display.getEGLDisplay(), config, DE_NULL, width, height, eglu::parseWindowVisibility(m_testCtx.getCommandLine())));
tcu::egl::WindowSurface surface(display, eglu::createWindowSurface(m_eglTestCtx.getNativeDisplay(), *window, display.getEGLDisplay(), config, DE_NULL));
logCommonSurfaceAttributes(log, surface);
checkCommonAttributes(surface, info);
checkNonPbufferAttributes(display.getEGLDisplay(), surface);
}
};
class QuerySurfaceSimplePixmapCase : public QuerySurfaceCase
{
public:
QuerySurfaceSimplePixmapCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: QuerySurfaceCase(eglTestCtx, name, description, configIds)
{
}
void executeForConfig (tcu::egl::Display& display, EGLConfig config)
{
tcu::TestLog& log = m_testCtx.getLog();
const int width = 64;
const int height = 64;
ConfigInfo info;
display.describeConfig(config, info);
log << TestLog::Message << "Creating pixmap surface with config ID " << info.configId << TestLog::EndMessage;
TCU_CHECK_EGL();
de::UniquePtr<eglu::NativePixmap> pixmap (m_eglTestCtx.createNativePixmap(display.getEGLDisplay(), config, DE_NULL, width, height));
tcu::egl::PixmapSurface surface (display, eglu::createPixmapSurface(m_eglTestCtx.getNativeDisplay(), *pixmap, display.getEGLDisplay(), config, DE_NULL));
logCommonSurfaceAttributes(log, surface);
checkCommonAttributes(surface, info);
checkNonPbufferAttributes(display.getEGLDisplay(), surface);
}
};
// Creates a pbuffer surface for each tested config and validates both the
// common attributes and the pbuffer-specific ones (largest-pbuffer flag,
// texture format/target, mipmap settings).
class QuerySurfaceSimplePbufferCase : public QuerySurfaceCase
{
public:
QuerySurfaceSimplePbufferCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: QuerySurfaceCase(eglTestCtx, name, description, configIds)
{
}
void executeForConfig (tcu::egl::Display& display, EGLConfig config)
{
tcu::TestLog& log = m_testCtx.getLog();
// Requested size; may be reduced below to the implementation's maximum.
int width = 64;
int height = 64;
ConfigInfo info;
display.describeConfig(config, info);
log << TestLog::Message << "Creating pbuffer surface with config ID " << info.configId << TestLog::EndMessage;
TCU_CHECK_EGL();
// Clamp to maximums reported by implementation
width = deMin32(width, display.getConfigAttrib(config, EGL_MAX_PBUFFER_WIDTH));
height = deMin32(height, display.getConfigAttrib(config, EGL_MAX_PBUFFER_HEIGHT));
if (width == 0 || height == 0)
{
log << TestLog::Message << " Fail, maximum pbuffer size of " << width << "x" << height << " reported" << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid maximum pbuffer size");
return;
}
const EGLint attribs[] =
{
EGL_WIDTH, width,
EGL_HEIGHT, height,
EGL_TEXTURE_FORMAT, EGL_NO_TEXTURE,
EGL_NONE
};
// Inner scope bounds the surface's lifetime so it is destroyed before return.
{
tcu::egl::PbufferSurface surface(display, config, attribs);
logCommonSurfaceAttributes(log, surface);
logPbufferSurfaceAttributes(log, surface);
checkCommonAttributes(surface, info);
// Pbuffer-specific attributes
// Largest pbuffer
{
const EGLint largestPbuffer = surface.getAttribute(EGL_LARGEST_PBUFFER);
if (largestPbuffer != EGL_FALSE && largestPbuffer != EGL_TRUE)
{
log << TestLog::Message << " Fail, invalid largest pbuffer value " << largestPbuffer << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid largest pbuffer");
}
}
// Texture format
{
const EGLint textureFormat = surface.getAttribute(EGL_TEXTURE_FORMAT);
if (textureFormat != EGL_NO_TEXTURE && textureFormat != EGL_TEXTURE_RGB && textureFormat != EGL_TEXTURE_RGBA)
{
log << TestLog::Message << " Fail, invalid texture format value " << textureFormat << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid texture format");
}
}
// Texture target
{
const EGLint textureTarget = surface.getAttribute(EGL_TEXTURE_TARGET);
if (textureTarget != EGL_NO_TEXTURE && textureTarget != EGL_TEXTURE_2D)
{
log << TestLog::Message << " Fail, invalid texture target value " << textureTarget << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid texture target");
}
}
// Mipmap texture
{
const EGLint mipmapTexture = surface.getAttribute(EGL_MIPMAP_TEXTURE);
if (mipmapTexture != EGL_FALSE && mipmapTexture != EGL_TRUE)
{
log << TestLog::Message << " Fail, invalid mipmap texture value " << mipmapTexture << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid mipmap texture");
}
}
}
}
};
// Base class for the set_attribute tests: derived classes create a concrete
// surface type (window/pixmap/pbuffer) and call testAttributes() on it.
class SurfaceAttribCase : public SimpleConfigCase
{
public:
SurfaceAttribCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds);
virtual ~SurfaceAttribCase (void) {}
// Shared attribute get/set validation, implemented below.
void testAttributes (tcu::egl::Surface& surface, const ConfigInfo& info);
};
SurfaceAttribCase::SurfaceAttribCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: SimpleConfigCase(eglTestCtx, name, description, configIds)
{
}
// Queries and (where the config supports it) sets the mutable surface
// attributes — EGL_MIPMAP_LEVEL, EGL_MULTISAMPLE_RESOLVE, EGL_SWAP_BEHAVIOR —
// failing the test on any deviation from the EGL 1.4 spec.
void SurfaceAttribCase::testAttributes (tcu::egl::Surface& surface, const ConfigInfo& info)
{
const tcu::egl::Display& display = surface.getDisplay();
tcu::TestLog& log = m_testCtx.getLog();
const int majorVersion = display.getEGLMajorVersion();
const int minorVersion = display.getEGLMinorVersion();
// NOTE(review): 'rnd' is currently unused by the checks below.
de::Random rnd (deStringHash(m_name.c_str()) ^ 0xf215918f);
if (majorVersion == 1 && minorVersion == 0)
{
log << TestLog::Message << "No attributes can be set in EGL 1.0" << TestLog::EndMessage;
return;
}
// Mipmap level (defined only for OpenGL ES client APIs)
if (info.renderableType & EGL_OPENGL_ES_BIT || info.renderableType & EGL_OPENGL_ES2_BIT)
{
// Seed 'value' with a sentinel so we can detect whether the query wrote it.
const EGLint initialValue = 0xDEADBAAD;
EGLint value = initialValue;
TCU_CHECK_EGL_CALL(eglQuerySurface(surface.getDisplay().getEGLDisplay(), surface.getEGLSurface(), EGL_MIPMAP_LEVEL, &value));
logSurfaceAttribute(log, EGL_MIPMAP_LEVEL, value);
if (dynamic_cast<tcu::egl::PbufferSurface*>(&surface))
{
if (value != 0)
{
log << TestLog::Message << " Fail, initial mipmap level value should be 0, is " << value << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid default mipmap level");
}
}
else if (value != initialValue)
{
// For non-pbuffer surfaces the query must leave the output untouched.
log << TestLog::Message << " Fail, eglQuerySurface changed value when querying EGL_MIPMAP_LEVEL for non-pbuffer surface. Result: " << value << ". Expected: " << initialValue << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "EGL_MIPMAP_LEVEL query modified result for non-pbuffer surface.");
}
// Setting the level must succeed (the value itself is only used at
// eglBindTexImage time), so any error here is a failure.
eglSurfaceAttrib(display.getEGLDisplay(), surface.getEGLSurface(), EGL_MIPMAP_LEVEL, 1);
{
const EGLint error = eglGetError();
if (error != EGL_SUCCESS)
{
log << TestLog::Message << " Fail, setting EGL_MIPMAP_LEVEL should not result in an error, received " << eglu::getErrorStr(error) << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Illegal error condition");
}
}
}
// Only mipmap level can be set in EGL 1.3 and lower
if (majorVersion == 1 && minorVersion <= 3) return;
// Multisample resolve
{
const EGLint value = surface.getAttribute(EGL_MULTISAMPLE_RESOLVE);
logSurfaceAttribute(log, EGL_MULTISAMPLE_RESOLVE, value);
if (value != EGL_MULTISAMPLE_RESOLVE_DEFAULT)
{
log << TestLog::Message << " Fail, initial multisample resolve value should be EGL_MULTISAMPLE_RESOLVE_DEFAULT, is "
<< eglu::getSurfaceAttribValueStr(EGL_MULTISAMPLE_RESOLVE, value) << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid default multisample resolve");
}
// BUG FIX: EGL_MULTISAMPLE_RESOLVE_BOX_BIT is an EGL_SURFACE_TYPE bit;
// testing it against renderableType (ES/VG/GL API bits) could never
// match, so the set-attribute path was silently skipped.
if (info.surfaceType & EGL_MULTISAMPLE_RESOLVE_BOX_BIT)
{
log << TestLog::Message << " Box filter is supported by surface, trying to set." << TestLog::EndMessage;
surface.setAttribute(EGL_MULTISAMPLE_RESOLVE, EGL_MULTISAMPLE_RESOLVE_BOX);
if (surface.getAttribute(EGL_MULTISAMPLE_RESOLVE) != EGL_MULTISAMPLE_RESOLVE_BOX)
{
log << TestLog::Message << " Fail, tried to enable box filter but value did not change.";
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Failed to set multisample resolve");
}
}
}
// Swap behavior
{
const EGLint value = surface.getAttribute(EGL_SWAP_BEHAVIOR);
logSurfaceAttribute(log, EGL_SWAP_BEHAVIOR, value);
// BUG FIX: EGL_SWAP_BEHAVIOR_PRESERVED_BIT is likewise a surface-type
// bit, not a renderable-type bit.
if (info.surfaceType & EGL_SWAP_BEHAVIOR_PRESERVED_BIT)
{
// Toggle to the other legal value and verify the change sticks.
const EGLint nextValue = (value == EGL_BUFFER_DESTROYED) ? EGL_BUFFER_PRESERVED : EGL_BUFFER_DESTROYED;
surface.setAttribute(EGL_SWAP_BEHAVIOR, nextValue);
if (surface.getAttribute(EGL_SWAP_BEHAVIOR) != nextValue)
{
log << TestLog::Message << " Fail, tried to set swap behavior to " << eglu::getSurfaceAttribStr(nextValue) << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Failed to set swap behavior");
}
}
}
}
// Runs the shared attribute get/set checks (SurfaceAttribCase::testAttributes)
// on a freshly created 64x64 native window surface.
class SurfaceAttribWindowCase : public SurfaceAttribCase
{
public:
SurfaceAttribWindowCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: SurfaceAttribCase(eglTestCtx, name, description, configIds)
{
}
void executeForConfig (tcu::egl::Display& display, EGLConfig config)
{
const int width = 64;
const int height = 64;
tcu::TestLog& log = m_testCtx.getLog();
ConfigInfo info;
display.describeConfig(config, info);
log << TestLog::Message << "Creating window surface with config ID " << info.configId << TestLog::EndMessage;
TCU_CHECK_EGL();
de::UniquePtr<eglu::NativeWindow> window (m_eglTestCtx.createNativeWindow(display.getEGLDisplay(), config, DE_NULL, width, height, eglu::parseWindowVisibility(m_testCtx.getCommandLine())));
tcu::egl::WindowSurface surface (display, eglu::createWindowSurface(m_eglTestCtx.getNativeDisplay(), *window, display.getEGLDisplay(), config, DE_NULL));
testAttributes(surface, info);
}
};
class SurfaceAttribPixmapCase : public SurfaceAttribCase
{
public:
SurfaceAttribPixmapCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: SurfaceAttribCase(eglTestCtx, name, description, configIds)
{
}
void executeForConfig (tcu::egl::Display& display, EGLConfig config)
{
tcu::TestLog& log = m_testCtx.getLog();
const int width = 64;
const int height = 64;
ConfigInfo info;
display.describeConfig(config, info);
log << TestLog::Message << "Creating pixmap surface with config ID " << info.configId << TestLog::EndMessage;
TCU_CHECK_EGL();
de::UniquePtr<eglu::NativePixmap> pixmap (m_eglTestCtx.createNativePixmap(display.getEGLDisplay(), config, DE_NULL, width, height));
tcu::egl::PixmapSurface surface (display, eglu::createPixmapSurface(m_eglTestCtx.getNativeDisplay(), *pixmap, display.getEGLDisplay(), config, DE_NULL));
testAttributes(surface, info);
}
};
// Runs the shared attribute get/set checks on a pbuffer surface; pbuffer
// dimensions are clamped to the implementation maxima, and a zero maximum
// is reported as a failure.
class SurfaceAttribPbufferCase : public SurfaceAttribCase
{
public:
SurfaceAttribPbufferCase (EglTestContext& eglTestCtx, const char* name, const char* description, const std::vector<EGLint>& configIds)
: SurfaceAttribCase(eglTestCtx, name, description, configIds)
{
}
void executeForConfig (tcu::egl::Display& display, EGLConfig config)
{
tcu::TestLog& log = m_testCtx.getLog();
ConfigInfo info;
display.describeConfig(config, info);
log << TestLog::Message << "Creating pbuffer surface with config ID " << info.configId << TestLog::EndMessage;
TCU_CHECK_EGL();
// Clamp to maximums reported by implementation
const int width = deMin32(64, display.getConfigAttrib(config, EGL_MAX_PBUFFER_WIDTH));
const int height = deMin32(64, display.getConfigAttrib(config, EGL_MAX_PBUFFER_HEIGHT));
if (width == 0 || height == 0)
{
log << TestLog::Message << " Fail, maximum pbuffer size of " << width << "x" << height << " reported" << TestLog::EndMessage;
m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid maximum pbuffer size");
return;
}
const EGLint attribs[] =
{
EGL_WIDTH, width,
EGL_HEIGHT, height,
EGL_TEXTURE_FORMAT, EGL_NO_TEXTURE,
EGL_NONE
};
tcu::egl::PbufferSurface surface (display, config, attribs);
testAttributes(surface, info);
}
};
// Root group for all surface-query tests; children are added in init().
QuerySurfaceTests::QuerySurfaceTests (EglTestContext& eglTestCtx)
: TestCaseGroup(eglTestCtx, "query_surface", "Surface Query Tests")
{
}
QuerySurfaceTests::~QuerySurfaceTests (void)
{
}
// Returns the config IDs of every EGLConfig on 'display' whose
// EGL_SURFACE_TYPE bitfield contains the requested surfaceType bit(s).
std::vector<EGLint> getConfigs (const tcu::egl::Display& display, EGLint surfaceType)
{
std::vector<EGLConfig> allConfigs;
display.getConfigs(allConfigs);
std::vector<EGLint> matchingIds;
for (std::vector<EGLConfig>::const_iterator cfgIter = allConfigs.begin(); cfgIter != allConfigs.end(); ++cfgIter)
{
ConfigInfo info;
display.describeConfig(*cfgIter, info);
if (info.surfaceType & surfaceType)
matchingIds.push_back(info.configId);
}
return matchingIds;
}
// Builds the test tree: two top-level groups ("simple" queries and
// "set_attribute"), each with window/pixmap/pbuffer sub-groups. Every
// sub-group gets one case per default named config-id set that matches the
// corresponding EGL_SURFACE_TYPE filter.
void QuerySurfaceTests::init (void)
{
// Simple queries
{
tcu::TestCaseGroup* simpleGroup = new tcu::TestCaseGroup(m_testCtx, "simple", "Simple queries");
addChild(simpleGroup);
// Window
{
tcu::TestCaseGroup* windowGroup = new tcu::TestCaseGroup(m_testCtx, "window", "Window surfaces");
simpleGroup->addChild(windowGroup);
eglu::FilterList filters;
filters << (eglu::ConfigSurfaceType() & EGL_WINDOW_BIT);
std::vector<NamedConfigIdSet> configIdSets;
NamedConfigIdSet::getDefaultSets(configIdSets, m_eglTestCtx.getConfigs(), filters);
for (std::vector<NamedConfigIdSet>::iterator i = configIdSets.begin(); i != configIdSets.end(); i++)
windowGroup->addChild(new QuerySurfaceSimpleWindowCase(m_eglTestCtx, i->getName(), i->getDescription(), i->getConfigIds()));
}
// Pixmap
{
tcu::TestCaseGroup* pixmapGroup = new tcu::TestCaseGroup(m_testCtx, "pixmap", "Pixmap surfaces");
simpleGroup->addChild(pixmapGroup);
eglu::FilterList filters;
filters << (eglu::ConfigSurfaceType() & EGL_PIXMAP_BIT);
std::vector<NamedConfigIdSet> configIdSets;
NamedConfigIdSet::getDefaultSets(configIdSets, m_eglTestCtx.getConfigs(), filters);
for (std::vector<NamedConfigIdSet>::iterator i = configIdSets.begin(); i != configIdSets.end(); i++)
pixmapGroup->addChild(new QuerySurfaceSimplePixmapCase(m_eglTestCtx, i->getName(), i->getDescription(), i->getConfigIds()));
}
// Pbuffer
{
tcu::TestCaseGroup* pbufferGroup = new tcu::TestCaseGroup(m_testCtx, "pbuffer", "Pbuffer surfaces");
simpleGroup->addChild(pbufferGroup);
eglu::FilterList filters;
filters << (eglu::ConfigSurfaceType() & EGL_PBUFFER_BIT);
std::vector<NamedConfigIdSet> configIdSets;
NamedConfigIdSet::getDefaultSets(configIdSets, m_eglTestCtx.getConfigs(), filters);
for (std::vector<NamedConfigIdSet>::iterator i = configIdSets.begin(); i != configIdSets.end(); i++)
pbufferGroup->addChild(new QuerySurfaceSimplePbufferCase(m_eglTestCtx, i->getName(), i->getDescription(), i->getConfigIds()));
}
}
// Set surface attributes
{
tcu::TestCaseGroup* setAttributeGroup = new tcu::TestCaseGroup(m_testCtx, "set_attribute", "Setting attributes");
addChild(setAttributeGroup);
// Window
{
tcu::TestCaseGroup* windowGroup = new tcu::TestCaseGroup(m_testCtx, "window", "Window surfaces");
setAttributeGroup->addChild(windowGroup);
eglu::FilterList filters;
filters << (eglu::ConfigSurfaceType() & EGL_WINDOW_BIT);
std::vector<NamedConfigIdSet> configIdSets;
NamedConfigIdSet::getDefaultSets(configIdSets, m_eglTestCtx.getConfigs(), filters);
for (std::vector<NamedConfigIdSet>::iterator i = configIdSets.begin(); i != configIdSets.end(); i++)
windowGroup->addChild(new SurfaceAttribWindowCase(m_eglTestCtx, i->getName(), i->getDescription(), i->getConfigIds()));
}
// Pixmap
{
tcu::TestCaseGroup* pixmapGroup = new tcu::TestCaseGroup(m_testCtx, "pixmap", "Pixmap surfaces");
setAttributeGroup->addChild(pixmapGroup);
eglu::FilterList filters;
filters << (eglu::ConfigSurfaceType() & EGL_PIXMAP_BIT);
std::vector<NamedConfigIdSet> configIdSets;
NamedConfigIdSet::getDefaultSets(configIdSets, m_eglTestCtx.getConfigs(), filters);
for (std::vector<NamedConfigIdSet>::iterator i = configIdSets.begin(); i != configIdSets.end(); i++)
pixmapGroup->addChild(new SurfaceAttribPixmapCase(m_eglTestCtx, i->getName(), i->getDescription(), i->getConfigIds()));
}
// Pbuffer
{
tcu::TestCaseGroup* pbufferGroup = new tcu::TestCaseGroup(m_testCtx, "pbuffer", "Pbuffer surfaces");
setAttributeGroup->addChild(pbufferGroup);
eglu::FilterList filters;
filters << (eglu::ConfigSurfaceType() & EGL_PBUFFER_BIT);
std::vector<NamedConfigIdSet> configIdSets;
NamedConfigIdSet::getDefaultSets(configIdSets, m_eglTestCtx.getConfigs(), filters);
for (std::vector<NamedConfigIdSet>::iterator i = configIdSets.begin(); i != configIdSets.end(); i++)
pbufferGroup->addChild(new SurfaceAttribPbufferCase(m_eglTestCtx, i->getName(), i->getDescription(), i->getConfigIds()));
}
}
}
} // egl
} // deqp
|
; printargs: PRINTLN each macro argument on its own line.
MACRO printargs
REPT _NARG ; one iteration per remaining argument
PRINTLN \1
SHIFT ; advance \1 to the next argument
ENDR
ENDM
printargs mul(3.0, 4.0)
; printlit: like printargs, but PRINTLN each argument as a quoted string.
; The invocations below are deliberate tokenizer edge cases: escaped commas
; (\,), unbalanced/nested parentheses, and quoted strings containing commas.
MACRO printlit
REPT _NARG
PRINTLN "\1"
SHIFT
ENDR
ENDM
printlit a(b,c\,d), ((e,f),g), ))h, i\,j,
printlit \(k, l), (m:\)n,o(p)q), (r,s)t
printlit "))u,v(", ("w,x","y,z"),
|
; Map header for the Lavender Town Pokémon Center.
LavenderPokecenter_h:
db POKECENTER ; tileset
db LAVENDER_POKECENTER_HEIGHT, LAVENDER_POKECENTER_WIDTH ; dimensions (y, x)
dw LavenderPokecenterBlocks, LavenderPokecenterTextPointers, LavenderPokecenterScript ; blocks, texts, scripts
db $00 ; connections
dw LavenderPokecenterObject ; objects
|
; A014989: a(n) = (1 - (-7)^n)/8.
; 1,-6,43,-300,2101,-14706,102943,-720600,5044201,-35309406,247165843,-1730160900,12111126301,-84777884106,593445188743,-4154116321200,29078814248401,-203551699738806,1424861898171643,-9974033287201500,69818233010410501,-488727631072873506,3421093417510114543,-23947653922570801800,167633577457995612601,-1173435042205969288206,8214045295441785017443,-57498317068092495122100,402488219476647465854701,-2817417536336532260982906,19721922754355725826880343,-138053459280490080788162400,966374214963430565517136801,-6764619504744013958619957606,47352336533208097710339703243,-331466355732456683972377922700,2320264490127196787806645458901,-16241851430890377514646518212306,113692960016232642602525627486143,-795850720113628498217679392403000,5570955040795399487523755746821001,-38996685285567796412666290227747006,272976796998974574888664031594229043
; LODA program. For input n it computes 7*((-7)^n - 1)/8 + 1, which equals
; (1 - (-7)^(n+1))/8 — i.e. the listed terms starting at 1 (offset shift).
mov $1,-7 ; $1 = -7
pow $1,$0 ; $1 = (-7)^n
sub $1,1 ; $1 = (-7)^n - 1  (divisible by 8 since -7 ≡ 1 mod 8)
div $1,8
mul $1,7
add $1,1 ; $1 = 7*((-7)^n - 1)/8 + 1
mov $0,$1 ; result returned in $0
|
; A146559: Expansion of (1-x)/(1 - 2*x + 2*x^2).
; 1,1,0,-2,-4,-4,0,8,16,16,0,-32,-64,-64,0,128,256,256,0,-512,-1024,-1024,0,2048,4096,4096,0,-8192,-16384,-16384,0,32768,65536,65536,0,-131072,-262144,-262144,0,524288,1048576,1048576,0,-2097152,-4194304,-4194304,0,8388608,16777216,16777216,0,-33554432,-67108864,-67108864,0,134217728,268435456,268435456,0,-536870912,-1073741824,-1073741824,0,2147483648,4294967296,4294967296,0,-8589934592,-17179869184,-17179869184,0,34359738368,68719476736,68719476736,0,-137438953472,-274877906944,-274877906944,0,549755813888,1099511627776,1099511627776,0,-2199023255552,-4398046511104,-4398046511104,0,8796093022208,17592186044416,17592186044416,0,-35184372088832,-70368744177664,-70368744177664,0,140737488355328,281474976710656,281474976710656,0,-562949953421312,-1125899906842624,-1125899906842624,0,2251799813685248,4503599627370496,4503599627370496,0,-9007199254740992
; LODA program: iterates the linear recurrence n times, decrementing the
; input counter $0. NOTE(review): the sequence value appears to be carried
; in $1 after the loop (there is no final mov $0,$1) — confirm against the
; LODA interpreter's output-register convention for this program's era.
mov $1,1 ; $1 = running term
mov $2,2 ; $2 = auxiliary accumulator
lpb $0 ; repeat n times
sub $0,1
add $2,$1
add $1,1
mul $1,2
sub $1,$2 ; recurrence step combining $1 and $2
lpe
|
; A313051: Coordination sequence Gal.3.33.1 where G.u.t.v denotes the coordination sequence for a vertex of type v in tiling number t in the Galebach list of u-uniform tilings.
; 1,4,9,14,18,23,28,32,36,41,46,50,55,60,64,68,73,78,82,87,92,96,100,105,110,114,119,124,128,132,137,142,146,151,156,160,164,169,174,178,183,188,192,196,201,206,210,215,220,224
; Machine-generated LODA program. Structure: the outer $14 loop evaluates the
; same core expression at n and n-1; the final $12/$13 block returns the
; difference of the two evaluations (a first-difference trick).
; NOTE(review): the comments below describe structure only — the inner loops
; are miner-generated and have no hand-derived closed form.
mov $12,$0 ; save the input n
mov $14,2 ; run the core computation twice (at n and n-1)
lpb $14
mov $0,$12 ; restore the input for this pass
sub $14,1
add $0,$14
sub $0,1 ; pass 1 uses n, pass 2 uses n-1
mov $4,$0
cmp $4,$0 ; $4 = 1 (a value compared with itself)
mov $8,2
sub $8,$4
add $8,$0
add $0,$8
pow $0,2
mul $0,4 ; $0 = 4*(2n+1)^2-ish intermediate (generated arithmetic)
add $7,1
lpb $7 ; generated inner loop (division/remainder emulation)
lpb $5,3
add $0,3
div $7,$0
lpe
div $0,7
lpe
mov $1,$0
mov $15,$14
lpb $15 ; on the first pass only, stash the result in $13
mov $13,$1
sub $15,1
lpe
lpe
lpb $12 ; for n > 0: result = first-pass value minus second-pass value
mov $12,0
sub $13,$1
lpe
mov $0,$13 ; result returned in $0
|
//
// Created by jieming on 18.12.20.
//
#include "CollisionCheck.h"
using std::cout;
using std::endl;
// Entry point of the collision-check ROS node: spins at 50 Hz, waits until
// the checker has received its first input, then publishes ellipsoid data
// on every cycle.
int main(int argc, char **argv){
    ros::init(argc, argv, "collisionCheckNode");
    CollisionCheck collisionChecker;
    ros::Rate loopRate(50);
    while(ros::ok()){
        loopRate.sleep();
        ros::spinOnce();
        // Nothing to do until the first message has arrived.
        if(collisionChecker.isFirstReceiv()){
            cout << "wait for initial" << endl;
            continue;
        }
        auto ellipsoid = collisionChecker.getEllipsoid();
        collisionChecker.publishEllipData(ellipsoid);
    }
}
|
;
; SORD M5 Stdio
;
; getkey() Wait for keypress
;
; Stefano Bodrato - Apr. 2000
;
;
; $Id: fgetc_cons.asm,v 1.7+ (now on GIT) $
;
SECTION code_clib
PUBLIC fgetc_cons
PUBLIC _fgetc_cons
EXTERN msxbios
INCLUDE "m5bios.def"
; fgetc_cons(): block until a key is pressed; the character is returned in
; HL (L = char, H = 0) per the z88dk int return convention.
.fgetc_cons
._fgetc_cons
ld ix,ACECHI ; BIOS routine that waits for a keypress (char ends up in A)
call msxbios
IF STANDARDESCAPECHARS
cp 13 ; translate carriage return to line feed when enabled
jr nz,not_return
ld a,10
.not_return
ENDIF
ld h,0 ; widen the 8-bit char in A to a 16-bit return value in HL
ld l,a
ret
|
[section .text]
;-----------------------------------------------------------------------
; Shellcode-style program: dumps /etc/passwd to stdout and exits.
; Uses the classic jmp/call/pop trick so the address of the path string
; is obtained position-independently.
; Linux x86-64 syscall ABI: rax = number; args in rdi, rsi, rdx.
;-----------------------------------------------------------------------
SYS_READ  equ 0
SYS_WRITE equ 1
SYS_OPEN  equ 2
SYS_EXIT  equ 60
O_RDONLY  equ 0
STDOUT    equ 1
BUF_ADDR  equ 0x0000000000614480 ; scratch writable memory (assumed mapped)
BUF_SIZE  equ 0x100000
global _start
_start:
jmp ender
starter:
;fd = open('/etc/passwd', O_RDONLY, 0)
pop rdi ;rdi = address of '/etc/passwd' (pushed by the call below)
xor esi, esi ;O_RDONLY — xor is the idiomatic zeroing form
xor edx, edx ;mode = 0 (unused without O_CREAT)
mov eax, SYS_OPEN
syscall
;read(fd, memory_w, size) — fd returned by open() is already in rax
mov rdi, rax ;fd (the rbx round-trip in the original was redundant)
mov rsi, BUF_ADDR ;writable memory
mov rdx, BUF_SIZE
mov eax, SYS_READ
syscall
;write(stdout, memory_w, bytes_read)
mov rdx, rax ;rax = number of bytes read by read()
mov rdi, STDOUT
mov rsi, BUF_ADDR
mov eax, SYS_WRITE
syscall
;exit(0)
xor edi, edi
mov eax, SYS_EXIT
syscall
ender:
call starter ;pushes the address of the string below onto the stack
db '/etc/passwd', 0
|
/// \file Owned.hpp
#pragma once
#include <llvm-c/Core.h>
#include <llvm-c/DebugInfo.h>
#include <llvm-c/ExecutionEngine.h>
#include <llvm-c/Transforms/PassManagerBuilder.h>
#include "chi/Fwd.hpp"
namespace chi {
namespace detail {
/// Signature of a C-style deleter that releases a raw handle of type T
/// (matches the LLVM-C `LLVMDispose*` family).
template <typename T>
using Deleter = void (*)(T);
}
/// An owned wrapper around a raw handle of type `T` that is released with
/// `Deleter` when the owner is destroyed. Move-only; a zero-cost abstraction
/// over manual `LLVMDispose*` calls.
template <typename T, detail::Deleter<T> Deleter>
class Owned {
public:
	/// Construct an empty (null) owner.
	Owned() : mObject(nullptr) {}
	Owned(std::nullptr_t) : Owned() {}
	/// Take ownership of \p object; it is passed to `Deleter` on destruction.
	explicit Owned(T object) : mObject(object) {}
	// move only
	Owned(Owned&& other) noexcept : mObject{other.mObject} { other.mObject = nullptr; }
	Owned(const Owned&) = delete;
	Owned& operator=(Owned&& other) noexcept {
		if (this != &other) {
			// BUG FIX: release the currently held handle before taking the new
			// one — the previous implementation leaked it on move-assignment.
			if (mObject) { Deleter(mObject); }
			this->mObject = other.mObject;
			other.mObject = nullptr;
		}
		return *this;
	}
	Owned& operator=(const Owned&) = delete;
	/// Two owners compare equal iff they hold the same raw handle.
	bool operator==(const Owned& other) const { return this->mObject == other.mObject; }
	/// True when a non-null handle is held.
	operator bool() const { return mObject != nullptr; }
	~Owned() {
		if (mObject) {
			Deleter(mObject);
			mObject = nullptr;
		}
	}
	/// Access the raw handle without giving up ownership.
	const T& operator*() const { return mObject; }
	T& operator*() { return mObject; }
	/// Release ownership: returns the raw handle and leaves this owner null,
	/// so the caller becomes responsible for deletion.
	T take_ownership() {
		auto ret = mObject;
		mObject = nullptr;
		return ret;
	}
private:
	T mObject = nullptr;
};
// Convenience aliases pairing each LLVM-C handle type with its disposal
// function from the LLVM-C API.
using OwnedLLVMModule = Owned<LLVMModuleRef, LLVMDisposeModule>;
using OwnedLLVMGenericValue = Owned<LLVMGenericValueRef, LLVMDisposeGenericValue>;
using OwnedLLVMContext = Owned<LLVMContextRef, LLVMContextDispose>;
using OwnedMessage = Owned<char*, LLVMDisposeMessage>;
using OwnedLLVMExecutionEngine = Owned<LLVMExecutionEngineRef, LLVMDisposeExecutionEngine>;
using OwnedLLVMBuilder = Owned<LLVMBuilderRef, LLVMDisposeBuilder>;
using OwnedLLVMDIBuilder = Owned<LLVMDIBuilderRef, LLVMDisposeDIBuilder>;
using OwnedLLVMMemoryBuffer = Owned<LLVMMemoryBufferRef, LLVMDisposeMemoryBuffer>;
using OwnedLLVMPassManager = Owned<LLVMPassManagerRef, LLVMDisposePassManager>;
using OwnedLLVMPassManagerBuilder = Owned<LLVMPassManagerBuilderRef, LLVMPassManagerBuilderDispose>;
} // namespace chi
|
// Copyright 2012 Cloudera Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "exec/sort-node.h"
#include "exec/sort-exec-exprs.h"
#include "runtime/row-batch.h"
#include "runtime/runtime-state.h"
#include "runtime/sorted-run-merger.h"
using namespace std;
namespace impala {
// Constructs the node; 'offset_' is the number of leading sorted rows to
// discard (0 when the plan does not set one).
SortNode::SortNode(ObjectPool* pool, const TPlanNode& tnode, const DescriptorTbl& descs)
: ExecNode(pool, tnode, descs),
offset_(tnode.sort_node.__isset.offset ? tnode.sort_node.offset : 0),
num_rows_skipped_(0) {
}
SortNode::~SortNode() {
}
// Initializes the ordering expressions and copies the per-key sort direction
// and nulls-first flags out of the thrift plan node.
Status SortNode::Init(const TPlanNode& tnode) {
RETURN_IF_ERROR(ExecNode::Init(tnode));
RETURN_IF_ERROR(sort_exec_exprs_.Init(tnode.sort_node.sort_info, pool_));
is_asc_order_ = tnode.sort_node.sort_info.is_asc_order;
nulls_first_ = tnode.sort_node.sort_info.nulls_first;
return Status::OK;
}
// Prepares the sort expressions against the child's row layout (input) and
// this node's row descriptor (output).
Status SortNode::Prepare(RuntimeState* state) {
SCOPED_TIMER(runtime_profile_->total_time_counter());
RETURN_IF_ERROR(ExecNode::Prepare(state));
RETURN_IF_ERROR(sort_exec_exprs_.Prepare(state, child(0)->row_desc(), row_descriptor_));
return Status::OK;
}
// Opens the child, builds the comparator and Sorter, and consumes the entire
// child input eagerly (sorting is a blocking operation); the child is closed
// as soon as its rows have been handed to the sorter.
Status SortNode::Open(RuntimeState* state) {
SCOPED_TIMER(runtime_profile_->total_time_counter());
RETURN_IF_ERROR(ExecNode::Open(state));
RETURN_IF_ERROR(sort_exec_exprs_.Open(state));
RETURN_IF_CANCELLED(state);
RETURN_IF_ERROR(state->CheckQueryState());
RETURN_IF_ERROR(child(0)->Open(state));
TupleRowComparator less_than(
sort_exec_exprs_.lhs_ordering_expr_ctxs(), sort_exec_exprs_.rhs_ordering_expr_ctxs(),
is_asc_order_, nulls_first_);
sorter_.reset(new Sorter(
less_than, sort_exec_exprs_.sort_tuple_slot_expr_ctxs(),
&row_descriptor_, mem_tracker(), runtime_profile(), state));
// The child has been opened and the sorter created. Sort the input.
// The final merge is done on-demand as rows are requested in GetNext().
RETURN_IF_ERROR(SortInput(state));
// The child can be closed at this point.
child(0)->Close(state);
return Status::OK;
}
// Streams sorted rows out of the sorter, first discarding 'offset_' rows,
// then truncating the final batch so no more than 'limit_' rows are returned.
Status SortNode::GetNext(RuntimeState* state, RowBatch* row_batch, bool* eos) {
SCOPED_TIMER(runtime_profile_->total_time_counter());
RETURN_IF_ERROR(ExecDebugAction(TExecNodePhase::GETNEXT, state));
RETURN_IF_CANCELLED(state);
RETURN_IF_ERROR(state->CheckQueryState());
if (ReachedLimit()) {
*eos = true;
return Status::OK;
} else {
*eos = false;
}
DCHECK_EQ(row_batch->num_rows(), 0);
RETURN_IF_ERROR(sorter_->GetNext(row_batch, eos));
// Offset handling: drain whole batches until the running skip count passes
// offset_, keeping only the overshoot from the batch that crosses it.
while ((num_rows_skipped_ < offset_)) {
num_rows_skipped_ += row_batch->num_rows();
// Throw away rows in the output batch until the offset is skipped.
int rows_to_keep = num_rows_skipped_ - offset_;
if (rows_to_keep > 0) {
// Move the surviving tail of the batch to the front.
row_batch->CopyRows(0, row_batch->num_rows() - rows_to_keep, rows_to_keep);
row_batch->set_num_rows(rows_to_keep);
} else {
row_batch->set_num_rows(0);
}
if (rows_to_keep > 0 || *eos) break;
RETURN_IF_ERROR(sorter_->GetNext(row_batch, eos));
}
num_rows_returned_ += row_batch->num_rows();
if (ReachedLimit()) {
// Trim the batch so exactly limit_ rows have been returned in total.
row_batch->set_num_rows(row_batch->num_rows() - (num_rows_returned_ - limit_));
*eos = true;
}
COUNTER_SET(rows_returned_counter_, num_rows_returned_);
return Status::OK;
}
// Releases the sort expressions and the sorter before closing the base node;
// idempotent via the is_closed() guard.
void SortNode::Close(RuntimeState* state) {
if (is_closed()) return;
sort_exec_exprs_.Close(state);
sorter_.reset();
ExecNode::Close(state);
}
void SortNode::DebugString(int indentation_level, stringstream* out) const {
*out << string(indentation_level * 2, ' ');
*out << "SortNode("
<< Expr::DebugString(sort_exec_exprs_.lhs_ordering_expr_ctxs());
for (int i = 0; i < is_asc_order_.size(); ++i) {
*out << (i > 0 ? " " : "")
<< (is_asc_order_[i] ? "asc" : "desc")
<< " nulls " << (nulls_first_[i] ? "first" : "last");
}
ExecNode::DebugString(indentation_level, out);
*out << ")";
}
// Pulls every batch from the child and feeds it to the sorter, then tells
// the sorter that its input is complete so it can finish building runs.
Status SortNode::SortInput(RuntimeState* state) {
  RowBatch batch(child(0)->row_desc(), state->batch_size(), mem_tracker());
  bool input_exhausted = false;
  while (true) {
    batch.Reset();
    RETURN_IF_ERROR(child(0)->GetNext(state, &batch, &input_exhausted));
    RETURN_IF_ERROR(sorter_->AddBatch(&batch));
    RETURN_IF_ERROR(state->CheckQueryState());
    if (input_exhausted) break;
  }
  RETURN_IF_ERROR(sorter_->InputDone());
  return Status::OK;
}
}
|
; L1002.asm
; Generated 01.02.1998 by mlevel
; Modified 01.02.1998 by Abe Pralle
INCLUDE "Source/Defs.inc"
INCLUDE "Source/Levels.inc"
INCLUDE "Source/Items.inc"
;---------------------------------------------------------------------
SECTION "Level1002Section",ROMX
;---------------------------------------------------------------------
; Table of pointers to this level's script blocks and map data.
L1002_Contents::
DW L1002_Load
DW L1002_Init
DW L1002_Check
DW L1002_Map
;---------------------------------------------------------------------
; Load
;---------------------------------------------------------------------
; Each script block is prefixed with its byte size (used by the engine
; when copying the block).
L1002_Load:
DW ((L1002_LoadFinished - L1002_Load2)) ;size
L1002_Load2:
call ParseMap ; ParseMap defined elsewhere; presumably decodes L1002_Map
ret
L1002_LoadFinished:
;---------------------------------------------------------------------
; Map
;---------------------------------------------------------------------
L1002_Map:
INCBIN "Data/Levels/L1002_guardpost.lvl"
;---------------------------------------------------------------------
; Init
;---------------------------------------------------------------------
L1002_Init:
DW ((L1002_InitFinished - L1002_Init2)) ;size
L1002_Init2:
call UseAlternatePalette
ld a,ENV_SNOW ; snow environment effect for this level
call SetEnvEffect
ld a,BANK(main_in_game_gbm)
ld hl,main_in_game_gbm
call InitMusic ; start the in-game music track
ld bc,ITEM_CODE1002
call RemoveClearanceIfTaken
ret
L1002_InitFinished:
;---------------------------------------------------------------------
; Check
;---------------------------------------------------------------------
; Per-frame check script: this level has none.
L1002_Check:
DW ((L1002_CheckFinished - L1002_Check2)) ;size
L1002_Check2:
ret
L1002_CheckFinished:
PRINT "1002 Script Sizes (Load/Init/Check) (of $500): "
PRINT (L1002_LoadFinished - L1002_Load2)
PRINT " / "
PRINT (L1002_InitFinished - L1002_Init2)
PRINT " / "
PRINT (L1002_CheckFinished - L1002_Check2)
PRINT "\n"
|
; Base stats entry — Chansey (see the INCBIN sprite path below).
db 0 ; species ID placeholder
db 250, 05, 05, 50, 35, 105
; hp atk def spd sat sdf
db FAIRY, FAIRY ; type
db 30 ; catch rate
db 255 ; base exp
db NO_ITEM, LUCKY_EGG ; items (common, rare wild held item)
db GENDER_F100 ; gender ratio
db 100 ; unknown 1
db 40 ; step cycles to hatch
db 5 ; unknown 2
INCBIN "gfx/pokemon/chansey/front.dimensions"
db 0, 0, 0, 0 ; padding
db GROWTH_FAST ; growth rate
dn EGG_FAIRY, EGG_FAIRY ; egg groups
; tm/hm learnset
tmhm DYNAMICPUNCH, HEADBUTT, CURSE, ROLLOUT, TOXIC, ZAP_CANNON, ROCK_SMASH, PSYCH_UP, HIDDEN_POWER, SUNNY_DAY, SNORE, BLIZZARD, HYPER_BEAM, ICY_WIND, PROTECT, RAIN_DANCE, ENDURE, FRUSTRATION, SOLARBEAM, IRON_TAIL, THUNDER, RETURN, PSYCHIC_M, SHADOW_BALL, MUD_SLAP, DOUBLE_TEAM, SWAGGER, SLEEP_TALK, SANDSTORM, FIRE_BLAST, DEFENSE_CURL, DREAM_EATER, REST, ATTRACT, STRENGTH, FLASH, FLAMETHROWER, THUNDERBOLT, ICE_BEAM
; end
|
[org 0x0100]
; Tiny DOS .COM demo: computes 5 + 10 in AX, then terminates via INT 21h
; function 4Ch (AH = 4Ch, AL = exit code 0). The sum itself is discarded
; when AX is reloaded for the exit call.
mov bx,10 ; second addend
mov ax,5 ; first addend
add ax,bx ; ax = 15
mov ax,0x4c00 ; AH = 4Ch (terminate), AL = 0 (exit status)
int 0x21
|
/*
---------------------------------------------------------------------------
Open Asset Import Library (assimp)
---------------------------------------------------------------------------
Copyright (c) 2006-2012, assimp team
All rights reserved.
Redistribution and use of this software in source and binary forms,
with or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
* Neither the name of the assimp team, nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior
written permission of the assimp team.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
*/
/** @file Implementation of the STL importer class */
#include "AssimpPCH.h"
#ifndef ASSIMP_BUILD_NO_STL_IMPORTER
// internal headers
#include "STLLoader.h"
#include "ParsingUtils.h"
#include "fast_atof.h"
#include <cstdint>  // fixed-width ints for binary STL size validation
#include <cstring>  // memcpy for alignment-safe face-count read
using namespace Assimp;
namespace {
// Importer metadata reported through STLImporter::GetInfo().
static const aiImporterDesc desc = {
	"Stereolithography (STL) Importer",
	"",
	"",
	"",
	aiImporterFlags_SupportTextFlavour | aiImporterFlags_SupportBinaryFlavour,
	0,
	0,
	0,
	0,
	"stl"
};
// A valid binary STL buffer should consist of the following elements, in order:
// 1) 80 byte header
// 2) 4 byte face count
// 3) 50 bytes per face
bool IsBinarySTL(const char* buffer, unsigned int fileSize) {
	if (fileSize < 84)
		return false;
	// Read the face count with memcpy: 'buffer + 80' is not guaranteed to be
	// suitably aligned for a direct uint32_t load (the old reinterpret_cast
	// was UB on strict-alignment targets).
	uint32_t faceCount = 0;
	memcpy(&faceCount, buffer + 80, sizeof(faceCount));
	// BUG FIX: compute the expected size in 64 bits. faceCount * 50 + 84 can
	// wrap around in 32-bit arithmetic for huge/garbage counts, which could
	// make a non-STL file coincidentally pass this check.
	const uint64_t expectedBinaryFileSize = static_cast<uint64_t>(faceCount) * 50u + 84u;
	return expectedBinaryFileSize == static_cast<uint64_t>(fileSize);
}
// An ascii STL buffer will begin with "solid NAME", where NAME is optional.
// Note: The "solid NAME" check is necessary, but not sufficient, to determine
// if the buffer is ASCII; a binary header could also begin with "solid NAME".
bool IsAsciiSTL(const char* buffer, unsigned int fileSize) {
	if (IsBinarySTL(buffer, fileSize))
		return false;
	if (fileSize < 5)
		return false;
	return strncmp(buffer, "solid", 5) == 0;
}
} // namespace
// ------------------------------------------------------------------------------------------------
// Constructor to be privately used by Importer
STLImporter::STLImporter()
{}
// ------------------------------------------------------------------------------------------------
// Destructor, private as well
STLImporter::~STLImporter()
{}
// ------------------------------------------------------------------------------------------------
// Returns whether the class can handle the format of the given file: accept
// on the ".stl" extension, otherwise (no extension, or signature check
// requested) look for the "STL"/"solid" tokens in the file header.
bool STLImporter::CanRead( const std::string& pFile, IOSystem* pIOHandler, bool checkSig) const
{
	const std::string extension = GetExtension(pFile);
	if (extension == "stl") {
		return true;
	}
	if (extension.empty() || checkSig) {
		// Without an IO handler we cannot inspect the content; be permissive.
		if (!pIOHandler) {
			return true;
		}
		const char* tokens[] = {"STL","solid"};
		return SearchFileHeaderForToken(pIOHandler,pFile,tokens,2);
	}
	return false;
}
// ------------------------------------------------------------------------------------------------
// Returns the importer metadata table defined at the top of this file.
const aiImporterDesc* STLImporter::GetInfo () const
{
return &desc;
}
// ------------------------------------------------------------------------------------------------
// Imports the given file into the given scene structure: reads the whole file
// into memory, detects binary vs. ASCII STL, delegates parsing, then builds
// the face list (one triangle per 3 consecutive vertices) and a single
// default white material.
void STLImporter::InternReadFile( const std::string& pFile,
aiScene* pScene, IOSystem* pIOHandler)
{
boost::scoped_ptr<IOStream> file( pIOHandler->Open( pFile, "rb"));
// Check whether we can read from the file
if( file.get() == NULL) {
throw DeadlyImportError( "Failed to open STL file " + pFile + ".");
}
fileSize = (unsigned int)file->FileSize();
// allocate storage and copy the contents of the file to a memory buffer
// (terminate it with zero)
std::vector<char> mBuffer2;
TextFileToBuffer(file.get(),mBuffer2);
this->pScene = pScene;
this->mBuffer = &mBuffer2[0];
// the default vertex color is white
clrColorDefault.r = clrColorDefault.g = clrColorDefault.b = clrColorDefault.a = 1.0f;
// allocate one mesh
pScene->mNumMeshes = 1;
pScene->mMeshes = new aiMesh*[1];
aiMesh* pMesh = pScene->mMeshes[0] = new aiMesh();
pMesh->mMaterialIndex = 0;
// allocate a single node
pScene->mRootNode = new aiNode();
pScene->mRootNode->mNumMeshes = 1;
pScene->mRootNode->mMeshes = new unsigned int[1];
pScene->mRootNode->mMeshes[0] = 0;
// bMatClr: binary STL files may carry an overall material color.
bool bMatClr = false;
if (IsBinarySTL(mBuffer, fileSize)) {
bMatClr = LoadBinaryFile();
} else if (IsAsciiSTL(mBuffer, fileSize)) {
LoadASCIIFile();
} else {
throw DeadlyImportError( "Failed to determine STL storage representation for " + pFile + ".");
}
// now copy faces: vertices were emitted sequentially by the loaders, so
// face i simply indexes vertices 3i, 3i+1, 3i+2.
pMesh->mFaces = new aiFace[pMesh->mNumFaces];
for (unsigned int i = 0, p = 0; i < pMesh->mNumFaces;++i) {
aiFace& face = pMesh->mFaces[i];
face.mIndices = new unsigned int[face.mNumIndices = 3];
for (unsigned int o = 0; o < 3;++o,++p) {
face.mIndices[o] = p;
}
}
// create a single default material - everything white, as we have vertex colors
aiMaterial* pcMat = new aiMaterial();
aiString s;
s.Set(AI_DEFAULT_MATERIAL_NAME);
pcMat->AddProperty(&s, AI_MATKEY_NAME);
aiColor4D clrDiffuse(1.0f,1.0f,1.0f,1.0f);
if (bMatClr) {
clrDiffuse = clrColorDefault;
}
pcMat->AddProperty(&clrDiffuse,1,AI_MATKEY_COLOR_DIFFUSE);
pcMat->AddProperty(&clrDiffuse,1,AI_MATKEY_COLOR_SPECULAR);
clrDiffuse = aiColor4D(0.05f,0.05f,0.05f,1.0f);
pcMat->AddProperty(&clrDiffuse,1,AI_MATKEY_COLOR_AMBIENT);
pScene->mNumMaterials = 1;
pScene->mMaterials = new aiMaterial*[1];
pScene->mMaterials[0] = pcMat;
}
// ------------------------------------------------------------------------------------------------
// Read an ASCII STL file
// Read an ASCII STL file.
// Walks the text buffer token by token: "facet"/"normal" introduce a face,
// "vertex" adds one of its three corners, "endsolid" terminates parsing.
// Vertex/normal arrays are grown on demand when the initial size estimate
// (fileSize/160 faces) turns out to be too small.
void STLImporter::LoadASCIIFile()
{
aiMesh* pMesh = pScene->mMeshes[0];
const char* sz = mBuffer + 5; // skip the "solid"
SkipSpaces(&sz);
const char* szMe = sz;
while (!::IsSpaceOrNewLine(*sz)) {
sz++;
}
size_t temp;
// setup the name of the node from the token following "solid" (if any)
if ((temp = (size_t)(sz-szMe))) {
pScene->mRootNode->mName.length = temp;
memcpy(pScene->mRootNode->mName.data,szMe,temp);
pScene->mRootNode->mName.data[temp] = '\0';
}
else pScene->mRootNode->mName.Set("<STL_ASCII>");
// try to guess how many vertices we could have
// assume we'll need 160 bytes for each face
pMesh->mNumVertices = ( pMesh->mNumFaces = std::max(1u,fileSize / 160u )) * 3;
pMesh->mVertices = new aiVector3D[pMesh->mNumVertices];
pMesh->mNormals = new aiVector3D[pMesh->mNumVertices];
// curVertex starts at 3 (== "previous face complete") so the first "facet"
// does not trigger the incomplete-face warning below.
unsigned int curFace = 0, curVertex = 3;
for ( ;; )
{
// go to the next token
if(!SkipSpacesAndLineEnd(&sz))
{
// seems we're finished although there was no end marker
DefaultLogger::get()->warn("STL: unexpected EOF. \'endsolid\' keyword was expected");
break;
}
// facet normal -0.13 -0.13 -0.98
if (!strncmp(sz,"facet",5) && IsSpaceOrNewLine(*(sz+5))) {
if (3 != curVertex) {
DefaultLogger::get()->warn("STL: A new facet begins but the old is not yet complete");
}
if (pMesh->mNumFaces == curFace) {
ai_assert(pMesh->mNumFaces != 0);
// need to resize the arrays, our size estimate was wrong
// (estimate new per-face byte cost from what we consumed so far)
unsigned int iNeededSize = (unsigned int)(sz-mBuffer) / pMesh->mNumFaces;
if (iNeededSize <= 160)iNeededSize >>= 1; // prevent endless looping
unsigned int add = (unsigned int)((mBuffer+fileSize)-sz) / iNeededSize;
add += add >> 3; // add 12.5% as buffer
iNeededSize = (pMesh->mNumFaces + add)*3;
aiVector3D* pv = new aiVector3D[iNeededSize];
memcpy(pv,pMesh->mVertices,pMesh->mNumVertices*sizeof(aiVector3D));
delete[] pMesh->mVertices;
pMesh->mVertices = pv;
pv = new aiVector3D[iNeededSize];
memcpy(pv,pMesh->mNormals,pMesh->mNumVertices*sizeof(aiVector3D));
delete[] pMesh->mNormals;
pMesh->mNormals = pv;
pMesh->mNumVertices = iNeededSize;
pMesh->mNumFaces += add;
}
aiVector3D* vn = &pMesh->mNormals[curFace++*3];
sz += 6; // strlen("facet") plus the separator just tested above
curVertex = 0;
SkipSpaces(&sz);
if (strncmp(sz,"normal",6)) {
DefaultLogger::get()->warn("STL: a facet normal vector was expected but not found");
}
else
{
sz += 7;
SkipSpaces(&sz);
sz = fast_atoreal_move<float>(sz, (float&)vn->x );
SkipSpaces(&sz);
sz = fast_atoreal_move<float>(sz, (float&)vn->y );
SkipSpaces(&sz);
sz = fast_atoreal_move<float>(sz, (float&)vn->z );
// replicate the facet normal for all three vertices of the face
*(vn+1) = *vn;
*(vn+2) = *vn;
}
}
// vertex 1.50000 1.50000 0.00000
else if (!strncmp(sz,"vertex",6) && ::IsSpaceOrNewLine(*(sz+6)))
{
if (3 == curVertex) {
DefaultLogger::get()->error("STL: a facet with more than 3 vertices has been found");
}
else
{
sz += 7;
SkipSpaces(&sz);
// NOTE(review): if a "vertex" token precedes any "facet", curFace is 0 and
// (curFace-1)*3 underflows -> out-of-bounds write on malformed input. Verify
// whether upstream guards against this before relying on it.
aiVector3D* vn = &pMesh->mVertices[(curFace-1)*3 + curVertex++];
sz = fast_atoreal_move<float>(sz, (float&)vn->x );
SkipSpaces(&sz);
sz = fast_atoreal_move<float>(sz, (float&)vn->y );
SkipSpaces(&sz);
sz = fast_atoreal_move<float>(sz, (float&)vn->z );
}
}
else if (!::strncmp(sz,"endsolid",8)) {
// finished!
break;
}
// else skip the whole identifier
else while (!::IsSpaceOrNewLine(*sz)) {
++sz;
}
}
if (!curFace) {
pMesh->mNumFaces = 0;
throw DeadlyImportError("STL: ASCII file is empty or invalid; no data loaded");
}
// shrink the counts to what was actually parsed (arrays may be oversized)
pMesh->mNumFaces = curFace;
pMesh->mNumVertices = curFace*3;
// we are finished!
}
// ------------------------------------------------------------------------------------------------
// Read a binary STL file
// Read a binary STL file: an 80-byte header, a uint32 facet count, then
// 50 bytes per facet (normal, 3 vertices, 16-bit attribute/color word).
// Returns true when a Materialise "COLOR=" header was present but no
// per-vertex colors were used, so the caller should apply clrColorDefault
// as the diffuse material color.
bool STLImporter::LoadBinaryFile()
{
    // skip the first 80 bytes
    if (fileSize < 84) {
        throw DeadlyImportError("STL: file is too small for the header");
    }
    bool bIsMaterialise = false;
    // search for an occurrence of "COLOR=" in the header
    const char* sz2 = (const char*)mBuffer;
    const char* const szEnd = sz2+80;
    while (sz2 < szEnd) {
        if ('C' == *sz2++ && 'O' == *sz2++ && 'L' == *sz2++ &&
            'O' == *sz2++ && 'R' == *sz2++ && '=' == *sz2++) {
            // read the default vertex color for facets.
            // Cast through unsigned char: plain char may be signed, which
            // would yield negative color components for byte values > 127.
            bIsMaterialise = true;
            DefaultLogger::get()->info("STL: Taking code path for Materialise files");
            clrColorDefault.r = static_cast<unsigned char>(*sz2++) / 255.0f;
            clrColorDefault.g = static_cast<unsigned char>(*sz2++) / 255.0f;
            clrColorDefault.b = static_cast<unsigned char>(*sz2++) / 255.0f;
            clrColorDefault.a = static_cast<unsigned char>(*sz2++) / 255.0f;
            break;
        }
    }
    const unsigned char* sz = (const unsigned char*)mBuffer + 80;
    // now read the number of facets
    aiMesh* pMesh = pScene->mMeshes[0];
    pScene->mRootNode->mName.Set("<STL_BINARY>");
    pMesh->mNumFaces = *((uint32_t*)sz);
    sz += 4;
    // do the size check in 64 bits: mNumFaces*50 could overflow a 32-bit
    // unsigned for a corrupt facet count and defeat the check
    if ((uint64_t)fileSize < 84ull + (uint64_t)pMesh->mNumFaces * 50ull) {
        throw DeadlyImportError("STL: file is too small to hold all facets");
    }
    if (!pMesh->mNumFaces) {
        throw DeadlyImportError("STL: file is empty. There are no facets defined");
    }
    pMesh->mNumVertices = pMesh->mNumFaces*3;
    aiVector3D* vp,*vn;
    vp = pMesh->mVertices = new aiVector3D[pMesh->mNumVertices];
    vn = pMesh->mNormals = new aiVector3D[pMesh->mNumVertices];
    for (unsigned int i = 0; i < pMesh->mNumFaces;++i) {
        // NOTE: Blender sometimes writes empty normals ... this is not
        // our fault ... the RemoveInvalidData helper step should fix that
        *vn = *((aiVector3D*)sz);
        sz += sizeof(aiVector3D);
        // replicate the facet normal for all three vertices of the face
        *(vn+1) = *vn;
        *(vn+2) = *vn;
        vn += 3;
        *vp++ = *((aiVector3D*)sz);
        sz += sizeof(aiVector3D);
        *vp++ = *((aiVector3D*)sz);
        sz += sizeof(aiVector3D);
        *vp++ = *((aiVector3D*)sz);
        sz += sizeof(aiVector3D);
        uint16_t color = *((uint16_t*)sz);
        sz += 2;
        if (color & (1 << 15))
        {
            // seems we need to take the color
            if (!pMesh->mColors[0])
            {
                // lazily allocate the color channel, pre-filled with the default color
                pMesh->mColors[0] = new aiColor4D[pMesh->mNumVertices];
                for (unsigned int j = 0; j < pMesh->mNumVertices; ++j) {
                    pMesh->mColors[0][j] = this->clrColorDefault;
                }
                DefaultLogger::get()->info("STL: Mesh has vertex colors");
            }
            aiColor4D* clr = &pMesh->mColors[0][i*3];
            clr->a = 1.0f;
            // 5 bits per channel -> mask with 0x1F (31 decimal), matching the
            // /31.0f scale. (The previous mask 0x31 was a hex/decimal mixup
            // and dropped bits 1-3 of every channel.)
            if (bIsMaterialise) // this is reversed
            {
                clr->r = (color & 0x1Fu) / 31.0f;
                clr->g = ((color & (0x1Fu<<5))>>5u) / 31.0f;
                clr->b = ((color & (0x1Fu<<10))>>10u) / 31.0f;
            }
            else
            {
                clr->b = (color & 0x1Fu) / 31.0f;
                clr->g = ((color & (0x1Fu<<5))>>5u) / 31.0f;
                clr->r = ((color & (0x1Fu<<10))>>10u) / 31.0f;
            }
            // assign the color to all vertices of the face
            *(clr+1) = *clr;
            *(clr+2) = *clr;
        }
    }
    if (bIsMaterialise && !pMesh->mColors[0])
    {
        // use the color as diffuse material color
        return true;
    }
    return false;
}
#endif // !! ASSIMP_BUILD_NO_STL_IMPORTER
|
; A125811: Number of coefficients in the n-th q-Bell number as a polynomial in q.
; 1,1,1,2,3,5,8,11,15,20,26,32,39,47,56,66,76,87,99,112,126,141,156,172,189,207,226,246,267,288,310,333,357,382,408,435,463,491,520,550,581,613,646,680,715,751,787,824,862,901,941,982,1024,1067,1111,1156,1201
; LODA program: $0 = input n, result is returned in $0.
mov $2,$0    ; $2 = n (keep a copy of the input)
lpb $0       ; loop while $0 > 0
add $1,$0    ;   $1 accumulates n + (n-1) + ... + 1 = n(n+1)/2
sub $0,1
lpe
add $1,1     ; $1 = n(n+1)/2 + 1
; NOTE(review): the second loop's net effect depends on LODA lpb/lpe
; semantics (loop while $1 > 0) -- verify against the LODA interpreter.
lpb $1
sub $2,$0
add $0,1
sub $1,$2
lpe
|
[bits 16]
; uint64_t adjust_page_table(uint32_t initReserveLower, uint32_t initReserveHigher);
; Identity maps the first megabyte and the given initial reserve with kernel-exclusive read-write access.
; Note that initReserveLower/Higher are physical addresses which contain 2mb of usable memory.
; Sets eax to the lower half of the linear address of the 2mb-long initial reserve
; Sets ebx to the higher half
adjust_page_table:
push bp
mov bp, sp
; Each table below consists of two consecutive 512-entry pages. The first
; page holds the hardware entries; the page at +4096 mirrors each entry
; with the 32-bit address of the child table ("format expected by 64bit
; Pager" -- see the data section below). NOTE(review): only the low dword
; of each 64-bit entry is written; the tables are zero-initialized, so the
; high dwords are assumed to stay 0 -- confirm callers never dirty them.
; Initialize the first entry of PML4
mov ebx, paging64.pml4
mov dword [ebx], paging64.pdpt
or dword [ebx], 3 ; present | writable
mov dword [ebx+4096], paging64.pdpt
; Initialize the first entry of PDPT
mov ebx, paging64.pdpt
mov dword [ebx], paging64.pd
or dword [ebx], 3 ; present | writable
mov dword [ebx+4096], paging64.pd
; Initialize the first entry of PD
mov ebx, paging64.pd
mov dword [ebx], paging64.pt
or dword [ebx], 3 ; present | writable
mov dword [ebx+4096], paging64.pt
; Initialize the relevant part of PT
; eax : current physical address to map, ebx: current physical address of current PT entry, ecx: number of PT entries initialized so far
mov eax, 0
mov ecx, 0
mov ebx, paging64.pt
.pt_loop_begin:
cmp ecx, 256 ; 1 mb = 256 * 4kb
jae .pt_loop_end
mov edx, eax
or edx, 11b ; present | writable
mov [ebx], edx
;mov [ebx+4096], edx ; We don't need to set the linear fields for PT entries, because they don't have any child.
add eax, 4096 ; 4kb
add ebx, 8 ; entries are 8 bytes (64-bit paging format)
inc ecx
jmp .pt_loop_begin
.pt_loop_end:
mov eax, [bp+4] ; Page in the initial reserve
mov ebx, [bp+8]
or eax, 10000011b ; Present, writable, pagesize
mov [paging64.pd+8], eax ; PD entry 1 -> maps linear 2mb..4mb as one large page
mov [paging64.pd+12], ebx ; No need to initialize the lin field of the entry because it maps a page, because it has pagesize bit set.
mov eax, 2*1024*1024 ; TODO: Randomize the linear address of the initial reserve.
xor ebx, ebx
leave
ret 8 ; callee pops the two dword arguments
paging64:
; NOTE(review): the +0x7E00 term implies this module is loaded at 0x7E00 -- confirm.
times 4096-(($-$$+0x7E00) % 4096) db 0 ; 4kb alignment
.pml4:
times 512 dq 0 ; This is the format expected by 64bit Pager
times 512 dq 0
.pdpt:
times 512 dq 0
times 512 dq 0
.pd:
times 512 dq 0
times 512 dq 0
.pt:
times 512 dq 0
times 512 dq 0
.global s_prepare_buffers
// Machine-generated buffer-preparation routine (microarchitectural test
// harness): touches the *_ht buffer regions with a mix of loads, stores
// and rep-string copies so later measurements start from a known cache
// state. The nops and arithmetic on dead registers are generated padding;
// only the memory accesses are behavior-relevant. All clobbered GPRs are
// saved/restored around the body.
s_prepare_buffers:
push %r10
push %r11
push %r13
push %r8
push %rbp
push %rcx
push %rdi
push %rsi
// byte load from addresses_WT_ht
lea addresses_WT_ht+0x198d0, %r13
nop
nop
inc %rsi
movb (%r13), %cl
nop
nop
nop
add $61572, %r10
// rep copy addresses_A_ht -> addresses_WT_ht (51 words)
lea addresses_A_ht+0x18b22, %rsi
lea addresses_WT_ht+0x11be2, %rdi
nop
nop
nop
nop
nop
and $63037, %r11
mov $51, %rcx
rep movsw
nop
dec %rcx
// rep copy addresses_WC_ht -> addresses_WT_ht (62 qwords)
lea addresses_WC_ht+0xb4d2, %rsi
lea addresses_WT_ht+0x16722, %rdi
nop
nop
nop
nop
add %rbp, %rbp
mov $62, %rcx
rep movsq
nop
cmp $53205, %rdi
// rep copy addresses_D_ht -> addresses_UC_ht (105 bytes)
lea addresses_D_ht+0x1cd06, %rsi
lea addresses_UC_ht+0x5722, %rdi
nop
nop
nop
nop
dec %r8
mov $105, %rcx
rep movsb
and $52537, %r10
// 32-byte AVX store to addresses_D_ht
lea addresses_D_ht+0xc322, %r10
nop
nop
xor %rdi, %rdi
mov $0x6162636465666768, %r8
movq %r8, %xmm6
vmovups %ymm6, (%r10)
nop
nop
nop
nop
nop
and %rsi, %rsi
// 16-byte SSE store to addresses_A_ht
lea addresses_A_ht+0x17d22, %r11
nop
nop
nop
sub %r13, %r13
mov $0x6162636465666768, %rsi
movq %rsi, %xmm4
movups %xmm4, (%r11)
nop
nop
nop
nop
inc %r11
// dword load from addresses_WT_ht
lea addresses_WT_ht+0x7f22, %r11
nop
sub $31744, %r8
mov (%r11), %edi
nop
nop
nop
nop
xor $54660, %rsi
// 16-byte SSE store to addresses_WC_ht
lea addresses_WC_ht+0x12b22, %r8
nop
nop
nop
inc %rbp
mov $0x6162636465666768, %r11
movq %r11, %xmm7
movups %xmm7, (%r8)
nop
nop
nop
xor %r11, %r11
// byte store to addresses_normal_ht
lea addresses_normal_ht+0x180fa, %rcx
nop
nop
nop
cmp %rdi, %rdi
movb $0x61, (%rcx)
and $37364, %rcx
pop %rsi
pop %rdi
pop %rcx
pop %rbp
pop %r8
pop %r13
pop %r11
pop %r10
ret
.global s_faulty_load
// Machine-generated speculative-leak gadget (see the <gen_faulty_load>
// metadata below): performs two 32-byte stores, then a 16-byte load from
// the same addresses_UC location as the first store; the loaded value is
// folded into an index into the 'oracles' probe array so that it can be
// recovered via a cache side channel. Clobbered GPRs are saved/restored.
s_faulty_load:
push %r13
push %r14
push %r8
push %rbp
push %rbx
push %rcx
push %rdi
// Store
lea addresses_UC+0x1f322, %rbp
nop
nop
nop
sub %r14, %r14
mov $0x5152535455565758, %r8
movq %r8, %xmm7
vmovups %ymm7, (%rbp)
nop
nop
inc %r13
// Store
lea addresses_D+0x157ee, %rbp
nop
nop
nop
nop
nop
cmp %r8, %r8
mov $0x5152535455565758, %rbx
movq %rbx, %xmm2
vmovups %ymm2, (%rbp)
nop
nop
nop
nop
nop
inc %r13
// Faulty Load
lea addresses_UC+0x1f322, %rbx
nop
nop
nop
nop
and $13793, %rdi
movups (%rbx), %xmm7
vpextrq $1, %xmm7, %r14
// leak: use one byte of the loaded value to select a 4 KiB oracle page
lea oracles, %rbx
and $0xff, %r14
shlq $12, %r14
mov (%rbx,%r14,1), %r14
pop %rdi
pop %rcx
pop %rbx
pop %rbp
pop %r8
pop %r14
pop %r13
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'type': 'addresses_UC', 'AVXalign': False, 'congruent': 0, 'size': 2, 'same': False, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_UC', 'AVXalign': False, 'congruent': 0, 'size': 32, 'same': True, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_D', 'AVXalign': False, 'congruent': 0, 'size': 32, 'same': False, 'NT': False}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'type': 'addresses_UC', 'AVXalign': False, 'congruent': 0, 'size': 16, 'same': True, 'NT': False}}
<gen_prepare_buffer>
{'OP': 'LOAD', 'src': {'type': 'addresses_WT_ht', 'AVXalign': False, 'congruent': 1, 'size': 1, 'same': False, 'NT': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_A_ht', 'congruent': 11, 'same': True}, 'dst': {'type': 'addresses_WT_ht', 'congruent': 6, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_WC_ht', 'congruent': 4, 'same': False}, 'dst': {'type': 'addresses_WT_ht', 'congruent': 9, 'same': False}}
{'OP': 'REPM', 'src': {'type': 'addresses_D_ht', 'congruent': 2, 'same': False}, 'dst': {'type': 'addresses_UC_ht', 'congruent': 9, 'same': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_D_ht', 'AVXalign': False, 'congruent': 11, 'size': 32, 'same': False, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_A_ht', 'AVXalign': False, 'congruent': 9, 'size': 16, 'same': False, 'NT': False}}
{'OP': 'LOAD', 'src': {'type': 'addresses_WT_ht', 'AVXalign': False, 'congruent': 10, 'size': 4, 'same': False, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_WC_ht', 'AVXalign': False, 'congruent': 10, 'size': 16, 'same': False, 'NT': False}}
{'OP': 'STOR', 'dst': {'type': 'addresses_normal_ht', 'AVXalign': False, 'congruent': 2, 'size': 1, 'same': False, 'NT': False}}
{'00': 21829}
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
*/
|
SECTION rodata_clib
PUBLIC CRT_FONT
EXTERN _font_8x8_pmd85_system
; Export the PMD-85 system 8x8 font as the CRT font used by the library.
defc CRT_FONT = _font_8x8_pmd85_system
;=====================================================================================================================================
;Program Name: manager.asm *
;Programming Language: x86 Assembly *
;Program Description: This program asks the user to enter numbers into an array (followed by the enter key). *
;The user is to put as many numbers as they can inside the array until Ctrl+D is pressed, so the program can *
;stop the user from doing so. When the user enters in numbers, anything entered that is NOT an integer will *
;not be put inside of the array. If the array is full, the program automatically stops the user from entering input. *
;After the numbers are entered, the program returns the sum of all of the integers and how many items are put *
;in the array. *
;Author: Shaochen Ren *
;Email: renleo@csu.fullerton.edu *
;Institution: California State University, Fullerton *
;Course: CPSC 240-05 *
;Start Date: 20 September, 2020 *
;Copyright (C) 2020 Shaochen Ren *
;This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License *
;version 3 as published by the Free Software Foundation. *
;This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied *
;Warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. *
;A copy of the GNU General Public License v3 is available here: <https://www.gnu.org/licenses/>. ;======================================================================================================================================
;//
;//
;//
;//
;//===== Begin code area ===========================================================================================================
;External functions: C library I/O plus the sibling modules of this program
extern printf
extern scanf
extern input_array
extern display_array
extern array_sum
global manager
section .data
;Message strings and printf format strings (all NUL-terminated; 10 = '\n')
welcomeMessage db "Welcome to Arrays of Integers",10,0
authorIntro db "Brought to you by shaochenren",10,10,0
progExplanation db "This program will sum your array of integers",10,0
longIntFormat db "%ld",0
backIn db "Back in manager.", 10, 0
arrayDisplay db "These numbers were received and placed into the array:",10,0
resultSum db 10,"The sum of the %ld numbers in this array is %ld.",10,0
returnToMain db "The sum will now be returned to the main function",10,0
stringFormat db "%s", 0
section .bss
;Uninitialized storage: room for up to 100 qword (64-bit) integers
array: resq 100
section .text
;-----------------------------------------------------------------------
; long manager(void)
; ABI:   SysV AMD64
; Out:   rax = sum of the integers the user entered into the array
; Calls: printf, input_array, display_array, array_sum
; Notes: r13 keeps the element count and r12 keeps the sum across the
;        printf calls; both are callee-saved and are pushed/popped here.
;        rax is zeroed before each variadic printf call (no vector args).
;-----------------------------------------------------------------------
manager:
;Back up the general registers and rflags (rax, rip and rsp are not backed up)
push rbp
mov rbp, rsp ;now the base pointer(rbp) points to the top of the stack
push rdi
push rsi
push rdx
push rcx
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
push rbx
pushf ;backup rflags
;===============================;
;Print the welcome message
mov rdi, stringFormat
mov rsi, welcomeMessage
mov rax, 0
call printf
;===============================;
;Author introduction
mov rdi, stringFormat
mov rsi, authorIntro
mov rax, 0
call printf
;===============================;
;Program explanation
mov rdi, stringFormat
mov rsi, progExplanation
mov rax, 0
call printf
;================================;
;Fill the array from user input (input_array returns the element count)
mov rdi, array ;puts array into rdi to get access in other files
call input_array
mov r13, rax ;holds the size of the array in r13
;=================================;
;Echo the received numbers via display_array
mov rdi, array
mov rsi, r13
mov rax, 0
call display_array
;================================;
;Sum the array (array_sum returns the sum in rax)
mov rdi, array
mov rsi, r13 ;holds the size of the array in rsi as well
call array_sum
mov r12, rax ;BUGFIX: preserve the sum in a callee-saved register -- rax and
             ;rdx are clobbered by the printf calls below, and r12 was
             ;previously returned without ever having been assigned the sum
;==================================
;Display the sum
mov rdi, resultSum
mov rsi, r13
mov rdx, rax
mov rax, 0
call printf
;Announce that the sum is being returned to the caller
mov rdi, stringFormat
mov rsi, returnToMain
mov rax, 0
call printf
mov rax, r12 ;return value: the sum computed by array_sum
;Restore the original values to the general registers before returning to the caller
popf
pop rbx
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop rcx
pop rdx
pop rsi
pop rdi
pop rbp
ret
|
;********************************************************************************************************
; uC/OS-II
; The Real-Time Kernel
;
; Copyright 1992-2020 Silicon Laboratories Inc. www.silabs.com
;
; SPDX-License-Identifier: APACHE-2.0
;
; This software is subject to an open source license and is distributed by
; Silicon Laboratories Inc. pursuant to the terms of the Apache License,
; Version 2.0 available at www.apache.org/licenses/LICENSE-2.0.
;
;********************************************************************************************************
;********************************************************************************************************
;
; Renesas M32C Port
;
; Filename : os_cpu_a.asm
; Version : V2.93.00
;********************************************************************************************************
; For : Renesas M32C
; Toolchain : IAR's EW for M32C
;********************************************************************************************************
MODULE OS_CPU_A
;**********************************************************************************************
; Section Control
;**********************************************************************************************
RSEG CSTACK
RSEG ISTACK
RSEG UDATA0
EXTERN OSTCBCur ; Declared as OS_TCB *, 24-bit long
EXTERN OSTCBHighRdy ; Declared as OS_TCB *, 24-bit long
EXTERN OSPrioCur ; Declared as INT8U, 8-bit long
EXTERN OSPrioHighRdy ; Declared as INT8U, 8-bit long
EXTERN OSIntNesting ; Declared as INT8U, 8-bit long
EXTERN OSRunning ; Declared as Boolean (unsigned char), 8-bit long
RSEG CODE(1)
EXTERN OSIntExit ; External functions written in C
EXTERN OSTimeTick
EXTERN OSTaskSwHook
PUBLIC OSStartHighRdy
PUBLIC OSCtxSw
PUBLIC OSIntCtxSw
PUBLIC OSTickISR
;********************************************************************************************************
; START MULTITASKING
; void OSStartHighRdy(void)
;
; Note(s) : 1) OSStartHighRdy() MUST:
; a) Call OSTaskSwHook() then,
; b) Set OSRunning to TRUE,
; c) Switch to the highest priority task.
;********************************************************************************************************
.EVEN
OSStartHighRdy:
JSR OSTaskSwHook
MOV.L OSTCBHighRdy, A0 ; ISP = OSTCBHighRdy->OSTCBStkPtr
LDC [A0], ISP
MOV.B #01H, OSRunning ; OSRunning = TRUE
POPM R0,R1,R2,R3,A0,A1,SB,FB ; restore the task's initial register frame (presumably built by OSTaskStkInit -- confirm in os_cpu_c.c)
REIT ; pop FLG/PC from the task stack -> start executing the task
;********************************************************************************************************
; PERFORM A CONTEXT SWITCH (From task level) - OSCtxSw()
;
; Note(s) : 1) OSCtxSw() is called in SVC mode with BOTH FIQ and IRQ interrupts DISABLED.
;
; 2) The pseudo-code for OSCtxSw() is:
; a) Save the current task's context onto the current task's stack,
; b) OSTCBCur->OSTCBStkPtr = SP;
; c) OSTaskSwHook();
; d) OSPrioCur = OSPrioHighRdy;
; e) OSTCBCur = OSTCBHighRdy;
; f) SP = OSTCBHighRdy->OSTCBStkPtr;
; g) Restore the new task's context from the new task's stack,
; h) Return to new task's code.
;
; 3) Upon entry:
; OSTCBCur points to the OS_TCB of the task to suspend,
; OSTCBHighRdy points to the OS_TCB of the task to resume.
;
; 4) OSCtxSw must be mapped to interrupt #0 in the vector table.
;********************************************************************************************************
.EVEN
; Entered via software interrupt #0 (see note 4 above), so FLG/PC are
; already on the interrupt stack; PUSHM completes the context frame.
OSCtxSw:
PUSHM R0,R1,R2,R3,A0,A1,SB,FB
MOV.L OSTCBCur, A0 ; OSTCBCur->OSTCBStkPtr = SP
STC ISP, [A0]
JSR OSTaskSwHook ; OSTaskSwHook()
MOV.L OSTCBHighRdy, OSTCBCur ; OSTCBCur = OSTCBHighRdy
MOV.B OSPrioHighRdy, OSPrioCur ; OSPrioCur = OSPrioHighRdy
MOV.L OSTCBHighRdy, A0 ; SP = OSTCBHighRdy->OSTCBStkPtr
LDC [A0], ISP
POPM R0,R1,R2,R3,A0,A1,SB,FB ; Restore all processor registers from the new task's stack
REIT
;********************************************************************************************************
; PERFORM A CONTEXT SWITCH (From interrupt level) - OSIntCtxSw()
;
; Note(s) : 1) OSIntCtxSw() is called in SVC mode with BOTH FIQ and IRQ interrupts DISABLED.
;
; 2) The pseudo-code for OSCtxSw() is:
; a) OSTaskSwHook();
; b) OSPrioCur = OSPrioHighRdy;
; c) OSTCBCur = OSTCBHighRdy;
; d) SP = OSTCBHighRdy->OSTCBStkPtr;
; e) Restore the new task's context from the new task's stack,
; f) Return to new task's code.
;
; 3) Upon entry:
; OSTCBCur points to the OS_TCB of the task to suspend,
; OSTCBHighRdy points to the OS_TCB of the task to resume.
;********************************************************************************************************
.EVEN
; Called from OSIntExit(): the interrupted task's registers were already
; saved by the ISR entry, so no PUSHM / stack-pointer save is needed here.
OSIntCtxSw:
JSR OSTaskSwHook ; OSTaskSwHook()
MOV.L OSTCBHighRdy, OSTCBCur ; OSTCBCur = OSTCBHighRdy
MOV.B OSPrioHighRdy, OSPrioCur ; OSPrioCur = OSPrioHighRdy
MOV.L OSTCBHighRdy, A0 ; SP = OSTCBHighRdy->OSTCBStkPtr
LDC [A0], ISP
POPM R0,R1,R2,R3,A0,A1,SB,FB ; Restore all processor registers from the new task's stack
REIT
;********************************************************************************************************
; uC/OS-II TIME TICK ISR
; void OSTickISR(void)
;
; Note(s) : 1) OSTickISR() should be placed on the appropriate interrupt vector.
;
; 2) Pseudo code:
; a) Save all registers
; b) OSIntNesting++
; c) if (OSIntNesting == 1) {
; OSTCBCur->OSTCBStkPtr = SP
; }
; d) OSTimeTick();
; e) OSIntExit();
; f) Restore all registers
; g) Return from interrupt;
;********************************************************************************************************
.EVEN
OSTickISR:
PUSHM R0,R1,R2,R3,A0,A1,SB,FB ; Save current task's registers
INC.B OSIntNesting ; OSIntNesting++
CMP.B #1,OSIntNesting ; if (OSIntNesting == 1) {
JNE OSTickISR1 ; only the outermost ISR records the task's stack pointer
MOV.L OSTCBCur, A0 ; OSTCBCur->OSTCBStkPtr = SP
STC ISP, [A0] ; }
OSTickISR1:
JSR OSTimeTick ; OSTimeTick()
JSR OSIntExit ; OSIntExit() -- performs OSIntCtxSw() if a higher-priority task is ready
POPM R0,R1,R2,R3,A0,A1,SB,FB ; Restore registers from the new task's stack
REIT
END
|
; CRT0 (rom) stub for the SEGA SC-3000/SG-1000
;
; Stefano Bodrato - Jun 2010
;
; $Id: sc3000_crt0.asm,v 1.18 2016-07-13 22:12:25 dom Exp $
;
; Memory map: 32k ROM at $0000, 2k RAM at $C000, stack top at $C400.
defc ROM_Start = $0000
defc RAM_Start = $C000
defc RAM_Length = $0800
defc Stack_Top = $c400
defc CRT_ORG_CODE = ROM_Start
defc TAR__register_sp = Stack_Top
defc TAR__clib_exit_stack_size = 0
defc TAR__fgetc_cons_inkey = 1
defc __CPU_CLOCK = 3580000
; VDP signals delivered to im1
defc TAR__crt_enable_rst = $8080
defc _z80_rst_38h = tms9918_interrupt
; NMI is delivered by BREAK on the keyboard
IFNDEF CRT_ENABLE_NMI
defc TAR__crt_enable_nmi = 1
EXTERN asm_nmi_handler
defc _z80_nmi = asm_nmi_handler
ENDIF
INCLUDE "crt/classic/crt_rules.inc"
EXTERN msx_set_mode
EXTERN im1_vectors
EXTERN asm_interrupt_handler
org CRT_ORG_CODE
; ROM entry point must sit at $0000; fail the build otherwise
if (ASMPC<>$0000)
defs CODE_ALIGNMENT_ERROR
endif
di
jp program
INCLUDE "crt/classic/crt_z80_rsts.asm"
; Interrupt routine, defines tms9918_interrupt
INCLUDE "crt/classic/tms9918/interrupt.asm"
; NOTE(review): this ei/reti pair sits between the include and int_VBL;
; presumably it terminates the handler emitted by interrupt.asm -- confirm
; against that include before moving it.
ei
reti
int_VBL:
ld hl,im1_vectors
call asm_interrupt_handler
pop hl
pop af
ei
reti
; Startup proper: set SP, init atexit and BSS, then enter main()
program:
INCLUDE "crt/classic/crt_init_sp.asm"
INCLUDE "crt/classic/crt_init_atexit.asm"
call crt0_init_bss
ld (exitsp),sp
IF DEFINED_USING_amalloc
INCLUDE "crt/classic/crt_init_amalloc.asm"
ENDIF
; Initialise mode 2 by default
ld hl,2
call msx_set_mode
im 1
; ei
; Entry to the user code
call _main
cleanup:
push hl
call crt0_exit
endloop:
jr endloop
defc __crt_org_bss = RAM_Start
; If we were given a model then use it
IF DEFINED_CRT_MODEL
defc __crt_model = CRT_MODEL
ELSE
defc __crt_model = 1
ENDIF
|
;Example of fcntl API. API calls found in this example program:
; open, close, fcntl, exit
; High level description of what this example program does:
; Opens a file with the open() API.
; Changes the flags for the file to NOATIME, APPEND, and WRONLY using fcntl() F_SETFL
; Checks the flags of the file using fcntl() F_GETFL (making sure the previously set value gets returned into EAX)
; closes the file handle with close()
; exits program with exit()
section .text
global _start
_start:
; Open the file. NOTE: flags value 2 is O_RDWR (see the flag table below),
; not read-only as a previous comment claimed.
;------------------------------------------------------------------------------
mov eax, 5 ;open
mov ebx, newfile ;pointer to the filename
mov ecx, 2 ;flags: O_RDWR
int 0x80
mov [filehandle], eax ;save filehandle
; Change the status flags to WRONLY + APPEND + NOATIME via fcntl(F_SETFL)
;------------------------------------------------------------------------------
mov eax, 55 ;fcntl
mov ebx, [filehandle]
mov ecx, 4 ;SETFL
mov edx, 1002001o ;NOATIME, APPEND, WRONLY (octal: 1000000 + 2000 + 1)
int 0x80
; Read the file mode back to verify that it took (result returned in eax)
;------------------------------------------------------------------------------
mov eax, 55 ;fcntl
mov ebx, [filehandle]
mov ecx, 3 ;GETFL
int 0x80
; Now close the file
;------------------------------------------------------------------------------
mov ebx, [filehandle] ;get filehandle return value
mov eax, 6 ;close
int 0x80
; Exit program (ebx still holds the file handle, so the exit status is
; whatever that value is -- not an explicit 0)
;------------------------------------------------------------------------------
mov eax, 1
int 0x80
section .data
newfile db 'newfile.txt', 0x00
section .bss
filehandle resb 4
; ------------------------------
; | Some bitfield explanations |
; ------------------------------
; fctl commands (goes in ecx)
;------------------------------------------------------------------------------
; 0 F_DUPFD (needs argument)
; 1 F_GETFD
; 2 F_SETFD (needs argument)
; 3 F_GETFL
; 4 F_SETFL (needs permissions as argument)
; 5 F_GETLK (needs argument, address)
; 6 F_SETLK (needs argument, address)
; 7 F_SETLKW (needs argument)
; 8 F_SETOWN (needs argument)
; 9 F_GETOWN
; 10 F_SETSIG (needs argument)
; 11 F_GETSIG (needs argument)
; Mode Octal codes
;------------------------------------------------------------------------------
; Read 4
; Write 2
; Execute 1
; Flags octal codes, ones with *'s can be modified with SETFL
;------------------------------------------------------------------------------
; O_RDONLY 0
; O_WRONLY 1
; O_RDWR 2
; O_ACCMODE 3
; O_CREAT 100
; O_EXCL 200 (Create file exclusively, with CREAT; call fails if file already exists)
; O_NOCTTY 400 (Don’t let pathname become the controlling terminal)
; O_TRUNC 1000 (Truncate existing file to zero length)
; *O_APPEND 2000 (Append mode, also mitigates race conditions that lseek with SEEK_END doesn't)
; *O_NONBLOCK 4000 (Open in nonblocking mode)
; O_DSYNC 10000 (Provide synchronized I/O data integrity)
; *O_ASYNC 20000 (Generate a signal when I/O is possible)
; *O_DIRECT 40000 (File I/O bypasses buffer cache)
; O_LARGEFILE 100000
; O_DIRECTORY 200000 (Fail if pathname is not a directory)
; O_NOFOLLOW 400000 (Don't dereference symbolic links)
; *O_NOATIME 1000000 (Don't update access time with read syscall)
; O_CLOEXEC 2000000 (Set the close-on-exec flag)
; O_SYNC 4010000 (Make file writes synchronous)
; O_PATH 10000000
; O_TMPFILE 20200000
|
; A258321: a(n) = Fibonacci(n) + n*Lucas(n).
; 0,2,7,14,31,60,116,216,397,718,1285,2278,4008,7006,12179,21070,36299,62304,106588,181812,309305,524942,888977,1502474,2534736,4269050,7178911,12054926,20215927,33859908,56646980,94667088,158045413,263604046,439272349,731390830,1216800504,2022843094,3360432683,5578716622,9255379235,15345766632,25429046572,42114384684,69710903441,115332331790,190717751081,315231642386,520805468832,860074746098,1419773925175,2342780016398,3864378996943,6371915413836,10502875866644,17306129136840,28506924315709,46942310621134,77276411417653,127175155688182,209235177236040,344150376703822,565909207849667,930323282242894,1529019841691771,2512394173223280,4127252415802876,6778536039202596,11130516306069737,18272669646513038,29991531104887745,49216163204946458,80748001915684848,132456435180027626,217237014760959439,356218297665630350,584012737816480039,957313308596643924,1568965744917547748,2571001025133148992,4212328440174077845,6900413107049565262,11302186859089362637,18509128919746985854,30307290044310125976,49618922183138947270,81224689712004686171,132944592598781081806,217568740498978829267,356013771989590420728,582482409568592820940,952896517530037322268,1558679160150507794753,2549276246704276849166,4168956208930394027609,6816933826734011992610,11145592208839356519744,18220929579847660127714,29784627506136258762343,48682066347707452620302
; LODA program: $0 = input n, result is returned in $0.
mov $1,$0    ; $1 = loop counter (n iterations)
lpb $1
sub $1,1
add $2,2     ; $2 grows by 2 each pass before being rebuilt below
mov $3,$0    ; remember the current accumulator value
add $4,$2
mov $0,$4    ; $0/$4 carry the running sum forward
mov $2,$3
add $2,$1    ; NOTE(review): recurrence mirrors the OEIS formula only per the sequence data above -- verify with the LODA interpreter
lpe
|
/*
* Copyright (C) 2005-2017 Centre National d'Etudes Spatiales (CNES)
*
* This file is part of Orfeo Toolbox
*
* https://www.orfeo-toolbox.org/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "otbWrapperQtWidgetRAMParameter.h"
namespace otb
{
namespace Wrapper
{
// Construct the Qt widget wrapper for a RAM parameter.  The parameter
// pointer is kept so DoCreateWidget()/DoUpdateGUI()/SetValue() can read
// and write its value; ownership is not taken here.
QtWidgetRAMParameter::QtWidgetRAMParameter(RAMParameter* param, QtWidgetModel* m)
    : QtWidgetParameterBase(param, m),
      m_RAMParam(param)
{
}
// Destructor: nothing to release explicitly — child widgets/layouts are
// reclaimed by Qt's parent-child ownership.
QtWidgetRAMParameter::~QtWidgetRAMParameter()
{
}
// Build the widget: a spin box inside a horizontal layout.  Edits made in
// the spin box are pushed into the parameter via SetValue(int) and the
// model is notified on every change.
void QtWidgetRAMParameter::DoCreateWidget()
{
    // Set up input text edit
    m_QHBoxLayout = new QHBoxLayout;
    m_QHBoxLayout->setSpacing(0);
    m_QHBoxLayout->setContentsMargins(0, 0, 0, 0);
    m_QSpinBox = new QSpinBox;
    m_QSpinBox->setToolTip(m_RAMParam->GetDescription());
    // Keep the parameter and the model in sync with user edits.
    connect( m_QSpinBox, SIGNAL(valueChanged(int)), this, SLOT(SetValue(int)) );
    connect( m_QSpinBox, SIGNAL(valueChanged(int)), GetModel(), SLOT(NotifyUpdate()) );
    // Set a valid range
    // Using m_UnsignedIntParam->GetMaximum() to set the maximum range
    // of the SpinBox give a maximum of 0. The SpinBox max is an
    // integer and m_UnsignedIntParam->GetMaximum() returns an unsigned
    // integer which is 2 times the itk::NumericTraits<int>::max().
    // static_cast<int>(m_UnsignedIntParam->GetMaximum()) = 0 then.
    m_QSpinBox->setRange(itk::NumericTraits<int>::Zero,
                         itk::NumericTraits<int>::max());
    m_QHBoxLayout->addWidget(m_QSpinBox);
    m_QHBoxLayout->addStretch();
    this->setLayout(m_QHBoxLayout);
}
void QtWidgetRAMParameter::DoUpdateGUI()
{
bool blocked = m_QSpinBox->blockSignals( true );
if (m_RAMParam->HasValue())
{
m_QSpinBox->setValue(static_cast<int>(m_RAMParam->GetValue()));
}
m_QSpinBox->blockSignals( blocked );
QFont f = m_QSpinBox->font();
if (m_RAMParam->HasUserValue())
{
f.setBold(true);
}
else
{
f.setBold(false);
}
m_QSpinBox->setFont(f);
}
// Slot invoked on spin-box edits: store the value in the parameter and
// mark it as user-chosen (no longer automatically computed).
void QtWidgetRAMParameter::SetValue(int value)
{
    m_RAMParam->SetValue(static_cast<unsigned int>(value));
    // Value changed by the user: flag it so DoUpdateGUI() renders it bold.
    m_RAMParam->SetUserValue(true);
    m_RAMParam->SetAutomaticValue(false);
}
}
}
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE121_Stack_Based_Buffer_Overflow__CWE805_char_declare_memcpy_73a.cpp
Label Definition File: CWE121_Stack_Based_Buffer_Overflow__CWE805.string.label.xml
Template File: sources-sink-73a.tmpl.cpp
*/
/*
* @description
* CWE: 121 Stack Based Buffer Overflow
* BadSource: Set data pointer to the bad buffer
* GoodSource: Set data pointer to the good buffer
* Sinks: memcpy
* BadSink : Copy string to data using memcpy
* Flow Variant: 73 Data flow: data passed in a list from one function to another in different source files
*
* */
#include "std_testcase.h"
#include <list>
#include <wchar.h>
using namespace std;
namespace CWE121_Stack_Based_Buffer_Overflow__CWE805_char_declare_memcpy_73
{
#ifndef OMITBAD
/* bad function declaration */
void badSink(list<char *> dataList);
// NOTE(review): this is a generated Juliet CWE-121 test case.  The
// undersized destination buffer below is the *intentional* flaw being
// tested — it must not be "fixed".
void bad()
{
    char * data;
    list<char *> dataList;
    char dataBadBuffer[50];
    char dataGoodBuffer[100];   // present for template symmetry; unused here
    /* FLAW: Set a pointer to a "small" buffer. This buffer will be used in the sinks as a destination
     * buffer in various memory copying functions using a "large" source buffer. */
    data = dataBadBuffer;
    data[0] = '\0'; /* null terminate */
    /* Put data in a list */
    dataList.push_back(data);
    dataList.push_back(data);
    dataList.push_back(data);
    badSink(dataList);          // sink (other file) memcpy's a 100-byte source into data
}
#endif /* OMITBAD */
#ifndef OMITGOOD
/* good function declarations */
/* goodG2B uses the GoodSource with the BadSink */
void goodG2BSink(list<char *> dataList);
// Good variant: same data flow as bad(), but the pointer targets the
// adequately sized buffer, so the sink's copy fits.
static void goodG2B()
{
    char * data;
    list<char *> dataList;
    char dataBadBuffer[50];     // present for template symmetry; intentionally unused
    char dataGoodBuffer[100];
    /* FIX: Set a pointer to a "large" buffer, thus avoiding buffer overflows in the sinks. */
    data = dataGoodBuffer;
    data[0] = '\0'; /* null terminate */
    /* Put data in a list */
    dataList.push_back(data);
    dataList.push_back(data);
    dataList.push_back(data);
    goodG2BSink(dataList);
}
// Entry point for the non-vulnerable variants of this test case.
void good()
{
    goodG2B();
}
#endif /* OMITGOOD */
} /* close namespace */
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
#ifdef INCLUDEMAIN
using namespace CWE121_Stack_Based_Buffer_Overflow__CWE805_char_declare_memcpy_73; /* so that we can use good and bad easily */
// Standalone driver used only when the testcase is built by itself; runs
// the good variants, then the bad (overflowing) one.
int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}
#endif
|
/**
* Copyright (C) 2009-2015 MongoDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* As a special exception, the copyright holders give permission to link the
* code of portions of this program with the OpenSSL library under certain
* conditions as described in each individual source file and distribute
* linked combinations including the program with the OpenSSL library. You
* must comply with the GNU Affero General Public License in all respects for
* all of the code used other than as permitted herein. If you modify file(s)
* with this exception, you may extend this exception to your version of the
* file(s), but you are not obligated to do so. If you do not wish to do so,
* delete this exception statement from your version. If you delete this
* exception statement from all source files in the program, then also delete
* it in the license file.
*/
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kNetwork
#include "mongo/platform/basic.h"
#include "mongo/client/connection_string.h"
#include "mongo/base/status_with.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
// Single-server connection string (type MASTER).
ConnectionString::ConnectionString(const HostAndPort& server) : _type(MASTER) {
    _servers.push_back(server);
    _finishInit();
}
// Replica-set connection string (type SET): a set name plus its members.
ConnectionString::ConnectionString(StringData setName, std::vector<HostAndPort> servers)
    : _type(SET), _servers(std::move(servers)), _setName(setName.toString()) {
    _finishInit();
}
// Construct from an explicit type plus a comma-separated host string;
// invariants per type are enforced with verify().
ConnectionString::ConnectionString(ConnectionType type,
                                   const std::string& s,
                                   const std::string& setName) {
    _type = type;
    _setName = setName;
    _fillServers(s);    // may itself flip _type to CUSTOM/SET based on the string
    switch (_type) {
        case MASTER:
            verify(_servers.size() == 1);
            break;
        case SET:
            verify(_setName.size());
            verify(_servers.size() >= 1);  // 1 is ok since we can derive
            break;
        default:
            verify(_servers.size() > 0);
    }
    _finishInit();
}
// Construct from a host string, inferring the type: _fillServers() may set
// it (CUSTOM/SET); a single host means MASTER; otherwise fall back to the
// caller-supplied multi-host type (must be SET or SYNC).
ConnectionString::ConnectionString(const std::string& s, ConnectionType favoredMultipleType) {
    _fillServers(s);
    if (_type != INVALID) {
        // set already
    } else if (_servers.size() == 1) {
        _type = MASTER;
    } else {
        _type = favoredMultipleType;
        verify(_type == SET || _type == SYNC);
    }
    _finishInit();
}
// Named factory for a replica-set connection string.
ConnectionString ConnectionString::forReplicaSet(StringData setName,
                                                 std::vector<HostAndPort> servers) {
    return ConnectionString(setName, std::move(servers));
}
// Parse "setName/host1,host2,..." (or just hosts) into _servers, setting
// _type to CUSTOM for '$'-prefixed strings and to SET when a '/' prefix
// names a replica set.  `s` is taken by value and consumed destructively.
void ConnectionString::_fillServers(std::string s) {
    //
    // Custom-handled servers/replica sets start with '$'
    // According to RFC-1123/952, this will not overlap with valid hostnames
    // (also disallows $replicaSetName hosts)
    //
    if (s.find('$') == 0)
        _type = CUSTOM;
    {
        // Optional "setName/" prefix.
        std::string::size_type idx = s.find('/');
        if (idx != std::string::npos) {
            _setName = s.substr(0, idx);
            s = s.substr(idx + 1);
            if (_type != CUSTOM)
                _type = SET;
        }
    }
    // Split remaining comma-separated host list.
    std::string::size_type idx;
    while ((idx = s.find(',')) != std::string::npos) {
        _servers.push_back(HostAndPort(s.substr(0, idx)));
        s = s.substr(idx + 1);
    }
    _servers.push_back(HostAndPort(s));
}
void ConnectionString::_finishInit() {
// Needed here as well b/c the parsing logic isn't used in all constructors
// TODO: Refactor so that the parsing logic *is* used in all constructors
if (_type == MASTER && _servers.size() > 0) {
if (_servers[0].host().find('$') == 0) {
_type = CUSTOM;
}
}
std::stringstream ss;
if (_type == SET) {
ss << _setName << "/";
}
for (unsigned i = 0; i < _servers.size(); i++) {
if (i > 0) {
ss << ",";
}
ss << _servers[i].toString();
}
_string = ss.str();
}
// True when two connection strings refer to the same logical endpoint.
// Comparison rules depend on the type: MASTER compares the first host,
// SET compares only the set name, SYNC compares host membership
// order-insensitively, CUSTOM compares the raw string.
bool ConnectionString::sameLogicalEndpoint(const ConnectionString& other) const {
    if (_type != other._type) {
        return false;
    }
    switch (_type) {
        case INVALID:
            return true;
        case MASTER:
            return _servers[0] == other._servers[0];
        case SET:
            return _setName == other._setName;
        case SYNC:
            // The servers all have to be the same in each, but not in the same order.
            // NOTE(review): duplicates are not counted — each element of *this
            // merely has to appear somewhere in `other` (sizes being equal), so
            // this is weaker than a true multiset comparison.
            if (_servers.size() != other._servers.size()) {
                return false;
            }
            for (unsigned i = 0; i < _servers.size(); i++) {
                bool found = false;
                for (unsigned j = 0; j < other._servers.size(); j++) {
                    if (_servers[i] == other._servers[j]) {
                        found = true;
                        break;
                    }
                }
                if (!found)
                    return false;
            }
            return true;
        case CUSTOM:
            return _string == other._string;
    }
    MONGO_UNREACHABLE;
}
// Legacy errmsg-style wrapper over the StatusWith overload: on failure the
// error text is reported through `errmsg` and a default-constructed
// (INVALID) ConnectionString is returned.
ConnectionString ConnectionString::parse(const std::string& url, std::string& errmsg) {
    auto swResult = parse(url);
    if (!swResult.isOK()) {
        errmsg = swResult.getStatus().toString();
        return ConnectionString();
    }
    errmsg.clear();
    return swResult.getValue();
}
// Parse a URL into a ConnectionString: "set/host,..." => replica set,
// a single host => MASTER, exactly three comma-separated hosts => SYNC
// (legacy sharding config servers); anything else is FailedToParse.
StatusWith<ConnectionString> ConnectionString::parse(const std::string& url) {
    const std::string::size_type i = url.find('/');
    // Replica set
    if (i != std::string::npos && i != 0) {
        return ConnectionString(SET, url.substr(i + 1), url.substr(0, i));
    }
    const int numCommas = str::count(url, ',');
    // Single host
    if (numCommas == 0) {
        HostAndPort singleHost;
        Status status = singleHost.initialize(url);
        if (!status.isOK()) {
            return status;
        }
        return ConnectionString(singleHost);
    }
    // Sharding config server
    if (numCommas == 2) {
        return ConnectionString(SYNC, url, "");
    }
    return Status(ErrorCodes::FailedToParse, str::stream() << "invalid url [" << url << "]");
}
// Human-readable name for a ConnectionType; exhaustive over the enum
// (MONGO_UNREACHABLE guards against future additions).
std::string ConnectionString::typeToString(ConnectionType type) {
    switch (type) {
        case INVALID:
            return "invalid";
        case MASTER:
            return "master";
        case SET:
            return "set";
        case SYNC:
            return "sync";
        case CUSTOM:
            return "custom";
    }
    MONGO_UNREACHABLE;
}
} // namespace mongo
|
#pragma once
#include "./Boilerplate/Boilerplate.hpp"
#include "./ecs/Utils/Utils.hpp" |
; DOS demo of packed-BCD subtraction via DAS: 85 - 48 (BCD) -> 37 (BCD).
.model small
.stack 100h          ; explicit stack segment (was missing)
.data
.code
main proc
    mov  bl, 48h     ; BL = packed BCD 48
    mov  al, 85h     ; AL = packed BCD 85
    sub  al, bl      ; binary subtract: AL = 3Dh
    das              ; decimal adjust after subtraction: AL = 37h (BCD 85-48)
    mov  ah, 4Ch     ; DOS terminate-program function; without this the CPU
    int  21h         ; would run off the end of the code segment
main endp            ; MASM requires the procedure name on ENDP (was bare `endp`)
end main
/*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| license@php.net so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include <algorithm>
#include <cxxabi.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <boost/algorithm/string/predicate.hpp>
#include "folly/FBString.h"
#include "folly/Format.h"
#include "folly/ScopeGuard.h"
#include "hphp/tools/bootstrap/idl.h"
using folly::fbstring;
using namespace HPHP::IDL;
using namespace HPHP;
// Lookup tables populated in main() from the parsed IDL files:
// C++ signature -> IDL function, and class name -> IDL class
// (class entries are erased once their ctor/dtor helpers are emitted).
std::unordered_map<fbstring, const PhpFunc*> g_mangleMap;
std::unordered_map<fbstring, const PhpClass*> g_classMap;
// Functions with return types that don't fit in registers are handled
// differently on ARM. Instead of using a pointer-to-return-value-space hidden
// first parameter like on x64, the pointer is passed in a register reserved for
// this purpose, not part of the normal argument sequence.
bool g_armMode = false;
// Boilerplate #includes emitted at the top of the generated extension .cpp.
// NOTE: a string literal has type `const char[N]`; binding it to a plain
// `char*` is ill-formed since C++11, so the pointee must be const.
constexpr const char* g_allIncludes = R"(
#include "hphp/runtime/ext_hhvm/ext_hhvm.h"
#include "hphp/runtime/base/builtin-functions.h"
#include "hphp/runtime/base/array-init.h"
#include "hphp/runtime/ext/ext.h"
#include "hphp/runtime/vm/class.h"
#include "hphp/runtime/vm/runtime.h"
#include <exception>
)";
///////////////////////////////////////////////////////////////////////////////
// Code emission helpers -- leaf functions
// Emit the new_<Class>_Instance() helper for `className`: allocates space
// for the ObjectData header, declared properties, and the builtin C++
// payload, then placement-news the c_<Class> instance.
void emitCtorHelper(const fbstring& className, std::ostream& out) {
    out << folly::format(
        // {0:s} = className; {{ / }} are literal braces in folly::format.
R"(
ObjectData* new_{0:s}_Instance(HPHP::Class* cls) {{
size_t nProps = cls->numDeclProperties();
size_t builtinObjSize = sizeof(c_{0:s}) - sizeof(ObjectData);
size_t size = ObjectData::sizeForNProps(nProps) + builtinObjSize;
return new (MM().objMallocLogged(size)) c_{0:s}(cls);
}})",
        className) << "\n\n";
}
// Emit the delete_<Class>() helper: runs the C++ destructor, decrefs each
// declared property, then frees via the size-appropriate smart allocator.
void emitDtorHelper(const fbstring& className, std::ostream& out) {
    out << folly::format(
        // {0:s} = className; {{ / }} are literal braces in folly::format.
R"(
void delete_{0:s}(ObjectData* obj, const Class* cls) {{
auto const ptr = static_cast<c_{0:s}*>(obj);
ptr->~c_{0:s}();
auto const nProps = cls->numDeclProperties();
auto const propVec = reinterpret_cast<TypedValue*>(ptr + 1);
for (auto i = Slot{{0}}; i < nProps; ++i) {{
tvRefcountedDecRef(&propVec[i]);
}}
auto const builtinSz = sizeof(c_{0:s}) - sizeof(ObjectData);
auto const size = ObjectData::sizeForNProps(nProps) + builtinSz;
if (LIKELY(size <= kMaxSmartSize)) {{
return MM().smartFreeSizeLogged(ptr, size);
}}
return MM().smartFreeSizeBigLogged(ptr, size);
}})",
        className) << "\n\n";
}
/*
* Emits a declaration that corresponds to the f_* functions, but with types and
* signatures adjusted to reflect the underlying C++ ABI.
*/
// Emit a C declaration for `func` under `prefix` (fh_/th_) whose parameter
// and return types reflect the underlying C++ ABI, aliased via asm("...")
// to the given mangled symbol.  Parameter order: hidden return pointer
// (non-ARM indirect returns only), this_, _argc, declared params, _argv.
void emitRemappedFuncDecl(const PhpFunc& func,
                          const fbstring& mangled,
                          const fbstring& prefix,
                          std::ostream& out) {
    int returnKindOf = func.returnKindOf();
    bool indirectReturn = func.isIndirectReturn();
    // Indirect returns come back through a pointer; KindOfAny needs the full
    // TypedValue, anything else just the Value payload.
    if (indirectReturn) {
        if (returnKindOf == KindOfAny) {
            out << "TypedValue* ";
        } else {
            out << "Value* ";
        }
    } else {
        out << func.returnCppType() << ' ';
    }
    out << prefix << func.getUniqueName() << '(';
    bool isFirstParam = true;
    // On ARM the return-space pointer travels in x8, not as a parameter.
    if (!g_armMode && indirectReturn) {
        if (func.returnKindOf() == KindOfAny) {
            out << "TypedValue* _rv";
        } else {
            out << "Value* _rv";
        }
        isFirstParam = false;
    }
    if (func.usesThis()) {
        if (!isFirstParam) {
            out << ", ";
        }
        out << "c_" << func.className() << "* this_";
        isFirstParam = false;
    }
    if (func.isVarArgs()) {
        if (!isFirstParam) {
            out << ", ";
        }
        out << "int64_t _argc";
        isFirstParam = false;
    }
    for (auto const& param : func.params()) {
        if (!isFirstParam) {
            out << ", ";
        }
        auto kindof = param.kindOf();
        if (param.isIndirectPass()) {
            if (kindof == KindOfAny || kindof == KindOfRef) {
                out << "TypedValue*";
            } else {
                out << "Value*";
            }
        } else {
            out << param.getCppType();
        }
        out << ' ' << param.name();
        isFirstParam = false;
    }
    if (func.isVarArgs()) {
        assert(!isFirstParam);
        out << ", Value* _argv";
    }
    // asm() alias binds this declaration to the real mangled C++ symbol.
    out << ") asm(\""
        << mangled << "\");\n\n";
}
// Emit the opening of a Zend-compatible coercion-failure branch: try to
// coerce arg `index` in place; on failure raise the parameter-type warning.
// The branch is closed by emitZendParamSuffix().  `ind` is the indentation
// prefix for each generated line.
static void emitZendParamPrefix(std::ostream& out,
                                int32_t index,
                                const PhpParam& param,
                                const char* ind) {
    out << ind << "if (!"
        << "tvCoerceParamTo" << kindOfString(param.kindOf())
        << "InPlace(args-" << index << ")) {\n"
        << ind << " raise_param_type_warning(__func__, " << index << " + 1, "
        << "KindOf" << kindOfString(param.kindOf()) << ", "
        << "(args-" << index << ")->m_type);\n";
}
// Closes the coercion-failure branch opened by emitZendParamPrefix():
// emits the early `return;` and the closing brace, each prefixed by `ind`.
static void emitZendParamSuffix(std::ostream& out, const char* ind) {
    out << ind << " return;\n";
    out << ind << "}\n";
}
// Emit code that casts/coerces VM-stack argument `index` to the param's
// declared type.  When `doCheck` is set, the cast is wrapped in a type
// check so it only runs on mismatch.  NOTE: `ind` points into a string of
// spaces; `ind -= 2` *widens* the indentation by walking the pointer two
// characters back (see the eightSpaces idiom in the callers).
void emitCast(const PhpParam& param, int32_t index, std::ostream& out,
              const char* ind, bool doCheck) {
    if (doCheck) {
        out << ind << "if (";
        // Strings accept both static and refcounted string types.
        if (param.kindOf() == KindOfString) {
            out << "!IS_STRING_TYPE((args-" << index << ")->m_type)";
        } else {
            out << "(args-" << index << ")->m_type != KindOf"
                << kindOfString(param.kindOf());
        }
        out << ") {\n";
        ind -= 2;   // indent one level deeper inside the if
    }
    // Zend-compat modes bail out of the stub (returning null/false) when
    // coercion fails; otherwise emit an unconditional in-place cast.
    if (param.getParamMode() == ParamMode::ZendNull) {
        emitZendParamPrefix(out, index, param, ind);
        out << ind << " rv->m_type = KindOfUninit;\n";
        emitZendParamSuffix(out, ind);
    } else if (param.getParamMode() == ParamMode::ZendFalse) {
        emitZendParamPrefix(out, index, param, ind);
        out << ind << " rv->m_type = KindOfBoolean;\n"
            << ind << " rv->m_data.num = 0;\n";
        emitZendParamSuffix(out, ind);
    } else if (param.kindOf() == KindOfObject && param.hasDefault()) {
        out << ind << "tvCastToNullableObject"
            << "InPlace(args-" << index << ");\n";
    } else if (param.kindOf() != KindOfAny) {
        out << ind << "tvCastTo" << kindOfString(param.kindOf())
            << "InPlace(args-" << index << ");\n";
    }
    if (doCheck) {
        ind += 2;   // restore the outer indentation level
        out << ind << "}\n";
    }
}
/*
* Emits an expression which will check the types of arguments on the VM stack.
*/
// Emit a &&-joined boolean expression checking the types of the VM-stack
// arguments, iterating from the last parameter down.  KindOfAny/KindOfRef
// params accept anything and are skipped; optional params are guarded by
// a `count <= k ||` clause so missing args pass trivially.
void emitTypechecks(const PhpFunc& func, std::ostream& out, const char* ind) {
    bool isFirstParam = true;
    for (int k = func.numParams() - 1; k >= 0; --k) {
        auto const& param = func.param(k);
        auto kindof = param.kindOf();
        if (kindof == KindOfAny || kindof == KindOfRef) {
            continue;
        }
        if (!isFirstParam) {
            out << " &&\n" << ind << " ";
        }
        isFirstParam = false;
        bool isOptional = (k >= func.minNumParams());
        if (isOptional) {
            out << "(count <= " << k << " || ";
        }
        if (kindof == KindOfString) {
            out << "IS_STRING_TYPE((args - " << k << ")->m_type)";
        } else {
            out << "(args - " << k << ")->m_type == KindOf" << kindOfString(kindof);
        }
        if (isOptional) {
            out << ")";
        }
    }
}
/*
* Marshals varargs into an array.
*/
// Emit code that gathers arguments beyond the declared parameter count
// into an `extraArgs` Array, preserving by-reference bindings.
// {0} = indentation, {1} = declared parameter count.
void emitBuildExtraArgs(const PhpFunc& func, std::ostream& out,
                        const char* ind) {
    out << folly::format(
R"(
{0}Array extraArgs;
{0}if (count > {1}) {{
{0} ArrayInit ai((size_t)count-{1}, ArrayInit::Mixed{{}});
{0} for (int32_t i = {1}; i < count; ++i) {{
{0} TypedValue* extraArg = ar->getExtraArg(i-{1});
{0} if (tvIsStronglyBound(extraArg)) {{
{0} ai.setRef(i-{1}, tvAsVariant(extraArg));
{0} }} else {{
{0} ai.set(i-{1}, tvAsVariant(extraArg));
{0} }}
{0} }}
{0} extraArgs = ai.create();
{0}}}
)",
        ind,
        func.numParams()
    );
}
// Emit the actual call to the prefix_* (fh_/th_) alias, translating each
// VM-stack slot into the ABI form chosen by emitRemappedFuncDecl():
// hidden return pointer, this_, count, per-param value/pointer (with
// default-value fallbacks when `count` is short), and the varargs array.
void emitCallExpression(const PhpFunc& func, const fbstring& prefix,
                        std::ostream& out) {
    out << prefix << func.getUniqueName() << '(';
    bool isFirstParam = true;
    // Non-ARM indirect returns take the return-space pointer as arg 0.
    if (!g_armMode && func.isIndirectReturn()) {
        isFirstParam = false;
        if (func.returnKindOf() == KindOfAny) {
            out << "rv";
        } else {
            out << "&(rv->m_data)";
        }
    }
    if (func.usesThis()) {
        if (!isFirstParam) {
            out << ", ";
        }
        isFirstParam = false;
        out << "(this_)";
    }
    if (func.isVarArgs()) {
        if (!isFirstParam) {
            out << ", ";
        }
        isFirstParam = false;
        out << "count";
    }
    for (auto k = 0; k < func.numParams(); ++k) {
        auto const& param = func.param(k);
        if (!isFirstParam) {
            out << ", ";
        }
        isFirstParam = false;
        // Optional params select between the stack slot and the default.
        if (param.hasDefault()) {
            out << "(count > " << k << ") ? ";
        }
        if (param.isIndirectPass()) {
            auto kindof = param.kindOf();
            if (kindof == KindOfAny || kindof == KindOfRef) {
                out << "(args-" << k << ')';
            } else {
                out << "&args[-" << k << "].m_data";
            }
        } else {
            if (param.kindOf() == KindOfDouble) {
                out << "(args[-" << k << "].m_data.dbl)";
            } else {
                out << '(' << param.getCppType() << ")(args[-" << k << "].m_data.num)";
            }
        }
        if (param.hasDefault()) {
            out << " : ";
            auto kindof = param.kindOf();
            // Nontrivial defaults were materialized as defVal<k> locals by
            // emitExtCall(); trivial ones are inlined directly.
            if (param.defValueNeedsVariable()) {
                if (kindof == KindOfAny || kindof == KindOfRef) {
                    out << "(TypedValue*)(&defVal" << k << ')';
                } else {
                    out << "(Value*)(&defVal" << k << ')';
                }
            } else if (param.isIndirectPass()) {
                if (kindof == KindOfAny || kindof == KindOfRef) {
                    out << "(TypedValue*)(&" << param.getDefault() << ')';
                } else {
                    out << "(Value*)(&" << param.getDefault() << ')';
                }
            } else {
                out << '(' << param.getCppType() << ")(" << param.getDefault() << ')';
            }
        }
    }
    if (func.isVarArgs()) {
        assert(!isFirstParam);
        out << ", (Value*)(&extraArgs)";
    }
    out << ')';
}
///////////////////////////////////////////////////////////////////////////////
// Code emission helpers -- non-leaf functions
/*
* Marshals varargs into an array if necessary, emits variables for default
* values if necessary, and emits the call itself.
*/
// Emit everything around one extension call: pre-set rv's type, build the
// varargs array, materialize nontrivial defaults as defVal<k> locals, set
// x8 on ARM, then emit the call wrapped in prefix/suffix that normalizes
// the return value (booleans to 0/1, null-pointer results to KindOfNull).
void emitExtCall(const PhpFunc& func, std::ostream& out, const char* ind) {
    fbstring call_prefix;
    fbstring call_suffix;
    // Set up the type of the return value, and emit post-call code to normalize
    // return types and values
    auto returnKindOf = func.returnKindOf();
    if (returnKindOf == KindOfBoolean) {
        out << ind << "rv->m_type = KindOfBoolean;\n";
        call_prefix = "rv->m_data.num = (";
        call_suffix = ") ? 1LL : 0LL;\n";
    } else if (returnKindOf == KindOfInt64) {
        out << ind << "rv->m_type = KindOfInt64;\n";
        call_prefix = "rv->m_data.num = (int64_t)";
        call_suffix = ";\n";
    } else if (returnKindOf == KindOfDouble) {
        out << ind << "rv->m_type = KindOfDouble;\n";
        call_prefix = "rv->m_data.dbl = ";
        call_suffix = ";\n";
    } else if (returnKindOf == KindOfInvalid || returnKindOf == KindOfNull) {
        out << ind << "rv->m_type = KindOfNull;\n";
        call_suffix = ";\n";
    } else if (returnKindOf == KindOfString) {
        out << ind << "rv->m_type = KindOfString;\n";
        call_suffix = (fbstring(";\n") + ind +
                       "if (UNLIKELY(rv->m_data.num == 0LL)) "
                       "rv->m_type = KindOfNull;\n");
    } else if (returnKindOf == KindOfArray) {
        out << ind << "rv->m_type = KindOfArray;\n";
        call_suffix = (fbstring(";\n") + ind +
                       "if (UNLIKELY(rv->m_data.num == 0LL)) "
                       "rv->m_type = KindOfNull;\n");
    } else if (returnKindOf == KindOfObject) {
        out << ind << "rv->m_type = KindOfObject;\n";
        call_suffix = (fbstring(";\n") + ind +
                       "if (UNLIKELY(rv->m_data.num == 0LL)) "
                       "rv->m_type = KindOfNull;\n");
    } else if (returnKindOf == KindOfResource) {
        out << ind << "rv->m_type = KindOfResource;\n";
        call_suffix = (fbstring(";\n") + ind +
                       "if (UNLIKELY(rv->m_data.num == 0LL)) "
                       "rv->m_type = KindOfNull;\n");
    } else {
        // KindOfAny: the callee writes the whole TypedValue; only map
        // Uninit to Null afterwards.
        call_suffix = (fbstring(";\n") + ind +
                       "if (UNLIKELY(rv->m_type == KindOfUninit)) "
                       "rv->m_type = KindOfNull;\n");
    }
    if (func.isVarArgs()) {
        emitBuildExtraArgs(func, out, ind);
    }
    // If any default values need variables (because they have nontrivial values),
    // declare and initialize those
    for (auto k = 0; k < func.numParams(); ++k) {
        auto const& param = func.param(k);
        if (param.defValueNeedsVariable()) {
            DataType kindof = param.kindOf();
            out << ind << param.getStrippedCppType() << " defVal" << k;
            fbstring defVal = param.getDefault();
            if (kindof != KindOfAny ||
                (defVal != "null" && defVal != "null_variant")) {
                out << " = ";
                // "null"-ish defaults become the type's empty value.
                if (boost::starts_with(defVal, "null")) {
                    switch (kindof) {
                        case KindOfArray:
                            out << "Array()";
                            break;
                        case KindOfString:
                            out << "String()";
                            break;
                        case KindOfResource:
                            out << "Resource()";
                            break;
                        case KindOfObject:
                            out << "Object()";
                            break;
                        case KindOfRef:
                            out << "init_null()";
                            break;
                        default:
                            out << "No valid null object.";
                    }
                } else {
                    out << defVal;
                }
            }
            out << ";\n";
        }
    }
    // Put the return-value-space pointer into x8
    if (g_armMode) {
        out << ind << "asm volatile (\"mov x8, %0\\n\" : : \"r\"(";
        if (func.returnKindOf() == KindOfAny) {
            out << "rv";
        } else {
            out << "&(rv->m_data)";
        }
        out << ") : \"x8\");\n";
    }
    out << ind << call_prefix;
    emitCallExpression(func, func.isMethod() ? "th_" : "fh_", out);
    out << call_suffix;
}
// Emit casts for every type-checked parameter.  With a single checked
// param the cast is unconditional (we know it's the one that failed);
// otherwise optional params are cast under a fall-through switch on
// `count` so only present arguments are touched, then required params
// are cast with individual checks.
void emitCasts(const PhpFunc& func, std::ostream& out, const char* ind) {
    assert(func.numTypeChecks() > 0);
    if (func.numTypeChecks() == 1) {
        for (auto i = func.numParams() - 1; i >= 0; --i) {
            if (func.param(i).isCheckedType()) {
                emitCast(func.param(i), i, out, ind, false);
                return;
            }
        }
        assert(false); // not reached
    }
    if (func.minNumParams() != func.numParams()) {
        out << ind << "switch (count) {\n";
        for (auto i = func.numParams() - 1; i >= func.minNumParams(); --i) {
            auto const& param = func.param(i);
            if (i == func.numParams() - 1) {
                out << ind << "default: // count >= " << func.numParams() << '\n';
            } else {
                out << ind << "case " << (i + 1) << ":\n";
            }
            if (param.isCheckedType()) {
                emitCast(param, i, out, ind - 2, true);
            }
        }
        out << ind << "case " << func.minNumParams() << ":\n";
        out << ind << " break;\n";
        out << ind << "}\n";
    }
    for (auto i = func.minNumParams() - 1; i >= 0; --i) {
        auto const& param = func.param(i);
        if (param.isCheckedType()) {
            emitCast(param, i, out, ind, true);
        }
    }
}
/*
* Emits the fg1_ helper, which assumes that the arg count is acceptable, but at
* least one typecheck has failed. It will cast arguments to the appropriate
* types and then call the fh_ alias.
*/
// Emits the fg1_/tg1_ helper: assumes the arg count is acceptable but at
// least one typecheck failed; casts the offending args in place and then
// performs the fh_/th_ call.  Declared noinline/cold since it's off the
// fast path.
void emitSlowPathHelper(const PhpFunc& func, const fbstring& prefix,
                        std::ostream& out) {
    out << "void " << prefix << func.getUniqueName()
        << "(TypedValue* rv, ActRec* ar, int32_t count";
    if (func.usesThis()) {
        out << ", c_" << func.className() << "* this_";
    }
    out << ") __attribute__((__noinline__,cold));\n";
    out << "void " << prefix << func.getUniqueName()
        << "(TypedValue* rv, ActRec* ar, int32_t count";
    if (func.usesThis()) {
        out << ", c_" << func.className() << "* this_";
    }
    out << ") {\n";
    // `ind` points 6 characters into an 8-space literal, so callees can
    // widen indentation by stepping the pointer back (ind - 2).
    // FIX: the literal must really contain eight spaces — a shorter string
    // makes `eightSpaces + 6` point past the end (undefined behavior).
    const char* eightSpaces = "        ";
    const char* ind = eightSpaces + 6;
    out << ind << "TypedValue* args UNUSED = ((TypedValue*)ar) - 1;\n";
    emitCasts(func, out, ind);
    emitExtCall(func, out, ind);
    out << "}\n\n";
}
/**
* Emits all the methods that are needed for class creation
* */
// Emit the allocation/deallocation helpers and the IMPLEMENT_CLASS macro
// for one IDL class.  Abstract C++ classes get no ctor helper; classes
// with a custom delete hook get no dtor helper.
static void emitClassCtorAndDtor(const PhpClass& klass, std::ostream& out) {
    if (!(klass.flags() & IsCppAbstract)) {
        emitCtorHelper(klass.getCppName(), out);
        if (!(klass.flags() & CppCustomDelete)) {
            emitDtorHelper(klass.getCppName(), out);
        }
    }
    if (klass.flags() & NoDefaultSweep) {
        out << "IMPLEMENT_CLASS_NO_SWEEP(" << klass.getCppName() << ");\n";
    } else {
        out << "IMPLEMENT_CLASS(" << klass.getCppName() << ");\n";
    }
}
///////////////////////////////////////////////////////////////////////////////
// Top level
/*
* Called for each line on stdin. Looks up the symbol's characteristics in the
* IDL, and emits up to three things:
*
* - An fh_* declaration, which is the f_* signature with ABI exposed.
* - [Maybe] an fg1_* stub which does type casting of arguments, then calls fh_.
* - An fg_ stub which checks arg counts and types, then calls fh_ or fg1_.
*/
// Called for each mangled symbol on stdin.  Demangles it, looks it up in
// the IDL tables (retrying with "HPHP::String" munged to add "const&"),
// and emits: the fh_/th_ declaration (header + cpp), optionally the
// fg1_/tg1_ slow-path helper, and the fg_/tg_ ActRec stub that checks
// this_, arg count, and arg types before calling through.
void processSymbol(const fbstring& symbol, std::ostream& header,
                   std::ostream& cpp) {
    int status;
    const char *mangledSymbol = symbol.c_str();
#ifdef __APPLE__
    // Mach-O symbols carry a leading underscore; skip it before demangling.
    mangledSymbol++;
#endif
    auto demangled = abi::__cxa_demangle(mangledSymbol, nullptr, 0, &status);
    SCOPE_EXIT { free(demangled); };
    if (status != 0) {
        return;     // not a valid C++ mangled name
    }
    auto idlIt = g_mangleMap.find(demangled);
    if (idlIt == g_mangleMap.end()) {
        // Retry with "HPHP::String" rewritten; skips occurrences already
        // followed by a space or at end-of-string.
        fbstring munged = demangled;
        fbstring target = "HPHP::String";
        size_t pos = 0;
        while (true) {
            pos = munged.find(target, pos);
            if (pos == fbstring::npos) break;
            pos += target.size();
            if (pos >= munged.size() ||
                munged[pos] == ' ') {
                continue;
            }
            munged.replace(pos, 0, " const&");
        }
        idlIt = g_mangleMap.find(munged);
        if (idlIt == g_mangleMap.end()) {
            // A symbol that doesn't correspond to anything in the IDL.
            return;
        }
    }
    auto& func = *idlIt->second;
    bool isMethod = func.isMethod();
    // First method seen for a class triggers its ctor/dtor emission, once.
    auto classIt = g_classMap.find(func.className());
    if (classIt != g_classMap.end()) {
        auto& klass = *classIt->second;
        emitClassCtorAndDtor(klass, cpp);
        g_classMap.erase(classIt);
    }
    fbstring declPrefix = (isMethod ? "th_" : "fh_");
    fbstring slowPathPrefix = (isMethod ? "tg1_" : "fg1_");
    fbstring stubPrefix = (isMethod ? "tg_" : "fg_");
    std::ostringstream decl;
    emitRemappedFuncDecl(func, symbol, declPrefix, decl);
    header << decl.str();
    cpp << decl.str();
    if (func.numTypeChecks() > 0) {
        emitSlowPathHelper(func, slowPathPrefix, cpp);
    }
    // This is how we change the indentation level. So clever.
    // FIX: the literal must really hold eight spaces — a shorter string
    // makes `eightSpaces + 6` point past the end (undefined behavior).
    const char* eightSpaces = "        ";
    const char* in = eightSpaces + 6;
    cpp << "TypedValue* " << stubPrefix << func.getUniqueName()
        << "(ActRec* ar) {\n";
    cpp << in << "TypedValue rvSpace;\n";
    cpp << in << "TypedValue* rv = &rvSpace;\n";
    cpp << in << "int32_t count = ar->numArgs();\n";
    cpp << in << "TypedValue* args UNUSED = ((TypedValue*)ar) - 1;\n";
    if (func.usesThis()) {
        auto cklass = "c_" + func.className();
        cpp << in
            << cklass << "* this_ = (ar->hasThis() ? "
            << "static_cast<" << cklass << "*>(ar->getThis()) : "
            << " nullptr);\n";
        cpp << in << "if (LIKELY(this_ != nullptr)) {\n";
        in -= 2;    // deepen generated indentation (pointer walks back)
    }
    // Check the arg count
    bool needArgMiscountClause = false;
    if (func.isVarArgs()) {
        if (func.minNumParams() > 0) {
            cpp << in << "if (LIKELY(count >= " << func.minNumParams() << ")) {\n";
            needArgMiscountClause = true;
            in -= 2;
        }
    } else {
        if (func.minNumParams() == func.numParams()) {
            cpp << in << "if (LIKELY(count == " << func.minNumParams() << ")) {\n";
        } else if (func.minNumParams() == 0) {
            cpp << in << "if (LIKELY(count <= " << func.numParams() << ")) {\n";
        } else {
            cpp << in << "if (LIKELY(count >= " << func.minNumParams()
                << " && count <= "<< func.numParams() << ")) {\n";
        }
        needArgMiscountClause = true;
        in -= 2;
    }
    // Count is OK. Check arg types
    if (func.numTypeChecks() > 0) {
        cpp << in << "if (LIKELY(";
        emitTypechecks(func, cpp, in);
        cpp << ")) {\n";
        in -= 2;
    }
    // Call the f_ function via the fh_ alias
    emitExtCall(func, cpp, in);
    if (needArgMiscountClause && (func.numParams() == 0) && func.usesThis()) {
        cpp << in << "frame_free_inl(ar, rv);\n";
        cpp << in << "ar->m_r = *rv;\n";
        cpp << in << "return &ar->m_r;\n";
    }
    // Deal with type mismatches: punt to fg1_
    if (func.numTypeChecks() > 0) {
        cpp << in + 2 << "} else {\n";
        cpp << in << slowPathPrefix << func.getUniqueName() << "(rv, ar, count";
        if (func.usesThis()) {
            cpp << ", this_";
        }
        cpp << ");\n";
        in += 2;
        cpp << in << "}\n";
    }
    if (needArgMiscountClause) {
        cpp << in + 2 << "} else {\n";
        if (func.isVarArgs()) {
            cpp << in << "throw_missing_arguments_nr(\""
                << escapeCpp(func.getPrettyName())
                << "\", " << func.minNumParams() << ", count, 1, rv);\n";
        } else {
            if (func.minNumParams() == 0) {
                cpp << in << "throw_toomany_arguments_nr(\""
                    << escapeCpp(func.getPrettyName())
                    << "\", " << func.numParams() << ", count, 1, rv);\n";
            } else {
                cpp << in << "throw_wrong_arguments_nr(\""
                    << escapeCpp(func.getPrettyName())
                    << "\", count, " << func.minNumParams() << ", "
                    << func.numParams() << ", 1, rv);\n";
            }
        }
        in += 2;
        cpp << in << "}\n";
    }
    if (func.isMethod() && !func.isStatic()) {
        cpp << in + 2 << "} else {\n";
        cpp << in << "throw_instance_method_fatal(\"" << func.className()
            << "::" << func.getCppName() << "\");\n";
        in += 2;
        cpp << in << "}\n";
    }
    auto numLocals = func.numParams();
    auto frameFree =
        func.usesThis() ? "frame_free_locals_inl" : "frame_free_locals_no_this_inl";
    cpp << in << frameFree << "(ar, " << numLocals << ", rv);\n";
    cpp << in << "ar->m_r = *rv;\n";
    cpp << in << "return &ar->m_r;\n";
    cpp << "}\n\n";
}
// Entry point: parse IDL files named on the command line into the global
// lookup tables, then stream mangled symbols from stdin through
// processSymbol(), writing declarations to the .h and stubs to the .cpp.
int main(int argc, const char* argv[]) {
    if (argc < 5) {
        std::cout << "Usage: " << argv[0]
                  << " <x64|arm> <output .h> <output .cpp> <*.idl.json>...\n"
                  << "Pipe mangled C++ symbols to stdin.\n";
        return 0;
    }
    g_armMode = (strcmp(argv[1], "arm") == 0);
    std::ofstream header(argv[2]);
    std::ofstream cpp(argv[3]);
    fbvector<PhpFunc> funcs;
    fbvector<PhpClass> classes;
    for (auto i = 4; i < argc; ++i) {
        try {
            parseIDL(argv[i], funcs, classes);
        } catch (const std::exception& exc) {
            std::cerr << argv[i] << ": " << exc.what() << "\n";
            return 1;
        }
    }
    // Index by C++ signature / class name.  The maps hold pointers into
    // `funcs`/`classes`, which are not resized after this point.
    for (auto const& func : funcs) {
        g_mangleMap[func.getCppSig()] = &func;
    }
    for (auto const& klass : classes) {
        g_classMap[klass.getCppName()] = &klass;
        for (auto const& func : klass.methods()) {
            g_mangleMap[func.getCppSig()] = &func;
        }
    }
    header << "namespace HPHP {\n\n";
    cpp << g_allIncludes << "\n";
    cpp << "namespace HPHP {\n\n";
    std::string line;
    while (std::getline(std::cin, line)) {
        processSymbol(line, header, cpp);
    }
    header << "} // namespace HPHP\n";
    cpp << "} // namespace HPHP\n";
    return 0;
}
|
; Copyright (c) 2013 The Chromium Authors. All rights reserved.
; Use of this source code is governed by a BSD-style license that can be
; found in the LICENSE file.
%include "third_party/x86inc/x86inc.asm"
;
; This file uses MMX instructions as an alternative to _mm_empty() which
; is not supported in Visual Studio 2010 on x64.
; TODO(wolenetz): Use MMX intrinsics when compiling win64 with Visual
; Studio 2012? http://crbug.com/173450
;
SECTION_TEXT
CPU MMX
%define SYMBOL EmptyRegisterState_MMX
global mangle(SYMBOL) PRIVATE
align function_align
; void EmptyRegisterState_MMX(void)
; Resets the x87/MMX state so subsequent x87 FP code works; used instead
; of _mm_empty() (see file header re: VS2010 x64 limitation).
; Clobbers: x87 tag word only — no GPRs, no arguments, no return value.
mangle(SYMBOL):
emms                            ; empty MMX state / restore x87 tag word
ret
|
; A106006: [n/2] + [n/3] + [n/5].
; 0,0,1,2,3,4,6,6,7,8,10,10,12,12,13,15,16,16,18,18,20,21,22,22,24,25,26,27,28,28,31,31,32,33,34,35,37,37,38,39,41,41,43,43,44,46,47,47,49,49,51,52,53,53,55,56,57,58,59,59,62,62,63,64,65,66,68,68,69,70,72,72,74,74,75
; Computes a(n) = floor(n/2) + floor(n/3) + floor(n/5) for input n in $0;
; result is left in $1.  (LODA `div` is integer division.)
mov $1,$0     ; $1 = n
div $1,2      ; $1 = floor(n/2)
mov $2,$0
div $2,3      ; $2 = floor(n/3)
mov $3,$0
div $3,5      ; $3 = floor(n/5)
add $3,$2     ; $3 = floor(n/3) + floor(n/5)
add $1,$3     ; $1 = floor(n/2) + floor(n/3) + floor(n/5)
|
#define __CLUCK2SESAME_PLATFORM_CLOCK_ASM
#include "Platform.inc"
#include "TailCalls.inc"
#include "InitialisationChain.inc"
#include "ResetFlags.inc"
#include "Clock.inc"
radix decimal
; Timer1 control-register (T1CON) bit masks.
TMR1CS_MASK equ (1 << TMR1CS)
T1OSCEN_MASK equ (1 << T1OSCEN)
T1SYNC_ASYNC_MASK equ (1 << NOT_T1SYNC)
T1CKPS_DIVIDE_BY_8_MASK equ (1 << T1CKPS1) | (1 << T1CKPS0)
TMR1ON_MASK equ (1 << TMR1ON)
extern INITIALISE_AFTER_CLOCK
Clock code
global initialiseAfterReset
global initialiseClock
; Reset-time clock initialisation:
; - clears the clock flags,
; - configures Timer1 (external oscillator enabled, asynchronous,
;   divide-by-8 prescaler, timer on) and enables its interrupt,
; - when isLastResetDueToBrownOut returns 0 in W (presumably "not a
;   brown-out" - confirm against that routine), the BCD date/time is
;   reset to year 00, month 01, day 01, 00:00:00 and the day-of-year
;   counter is cleared; otherwise the previous date/time is kept.
; Ends with a tail call into the next link of the initialisation chain.
initialiseAfterReset:
initialiseClock:
.safelySetBankFor clockFlags
clrf clockFlags
.setBankFor T1CON
movlw TMR1CS_MASK | T1OSCEN_MASK | T1CKPS_DIVIDE_BY_8_MASK | T1SYNC_ASYNC_MASK | TMR1ON_MASK
movwf T1CON
.setBankFor PIE1
bsf PIE1, TMR1IE
fcall isLastResetDueToBrownOut
xorlw 0 ; Z is set iff the call returned W == 0
btfss STATUS, Z ; skip the goto when W was 0
goto returnFromClockInitialisation
clearDateAndTimeAfterNonBrownOutReset:
.setBankFor clockYearBcd
movlw 1
clrf clockYearBcd ; year = 00
movwf clockMonthBcd ; month = 01
movwf clockDayBcd ; day = 01
clrf clockHourBcd ; time = 00:00:00
clrf clockMinuteBcd
clrf clockSecondBcd
clrf dayOfYearHigh ; day-of-year counter = 0
clrf dayOfYearLow
returnFromClockInitialisation:
tcall INITIALISE_AFTER_CLOCK
end
|
%ifndef _BUILTIN_CLEAR_ASM
%define _BUILTIN_CLEAR_ASM
; builtin_clear: clear the console and home the cursor.
; 16-bit code (uses sp); setcursor takes the packed row/column position on
; the stack (0x0000 = top-left) and the caller cleans up the argument.
builtin_clear:
call cls ; clear console
push 0000h ; home position 0x0
call setcursor ; home cursor on console
add sp, 2 ; cleanup stack
ret
%endif
|
; A001120: a(0) = a(1) = 1; for n > 1, a(n) = n*a(n-1) + (-1)^n.
; Submitted by Jamie Morken(m4)
; 1,1,3,8,33,164,985,6894,55153,496376,4963761,54601370,655216441,8517813732,119249392249,1788740883734,28619854139745,486537520375664,8757675366761953,166395831968477106,3327916639369542121,69886249426760384540,1537497487388728459881,35362442209940754577262,848698613038578109854289,21217465325964452746357224,551654098475075771405287825,14894660658827045827942771274,417050498447157283182397595673,12094464454967561212289530274516,362833933649026836368685908235481
; $6 preserves the input n; the outer loop ($3 counting down from 2) runs
; the inner recurrence twice; $1/$4/$5 carry the rolling values and $2 the
; current multiplier.  The result is left in $0.
mov $3,2
mov $6,$0 ; save n; $0 is consumed as the inner loop counter
lpb $3
mov $0,$6
sub $0,1 ; inner loop runs n-1 times (clamped at 0)
mov $1,3
mov $2,1
sub $3,1
mov $5,1
lpb $0
sub $0,1
mov $4,$1 ; remember the previous value
add $1,$5
add $2,1
mul $1,$2 ; advance the recurrence
mov $5,$4
lpe
lpe
mov $0,$5 ; final value
|
;===============================================================================
; Copyright 2014-2020 Intel Corporation
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;===============================================================================
;
;
; Purpose: Cryptography Primitive.
; Big Number Operations
;
; Content:
; cpInc_BNU()
; cpDec_BNU()
;
;
%include "asmdefs.inc"
%include "ia_32e.inc"
%if (_IPP32E >= _IPP32E_M7)
segment .text align=IPP_ALIGN_FACTOR
;*************************************************************
;* Ipp64u cpInc_BNU(Ipp64u* pDst,
;* const Ipp64u* pSrc, int len,
;* Ipp64u increment)
;* returns carry
;*************************************************************
align IPP_ALIGN_FACTOR
IPPASM cpInc_BNU,PUBLIC
%assign LOCAL_FRAME 0
USES_GPR rsi,rdi
USES_XMM
COMP_ABI 4
; Ipp64u cpInc_BNU(Ipp64u* pDst, const Ipp64u* pSrc, int len, Ipp64u increment)
; Adds `increment` to the multi-limb number pSrc[0..len-1], writes the
; result to pDst and returns the final carry (0 or 1) in rax.
; rdi = pDst
; rsi = pSrc
; rdx = len
; rcx = increment
movsxd rdx, edx ; length
mov r8, qword [rsi] ; r[0] = r[0]+increment
add r8, rcx
mov qword [rdi], r8
lea rsi, [rsi+rdx*sizeof(qword)] ; point both pointers past the end...
lea rdi, [rdi+rdx*sizeof(qword)] ; ...so a negative rcx indexes the limbs
lea rcx, [rdx*sizeof(qword)]
sbb rax, rax ; save cf
neg rcx ; rcx = negative length (bytes)
add rcx, sizeof(qword) ; first limb already handled above
jrcxz .exit ; len == 1: nothing left to do
add rax, rax ; restore cf
jnc .copy ; no carry pending: just copy the tail
align IPP_ALIGN_FACTOR
.inc_loop: ; propagate the carry limb by limb
mov r8, qword [rsi+rcx]
adc r8, 0
mov qword [rdi+rcx], r8
lea rcx, [rcx+sizeof(qword)] ; lea preserves CF for the next adc
jrcxz .exit_loop ; all limbs processed
jnc .exit_loop ; carry died out early
jmp .inc_loop
.exit_loop:
sbb rax, rax ; save cf
.copy: ; copy untouched limbs when pSrc != pDst
cmp rsi, rdi
jz .exit
jrcxz .exit
.copy_loop:
mov r8, qword [rsi+rcx]
mov qword [rdi+rcx], r8
add rcx, sizeof(qword)
jnz .copy_loop
.exit:
neg rax ; rax was 0 or -1; return carry as 0 or 1
REST_XMM
REST_GPR
ret
ENDFUNC cpInc_BNU
;*************************************************************
;* Ipp64u cpDec_BNU(Ipp64u* pDst,
;* const Ipp64u* pSrc, int len,
;* Ipp64u increment)
;* returns borrow
;*************************************************************
align IPP_ALIGN_FACTOR
IPPASM cpDec_BNU,PUBLIC
%assign LOCAL_FRAME 0
USES_GPR rsi,rdi
USES_XMM
COMP_ABI 4
; Ipp64u cpDec_BNU(Ipp64u* pDst, const Ipp64u* pSrc, int len, Ipp64u decrement)
; Subtracts `decrement` from the multi-limb number pSrc[0..len-1], writes
; the result to pDst and returns the final borrow (0 or 1) in rax.
; rdi = pDst
; rsi = pSrc
; rdx = len
; rcx = decrement
movsxd rdx, edx ; length
mov r8, qword [rsi] ; r[0] = r[0]-decrement (comment fixed: this is the subtract variant)
sub r8, rcx
mov qword [rdi], r8
lea rsi, [rsi+rdx*sizeof(qword)] ; point both pointers past the end...
lea rdi, [rdi+rdx*sizeof(qword)] ; ...so a negative rcx indexes the limbs
lea rcx, [rdx*sizeof(qword)]
sbb rax, rax ; save borrow
neg rcx ; rcx = negative length (bytes)
add rcx, sizeof(qword) ; first limb already handled above
jrcxz .exit ; len == 1: nothing left to do
add rax, rax ; restore borrow into CF
jnc .copy ; no borrow pending: just copy the tail
align IPP_ALIGN_FACTOR
.inc_loop: ; borrow-propagation loop (label name kept from the inc variant)
mov r8, qword [rsi+rcx]
sbb r8, 0
mov qword [rdi+rcx], r8
lea rcx, [rcx+sizeof(qword)] ; lea preserves CF for the next sbb
jrcxz .exit_loop ; all limbs processed
jnc .exit_loop ; borrow died out early
jmp .inc_loop
.exit_loop:
sbb rax, rax ; save borrow
.copy: ; copy untouched limbs when pSrc != pDst
cmp rsi, rdi
jz .exit
jrcxz .exit
.copy_loop:
mov r8, qword [rsi+rcx]
mov qword [rdi+rcx], r8
add rcx, sizeof(qword)
jnz .copy_loop
.exit:
neg rax ; rax was 0 or -1; return borrow as 0 or 1
REST_XMM
REST_GPR
ret
ENDFUNC cpDec_BNU
%endif
|
; A021195: Decimal expansion of 1/191.
; Submitted by Jon Maiga
; 0,0,5,2,3,5,6,0,2,0,9,4,2,4,0,8,3,7,6,9,6,3,3,5,0,7,8,5,3,4,0,3,1,4,1,3,6,1,2,5,6,5,4,4,5,0,2,6,1,7,8,0,1,0,4,7,1,2,0,4,1,8,8,4,8,1,6,7,5,3,9,2,6,7,0,1,5,7,0,6,8,0,6,2,8,2,7,2,2,5,1,3,0,8,9,0,0,5,2
seq $0,83812 ; 4n-1 is the digit reversal of n-1.
div $0,382 ; scale the referenced sequence down...
mod $0,10 ; ...and keep the last decimal digit
|
/**
CIANA - C/C++ Change Impact ANAlyzer
Copyright (c) 2019 HANO Hiroyuki
This software is released under MIT License,
http://opensource.org/licenses/mit-license.php
*/
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "ast_reader_libclang.h"
#include "location.h"
#include <string>
namespace Ciana {
class ASTCursor {
private:
Location location;
std::string name;
int64_t id;
public:
ASTCursor(Location location, std::string name, int64_t id);
int64_t get_id();
Location get_location();
};
ASTCursor::ASTCursor(Location in_location, std::string in_name, int64_t in_id)
:location(in_location), name(in_name), id(in_id)
{
}
int64_t ASTCursor::get_id() {
return id;
}
Location ASTCursor::get_location() {
return location;
}
// Container of ASTCursor records with position-based lookup (lookup is
// still a stub; see search_id below).
class ASTCursorList {
private:
    std::vector<ASTCursor> cursor_list;
public:
    // Find the id of the cursor stored for filename:line:column.
    unsigned search_id(std::string filename, unsigned line, unsigned column);
    void push(ASTCursor input);
};

// Look up the cursor recorded at filename:line:column and return its id.
// TODO: matching is not implemented yet; until it is, this always returns
// 0.  (The previous version also iterated over cursor_list with an empty
// loop body -- dead code, removed here.)
unsigned ASTCursorList::search_id(std::string filename, unsigned line, unsigned column) {
    (void)filename;
    (void)line;
    (void)column;
    return 0;
}
// MatchFinder callback that records the name and declaration id of the
// DeclRefExpr found at a caller-specified line/column (see run()).
class GetCursor : public clang::ast_matchers::MatchFinder::MatchCallback {
private:
    unsigned line = 0;    // target line, set via set_cursor()
    unsigned column = 0;  // target column, set via set_cursor()
    std::string name;     // name of the matched declaration; empty until a match
    // Fix: previously left uninitialized, so get_id() returned garbage when
    // run() never found a match.  -1 now marks "no match yet".
    int64_t id = -1;
public:
    virtual void run(const clang::ast_matchers::MatchFinder::MatchResult &);
    void set_cursor(unsigned line, unsigned column);
    std::string get_name();
    int64_t get_id();
};

// Record the source position the subsequent run() invocations should match.
void GetCursor::set_cursor(unsigned in_line, unsigned in_column) {
    line = in_line;
    column = in_column;
}
// MatchFinder callback: fires for every DeclRefExpr bound as
// "declrefexpr".  When the expansion line/column of the reference equals
// the position set via set_cursor(), remember the referenced
// declaration's name and id.
void GetCursor::run(
    const clang::ast_matchers::MatchFinder::MatchResult &Result) {
  clang::ASTContext *Context = Result.Context;
  const clang::DeclRefExpr *DRE =
      Result.Nodes.getNodeAs<clang::DeclRefExpr>("declrefexpr");
  if (DRE) {
    clang::SourceManager &sm = Context->getSourceManager();
    //StringRef work_filename = sm.getFilename(DRE->getLocation());
    unsigned work_line = sm.getExpansionLineNumber(DRE->getLocation());
    unsigned work_column = sm.getExpansionColumnNumber(DRE->getLocation());
    // NOTE(review): only line/column are compared; the filename is not
    // checked (see the commented-out getFilename above).  Confirm that a
    // single-file tool invocation makes this safe.
    if (line == work_line && column == work_column) {
      name = DRE->getNameInfo().getName().getAsString();
      const clang::ValueDecl *vd = DRE->getDecl();
      id = vd->getID();
    }
  }
}
int64_t GetCursor::get_id() {
return id;
}
// MatchFinder callback that collects every location where the declaration
// with a given id is referenced.
class UsedLocation : public clang::ast_matchers::MatchFinder::MatchCallback {
private:
    const int64_t search_id;        // id of the declaration to look for
    std::vector<ASTCursor> output;  // every reference found so far

public:
    UsedLocation(const int64_t id) : search_id(id) {}
    virtual void run(const clang::ast_matchers::MatchFinder::MatchResult &);
    std::vector<ASTCursor> get_output();
};
// MatchFinder callback: for every DeclRefExpr whose referenced
// declaration has the id we are searching for, record its name and
// expansion location as an ASTCursor in `output`.
void UsedLocation::run(
    const clang::ast_matchers::MatchFinder::MatchResult &Result) {
  clang::ASTContext *Context = Result.Context;
  const clang::DeclRefExpr *DRE =
      Result.Nodes.getNodeAs<clang::DeclRefExpr>("declrefexpr");
  if (DRE) {
    const clang::ValueDecl *vd = DRE->getDecl();
    int64_t local_id = vd->getID();
    if (local_id == search_id) {
      clang::SourceManager &sm = Context->getSourceManager();
      llvm::StringRef work_filename = sm.getFilename(DRE->getLocation());
      unsigned work_line = sm.getExpansionLineNumber(DRE->getLocation());
      unsigned work_column = sm.getExpansionColumnNumber(DRE->getLocation());
      std::string name = DRE->getNameInfo().getName().getAsString();
      Location loc(work_filename, work_line, work_column);
      ASTCursor cursor(loc, name, local_id);
      output.push_back(cursor);
    }
  }
}
std::vector<ASTCursor> UsedLocation::get_output() {
return output;
}
// Load the JSON compilation database the clang tools will run against.
// Returns false (after printing the loader's error message) when the
// database cannot be loaded.
// NOTE(review): the database path is hard-coded to a test fixture
// ("../test/sample1/build/compile_commands.json"); it should eventually
// come from configuration or the caller.
bool ASTReaderLibClang::initialize() {
  llvm::StringRef filepath;
  filepath =
      "../test/sample1/build/compile_commands.json";
  std::string ErrorMessage;
  compdb = std::unique_ptr<clang::tooling::JSONCompilationDatabase>(
      clang::tooling::JSONCompilationDatabase::loadFromFile(
          filepath, ErrorMessage,
          clang::tooling::JSONCommandLineSyntax::AutoDetect));
  if (!compdb) {
    llvm::errs() << ErrorMessage;
    return false;
  }
  return true;
}
// Collect every source location where the symbol at `input` is referenced.
// Two passes over the file named by `input`:
//   1. resolve the cursor at input's line/column to a declaration id,
//   2. collect every DeclRefExpr referencing that id.
// Locations are appended to `output`.  Returns false when either clang
// tool invocation fails.
// (Fix: the local previously misspelled "resutls" is gone, along with its
// extra default-construct-then-assign copy.)
bool ASTReaderLibClang::get_used_locations(Location input, std::vector<Location> &output) {
  clang::ast_matchers::StatementMatcher declRefExprMatcher =
      clang::ast_matchers::declRefExpr().bind("declrefexpr");

  // TODO err -> llvm::ArrayRef<std::string> source_paths(input.get_filename());
  std::string tmp = input.get_filename();
  llvm::ArrayRef<std::string> source_paths(tmp);

  // Pass 1: find the declaration id under the cursor.
  clang::tooling::ClangTool Tool(*compdb, source_paths);
  GetCursor cursor_getter;
  clang::ast_matchers::MatchFinder finder;
  finder.addMatcher(declRefExprMatcher, &cursor_getter);
  cursor_getter.set_cursor(input.get_line(), input.get_column());
  if (Tool.run(clang::tooling::newFrontendActionFactory(&finder).get()) != 0) {
    return false;
  }

  // Pass 2: gather every reference to that id.
  clang::tooling::ClangTool Tool2(*compdb, source_paths);
  UsedLocation used_location(cursor_getter.get_id());
  clang::ast_matchers::MatchFinder finder2;
  finder2.addMatcher(declRefExprMatcher, &used_location);
  if (Tool2.run(clang::tooling::newFrontendActionFactory(&finder2).get()) != 0) {
    return false;
  }

  std::vector<ASTCursor> results = used_location.get_output();
  for (auto &cursor : results) {
    output.push_back(cursor.get_location());
  }
  return true;
}
// Find the variables impacted by a change at `input`.
// TODO: not implemented -- only constructs the ClangTool and returns
// true; the commented-out matcher/dump code below sketches the intended
// approach (matching assignment binary operators).
bool ASTReaderLibClang::get_impact_variables(Location input, std::vector<Location> &results) {
  //TODO
  (void)input;
  (void)results;
  //clang::ast_matchers::StatementMatcher SampleMatcher =
  //binaryOperator(hasOperatorName("=")).bind("binOpe");
  std::string tmp = input.get_filename();
  llvm::ArrayRef<std::string> source_paths(tmp);
  clang::tooling::ClangTool Tool(*compdb, source_paths);
  //if (Tool.run(clang::tooling::newFrontendActionFactory(&finder).get()) == 0) {
  //}
  // llvm::outs() << DRE->getOpcodeStr() << "\n";
  // llvm::outs() << "--- getLHS\n";
  // DRE->getLHS()->dump(Context->getSourceManager());
  // llvm::outs() << "--- getRHS\n";
  // DRE->getRHS()->dump(Context->getSourceManager());
  return true;
}
// Find the functions impacted by a change at `input`.
// TODO: not implemented -- currently reports success without producing
// any results.  NOTE(review): "funtions" is a typo, but the name is part
// of the public interface (declared in the header), so it stays.
bool ASTReaderLibClang::get_impact_funtions(Location input, std::vector<Location> &results) {
  //TODO
  (void)input;
  (void)results;
  return true;
}
} // namespace Ciana
|
; Dialog/control ids for the user-data resource editor.
IDD_USERDATA equ 2500
IDC_EDTUSERDATA equ 1003
.data
; Name stem used when generating a fresh resource name ("IDR_<type>").
szUserdataName db 'IDR_',0
; Template USERDATAMEM used when a new user-data item is created.
defuserdata USERDATAMEM <,1,0,,0>
db 0
.code
; Build a text block with the exported names (type and name) of the
; user-data item described by the USERDATAMEM at hMem.  Returns the
; allocated 16 KiB buffer in eax; freeing it is the caller's job.
ExportUserDataNames proc uses esi edi,hMem:DWORD
invoke xGlobalAlloc,GMEM_FIXED or GMEM_ZEROINIT,1024*16
mov edi,eax
invoke GlobalLock,edi
push edi ; remember the buffer start; popped into eax below
mov esi,hMem
;#define
.if [esi].USERDATAMEM.sztype && [esi].USERDATAMEM.ntype
invoke ExportName,addr [esi].USERDATAMEM.sztype,[esi].USERDATAMEM.ntype,edi
lea edi,[edi+eax] ; advance past the text just written
.endif
.if [esi].USERDATAMEM.szname && [esi].USERDATAMEM.value
invoke ExportName,addr [esi].USERDATAMEM.szname,[esi].USERDATAMEM.value,edi
lea edi,[edi+eax] ; advance past the text just written
.endif
pop eax ; return the buffer address
ret
ExportUserDataNames endp
; Render the user-data item at hMem as resource-script text:
;   "<name-or-id> <type-or-id>\r\nBEGIN\r\n<raw text>\r\nEND\r\n\r\n"
; The raw text follows the USERDATAMEM header in the same memory block.
; Returns the allocated 16 KiB buffer in eax; freeing it is the caller's
; job.
ExportUserData proc uses esi edi,hMem:DWORD
invoke xGlobalAlloc,GMEM_FIXED or GMEM_ZEROINIT,16*1024
mov edi,eax
invoke GlobalLock,edi
push edi ; remember the buffer start; popped into eax below
mov esi,hMem
;Name or ID
.if [esi].USERDATAMEM.szname
invoke strcpy,edi,addr [esi].USERDATAMEM.szname
.else
invoke ResEdBinToDec,[esi].USERDATAMEM.value,edi
.endif
invoke strlen,edi
add edi,eax
mov al,' '
stosb
;Type name or type id
.if [esi].USERDATAMEM.sztype
invoke strcpy,edi,addr [esi].USERDATAMEM.sztype
.else
;NOTE(review): this branch converts .value, same as the name branch above;
;possibly .ntype was intended here -- confirm before changing.
invoke ResEdBinToDec,[esi].USERDATAMEM.value,edi
.endif
invoke strlen,edi
add edi,eax
mov al,0Dh ; CR LF after the header line
stosb
mov al,0Ah
stosb
invoke SaveStr,edi,addr szBEGIN
add edi,eax
mov al,0Dh
stosb
mov al,0Ah
stosb
;Copy the raw user-data text that follows the USERDATAMEM header.
lea edx,[esi+sizeof USERDATAMEM]
.while byte ptr [edx]
mov al,[edx]
mov [edi],al
inc edi
inc edx
.endw
;Make sure the body ends with CR LF before END.
.if byte ptr [edi-1]!=0Ah
mov al,0Dh
stosb
mov al,0Ah
stosb
.endif
invoke SaveStr,edi,addr szEND
add edi,eax
mov al,0Dh ; END line terminator plus a blank separator line
stosb
mov al,0Ah
stosb
mov al,0Dh
stosb
mov al,0Ah
stosb
mov byte ptr [edi],0 ; NUL-terminate the text block
pop eax ; return the buffer address
ret
ExportUserData endp
; Copy the text from the dialog's edit control back into the project
; item attached to hWin (creating a new TPE_USERDATA item from the
; defuserdata template when none is attached yet).  Bare CR characters
; from the control are expanded to CR LF.  Returns the project item
; pointer in eax.
SaveUserDataEdit proc uses ebx esi edi, hWin:HWND
LOCAL buffer[256]:BYTE
LOCAL hMem:DWORD
invoke xGlobalAlloc,GMEM_FIXED or GMEM_ZEROINIT,64*1024
mov hMem,eax
invoke GetWindowLong,hWin,GWL_USERDATA
mov ebx,eax
.if !ebx
;No project item yet: add one and seed it from the template.
invoke SendMessage,hRes,PRO_ADDITEM,TPE_USERDATA,FALSE
mov ebx,eax
invoke RtlMoveMemory,[ebx].PROJECT.hmem,offset defuserdata,sizeof USERDATAMEM+1
.endif
push ebx ; popped into eax as the return value
invoke GetDlgItemText,hWin,IDC_EDTUSERDATA,hMem,60*1024
mov ecx,hMem
mov edx,[ebx].PROJECT.hmem
lea edx,[edx+sizeof USERDATAMEM] ; raw text lives after the header
.while byte ptr [ecx]
mov al,[ecx]
mov [edx],al
.if al==VK_RETURN
;Expand CR to CR LF.
inc edx
mov byte ptr [edx],0Ah
.endif
inc edx
inc ecx
.endw
mov byte ptr [edx],0 ; NUL-terminate the stored text
invoke GlobalFree,hMem
;Refresh the item's display name.
invoke GetProjectItemName,ebx,addr buffer
invoke SetProjectItemName,ebx,addr buffer
pop eax
ret
SaveUserDataEdit endp
; Dialog procedure for the user-data resource editor.
;   WM_INITDIALOG  creates (or re-attaches) the shared RAEdit control,
;                  loads the item text and wires up the property list;
;   WM_COMMAND     IDOK saves the edit text back to the project item,
;                  IDCANCEL closes the dialog;
;   WM_SIZE        keeps the dialog and editor sized to the client area;
;   WM_CLOSE       detaches the shared RAEdit control and destroys the
;                  dialog;
;   WM_NOTIFY      forwards editor notifications to the parent.
; Returns TRUE for handled messages, FALSE otherwise.
UserDataEditProc proc uses esi edi,hWin:HWND,uMsg:UINT,wParam:WPARAM,lParam:LPARAM
LOCAL rect:RECT
LOCAL fChanged:DWORD
LOCAL racol:RACOLOR
mov eax,uMsg
.if eax==WM_INITDIALOG
mov fChanged,FALSE
mov esi,lParam
;lParam in TPE_USERDATA..TPE_USERDATA+32 means "create a new item of that
;resource type"; otherwise lParam is the existing project item to edit.
.if esi>=TPE_USERDATA && esi<=TPE_USERDATA+32
lea eax,[esi-32+11] ; index into the rarstype table
mov edx,sizeof RARSTYPE
mul edx
lea esi,[eax+offset rarstype]
mov edi,offset defuserdata
;Seed the template with the chosen type and a fresh unique name/id.
invoke strcpy,addr [edi].USERDATAMEM.sztype,addr [esi].RARSTYPE.sztype
mov eax,[esi].RARSTYPE.nid
mov [edi].USERDATAMEM.ntype,eax
invoke GetFreeProjectitemID,TPE_USERDATA
mov [edi].USERDATAMEM.value,eax
invoke strcpy,addr [edi].USERDATAMEM.szname,addr szUserdataName
invoke strcat,addr [edi].USERDATAMEM.szname,addr [esi].RARSTYPE.sztype
invoke GetUnikeName,addr [edi].USERDATAMEM.szname
invoke SaveUserDataEdit,hWin
mov esi,eax ; the newly created project item
mov fChanged,TRUE
.endif
invoke SetWindowLong,hWin,GWL_USERDATA,esi
mov edi,[esi].PROJECT.hmem
;Create the RAEdit control on first use, else re-attach the cached one.
.if ![edi].USERDATAMEM.hred
push edi
invoke CreateWindowEx,200h,addr szRAEditClass,0,WS_CHILD or WS_VISIBLE or STYLE_NOSIZEGRIP or STYLE_NOCOLLAPSE,0,0,0,0,hWin,IDC_EDTUSERDATA,hInstance,0
mov hDlgRed,eax
mov edi,eax
invoke SendMessage,edi,WM_SETFONT,hredfont,0
;Match the editor's colours to the application scheme.
invoke SendMessage,edi,REM_GETCOLOR,0,addr racol
mov eax,color.back
mov racol.bckcol,eax
mov racol.cmntback,eax
mov racol.strback,eax
mov racol.oprback,eax
mov racol.numback,eax
mov eax,color.text
mov racol.txtcol,eax
mov racol.strcol,0
invoke SendMessage,edi,REM_SETCOLOR,0,addr racol
invoke SendMessage,edi,REM_SETWORDGROUP,0,2
pop edi
mov eax,hDlgRed
mov [edi].USERDATAMEM.hred,eax
invoke SetDlgItemText,hWin,IDC_EDTUSERDATA,addr [edi+sizeof USERDATAMEM]
invoke SendDlgItemMessage,hWin,IDC_EDTUSERDATA,EM_SETMODIFY,FALSE,0
.else
mov eax,[edi].USERDATAMEM.hred
mov hDlgRed,eax
invoke SetParent,eax,hWin
invoke ShowWindow,hDlgRed,SW_SHOW
.endif
;Expose the item's type/name/id fields to the property list.
lea eax,[edi].USERDATAMEM.sztype
mov lpResType,eax
lea eax,[edi].USERDATAMEM.szname
mov lpResName,eax
lea eax,[edi].USERDATAMEM.value
mov lpResID,eax
invoke PropertyList,-9
mov fNoScroll,TRUE
invoke ShowScrollBar,hDEd,SB_BOTH,FALSE
invoke SendMessage,hWin,WM_SIZE,0,0
mov eax,fChanged
mov fDialogChanged,eax
.elseif eax==WM_COMMAND
mov edx,wParam
movzx eax,dx ; control id
shr edx,16 ; notification code
.if edx==BN_CLICKED
.if eax==IDOK
;Persist the edit box back into the project item.
invoke SendDlgItemMessage,hWin,IDC_EDTUSERDATA,EM_GETMODIFY,0,0
.if eax
mov fDialogChanged,TRUE
invoke SendDlgItemMessage,hWin,IDC_EDTUSERDATA,EM_SETMODIFY,FALSE,0
.endif
invoke SaveUserDataEdit,hWin
.if fDialogChanged
invoke SendMessage,hRes,PRO_SETMODIFY,TRUE,0
mov fDialogChanged,FALSE
.endif
.elseif eax==IDCANCEL
invoke SendMessage,hWin,WM_CLOSE,NULL,NULL
invoke PropertyList,0
.endif
.endif
.elseif eax==WM_CLOSE
;Detach the shared RAEdit control before destroying the dialog.
invoke ShowWindow,hDlgRed,SW_HIDE
invoke SetParent,hDlgRed,hRes
mov hDlgRed,0
mov fNoScroll,FALSE
invoke ShowScrollBar,hDEd,SB_BOTH,TRUE
invoke DestroyWindow,hWin
.elseif eax==WM_SIZE
invoke SendMessage,hDEd,WM_VSCROLL,SB_THUMBTRACK,0
invoke SendMessage,hDEd,WM_HSCROLL,SB_THUMBTRACK,0
;Fit the dialog inside hDEd's client area with a 3px margin...
invoke GetClientRect,hDEd,addr rect
mov rect.left,3
mov rect.top,3
sub rect.right,6
sub rect.bottom,6
invoke MoveWindow,hWin,rect.left,rect.top,rect.right,rect.bottom,TRUE
;...then fit the editor inside the dialog the same way.
invoke GetClientRect,hWin,addr rect
invoke GetDlgItem,hWin,IDC_EDTUSERDATA
mov rect.left,3
mov rect.top,3
sub rect.right,6
sub rect.bottom,6
invoke MoveWindow,eax,rect.left,rect.top,rect.right,rect.bottom,TRUE
.elseif eax==WM_NOTIFY
mov eax,lParam
mov eax,[eax].NMHDR.hwndFrom
.if eax==hDlgRed
invoke NotifyParent
.endif
.else
mov eax,FALSE ; message not handled
ret
.endif
mov eax,TRUE
ret
UserDataEditProc endp
|
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#include <precompiled.h>
#include <Editor/Undo/ScriptCanvasUndoManager.h>
#include <AzCore/Serialization/ObjectStream.h>
namespace ScriptCanvasEditor
{
static const int c_undoLimit = 100;
// ScopedUndoBatch
// RAII helper: opens an undo batch named `label` on construction and
// closes it when the scope ends, via broadcasts on the UndoRequestBus.
ScopedUndoBatch::ScopedUndoBatch(AZStd::string_view label)
{
    UndoRequestBus::Broadcast(&UndoRequests::BeginUndoBatch, label);
}

ScopedUndoBatch::~ScopedUndoBatch()
{
    UndoRequestBus::Broadcast(&UndoRequests::EndUndoBatch);
}
// SceneUndoState
// Create per-scene undo state: an undo stack capped at c_undoLimit
// entries (undoNotify receives change notifications) and an empty
// serialized-snapshot cache.
SceneUndoState::SceneUndoState(AzToolsFramework::UndoSystem::IUndoNotify* undoNotify)
    : m_undoStack(AZStd::make_unique<AzToolsFramework::UndoSystem::UndoStack>(c_undoLimit, undoNotify))
    , m_undoCache(AZStd::make_unique<UndoCache>())
{
}
// Open a new undo batch named `label`.  Batches nest: when one is already
// open, the new batch remembers the previous one as its parent and
// becomes the current batch (EndUndoBatch later pops back up).
void SceneUndoState::BeginUndoBatch(AZStd::string_view label)
{
    auto* openedBatch = aznew AzToolsFramework::UndoSystem::BatchCommand(label, 0);
    if (m_currentUndoBatch)
    {
        openedBatch->SetParent(m_currentUndoBatch);
    }
    m_currentUndoBatch = openedBatch;
}
// Close the innermost open undo batch.  Nested batches simply pop one
// level; closing the root batch posts it to the undo stack when it
// recorded real work, otherwise the empty batch is discarded.
void SceneUndoState::EndUndoBatch()
{
    // No batch open: nothing to close.
    if (!m_currentUndoBatch)
    {
        return;
    }
    if (m_currentUndoBatch->GetParent())
    {
        // pop one up
        m_currentUndoBatch = m_currentUndoBatch->GetParent();
    }
    else
    {
        // we're at root
        if (m_currentUndoBatch->HasRealChildren() && m_undoStack)
        {
            // The stack takes ownership of the posted batch.
            m_undoStack->Post(m_currentUndoBatch);
        }
        else
        {
            // Empty batch: discard it.
            delete m_currentUndoBatch;
        }
        m_currentUndoBatch = nullptr;
    }
}
// Release any batch still open at teardown (a non-null m_currentUndoBatch
// here means EndUndoBatch was not called for every BeginUndoBatch).
SceneUndoState::~SceneUndoState()
{
    delete m_currentUndoBatch;
}
// UndoCache
// Drop every cached snapshot for every graph.
void UndoCache::Clear()
{
    m_dataMap.clear();
}
// Remove the cached snapshot for one graph, if present.
void UndoCache::PurgeCache(ScriptCanvas::ScriptCanvasId scriptCanvasId)
{
    m_dataMap.erase(scriptCanvasId);
}
// Initial population is identical to refreshing: take a fresh snapshot.
void UndoCache::PopulateCache(ScriptCanvas::ScriptCanvasId scriptCanvasId)
{
    UpdateCache(scriptCanvasId);
}
// Serialize the graph's current undo data into this cache's entry for
// scriptCanvasId, replacing any previous snapshot.
void UndoCache::UpdateCache(ScriptCanvas::ScriptCanvasId scriptCanvasId)
{
    // Lookup the graph item and perform a snapshot of all it's serialization elements
    UndoData undoData;
    UndoRequestBus::EventResult(undoData, scriptCanvasId, &UndoRequests::CreateUndoData);
    AZ::SerializeContext* serializeContext{};
    AZ::ComponentApplicationBus::BroadcastResult(serializeContext, &AZ::ComponentApplicationRequests::GetSerializeContext);
    // operator[] creates the entry when missing; reuse its storage.
    AZStd::vector<AZ::u8>& newData = m_dataMap[scriptCanvasId];
    newData.clear();
    AZ::IO::ByteContainerStream<AZStd::vector<AZ::u8>> byteStream(&newData);
    AZ::ObjectStream* objStream = AZ::ObjectStream::Create(&byteStream, *serializeContext, AZ::DataStream::ST_BINARY);
    if (!objStream->WriteClass(&undoData))
    {
        AZ_Assert(false, "Unable to serialize Script Canvas scene and graph data for undo/redo");
        // NOTE(review): returning here skips objStream->Finalize(); confirm
        // whether Finalize() is still required to release the stream on
        // this failure path.
        return;
    }
    objStream->Finalize();
}
// Fetch the cached snapshot for a graph; when no snapshot has been stored
// for scriptCanvasId, the shared empty buffer is returned instead.
const AZStd::vector<AZ::u8>& UndoCache::Retrieve(ScriptCanvas::ScriptCanvasId scriptCanvasId)
{
    auto found = m_dataMap.find(scriptCanvasId);
    return (found == m_dataMap.end()) ? m_emptyData : found->second;
}
}
|
// SPDX-License-Identifier: Apache-2.0
// Copyright 2020 - 2022 Pionix GmbH and Contributors to EVerest
#include "emptyImpl.hpp"
namespace module {
namespace main {
// Intentionally empty: this implementation provides no functionality, so
// there is nothing to set up during initialization.
void emptyImpl::init() {
}
// Intentionally empty: nothing to do once the module is ready.
void emptyImpl::ready() {
}
} // namespace main
} // namespace module
|
// Auto-generated test code (see the <gen_prepare_buffer> trace in the
// trailing comment block): touches the *_ht buffers with a fixed mix of
// stores and loads to prime the memory hierarchy before the faulty load.
// The interleaved nop/arith instructions are generated filler; all
// touched registers are saved/restored around the body.
.global s_prepare_buffers
s_prepare_buffers:
push %r12
push %r13
push %r14
push %r8
push %r9
push %rcx
push %rdx
// 16-byte store to addresses_A_ht
lea addresses_A_ht+0xb34a, %rcx
nop
cmp $49802, %r12
mov $0x6162636465666768, %rdx
movq %rdx, %xmm6
movups %xmm6, (%rcx)
nop
nop
sub %r14, %r14
// 8-byte load from addresses_WT_ht
lea addresses_WT_ht+0x8b4a, %r9
nop
nop
add $10661, %r13
mov (%r9), %r8
nop
add %r12, %r12
// 8-byte store to addresses_WC_ht
lea addresses_WC_ht+0x2b4a, %r13
nop
nop
add $7506, %r9
mov $0x6162636465666768, %r8
movq %r8, (%r13)
and %r13, %r13
// 4-byte load from addresses_normal_ht
lea addresses_normal_ht+0x142b2, %r9
cmp $63999, %r12
mov (%r9), %ecx
nop
nop
nop
xor %r13, %r13
pop %rdx
pop %rcx
pop %r9
pop %r8
pop %r14
pop %r13
pop %r12
ret
// Auto-generated test code: performs the 32-byte "faulty" load from
// addresses_RW, reduces the loaded value to a single byte and uses it to
// index the `oracles` array at one 4 KiB page per value (shlq $12) --
// the measurement side of a transient-execution leak experiment.
.global s_faulty_load
s_faulty_load:
push %r14
push %r15
push %rbp
push %rdi
push %rdx
push %rsi
// Faulty Load
lea addresses_RW+0x19b4a, %r14
nop
nop
inc %rdi
vmovups (%r14), %ymm7
// Extract the upper quadword of the loaded 256-bit value into r15.
vextracti128 $1, %ymm7, %xmm7
vpextrq $1, %xmm7, %r15
lea oracles, %rsi
and $0xff, %r15 // keep one byte of state
shlq $12, %r15 // one oracle page (4 KiB) per value
mov (%rsi,%r15,1), %r15 // touch the oracle line
pop %rsi
pop %rdx
pop %rdi
pop %rbp
pop %r15
pop %r14
ret
/*
<gen_faulty_load>
[REF]
{'src': {'NT': False, 'same': False, 'congruent': 0, 'type': 'addresses_RW', 'AVXalign': True, 'size': 4}, 'OP': 'LOAD'}
[Faulty Load]
{'src': {'NT': False, 'same': True, 'congruent': 0, 'type': 'addresses_RW', 'AVXalign': False, 'size': 32}, 'OP': 'LOAD'}
<gen_prepare_buffer>
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 10, 'type': 'addresses_A_ht', 'AVXalign': False, 'size': 16}}
{'src': {'NT': False, 'same': False, 'congruent': 9, 'type': 'addresses_WT_ht', 'AVXalign': False, 'size': 8}, 'OP': 'LOAD'}
{'OP': 'STOR', 'dst': {'NT': False, 'same': False, 'congruent': 11, 'type': 'addresses_WC_ht', 'AVXalign': False, 'size': 8}}
{'src': {'NT': False, 'same': True, 'congruent': 3, 'type': 'addresses_normal_ht', 'AVXalign': False, 'size': 4}, 'OP': 'LOAD'}
{'32': 21829}
32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 
32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32 32
*/
|
#include "Mapper0.h"
namespace nes {
// Power-on/reset banking for mapper 0 (NROM): fixed mapping, no bank
// switching.  The two PRG slots map to banks 0 and 1 and the single CHR
// slot to bank 0.  (Template arguments presumably select 16 KiB PRG /
// 8 KiB CHR slot sizes -- confirm against set_prg_map/set_chr_map.)
void Mapper0::reset()
{
    set_prg_map<16>(0, 0);
    set_prg_map<16>(1, 1);
    set_chr_map<8>(0, 0);
}
} // namespace nes
|
; ===============================================================
; May 2016
; ===============================================================
;
; long long atoll(const char *buf)
;
; Read the initial portion of the string as decimal long long and
; return value read. Any initial whitespace is skipped.
;
; ===============================================================
SECTION code_clib
SECTION code_stdlib
PUBLIC asm_atoll
EXTERN asm_strtoll
; atoll(buf) == strtoll(buf, NULL, 10): fix the base at 10, pass a null
; end-pointer and tail-jump into asm_strtoll, which returns directly to
; our caller with the conventions documented below.
asm_atoll:
; enter : hl = char *nptr
;
; exit : no error:
;
; carry reset
; dehl'dehl = long long result
;
; invalid input string:
;
; carry set
; dehl'dehl = 0
; errno set to EINVAL
;
; overflow:
;
; carry set
; dehl'dehl = $7fffffff ffffffff (LLONG_MAX) or $80000000 00000000 (LLONG_MIN)
; errno set to ERANGE
;
; uses : af, bc, de, hl, af', bc', de', hl', ix
ld bc,10 ; base = 10
ld de,0 ; endp = 0
jp asm_strtoll ; tail call; no return here
|
;
; The MIT License (MIT)
;
; Copyright © 2016 Franklin "Snaipe" Mathieu <http://snai.pe/>
;
; Permission is hereby granted, free of charge, to any person obtaining a copy
; of this software and associated documentation files (the "Software"), to deal
; in the Software without restriction, including without limitation the rights
; to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
; copies of the Software, and to permit persons to whom the Software is
; furnished to do so, subject to the following conditions:
;
; The above copyright notice and this permission notice shall be included in
; all copies or substantial portions of the Software.
;
; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
; IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
; FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
; AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
; LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
; OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
; THE SOFTWARE.
;
.CODE
; Far trampoline: an absolute 8-byte target address is written into
; addr_data at runtime (via bxfi_trampoline_addr); the indirect jmp then
; transfers control there.  The three public labels let the patcher copy
; the whole trampoline and locate the address slot within it.
bxfi_trampoline label far
jmp qword ptr addr_data
ALIGN 8
bxfi_trampoline_addr label far
; Storage for the patched target address (zero until patched).
addr_data byte 8 dup (0)
bxfi_trampoline_end label far
public bxfi_trampoline
public bxfi_trampoline_addr
public bxfi_trampoline_end
end
|
;cmtCore assembly support
;i386 version for masm
.model flat
.code
;cx dx
;void cmtSpinLockEnter(cmtUint8* value, cmtUint64 MaxSpin)
;int cmtSpinLockEnter(cmtUint8* value, cmtUint64 MaxSpin)
;__fastcall: ecx = value (lock byte), edx = MaxSpin.
;Returns 0 when the lock was acquired, 1 when MaxSpin attempts were
;exhausted.  MaxSpin == -1 (0ffffffffh) spins until acquisition.
@cmtSpinLockEnter@8 proc
;ah = 1, the "locked" marker we try to store
xor ah, ah
inc ah
;MaxSpin == -1: spin forever without counting attempts
cmp edx, 0ffffffffh
je cSLE_NoCount
cSLE_SpinStart:
;counted mode: edx holds the remaining attempts
test edx, edx
;attempt budget exhausted -> give up
jz cSLE_EndMaxSpin
dec edx
xor al, al
;cmpxchg: if ([ecx] == al) { [ecx] = ah; ZF = 1; }
;         else             { al = [ecx]; ZF = 0; }
;BUGFIX: added the lock prefix so the exchange is atomic on SMP systems.
lock cmpxchg byte ptr [ecx], ah
;ZF set -> we stored the marker: lock acquired
jz cSLE_EndNormal
;otherwise keep spinning
jmp cSLE_SpinStart
cSLE_NoCount:
xor al, al
lock cmpxchg byte ptr [ecx], ah
;BUGFIX: the branches here were inverted -- acquiring the lock fell
;through to the "max spin" return and failing returned success.  Now we
;retry until the exchange succeeds, then report normal acquisition.
jnz cSLE_NoCount
jmp cSLE_EndNormal
cSLE_EndMaxSpin:
;return 1: spin budget exhausted without acquiring the lock
xor eax, eax
inc eax
ret
cSLE_EndNormal:
;return 0: lock acquired
xor eax, eax
ret
@cmtSpinLockEnter@8 endp
end |
; A098923: 33-gonal numbers: n(31n-29)/2.
; 0,1,33,96,190,315,471,658,876,1125,1405,1716,2058,2431,2835,3270,3736,4233,4761,5320,5910,6531,7183,7866,8580,9325,10101,10908,11746,12615,13515,14446,15408,16401,17425,18480,19566,20683,21831,23010,24220,25461,26733,28036,29370,30735,32131,33558,35016,36505,38025,39576,41158,42771,44415,46090,47796,49533,51301,53100,54930,56791,58683,60606,62560,64545,66561,68608,70686,72795,74935,77106,79308,81541,83805,86100,88426,90783,93171,95590,98040,100521,103033,105576,108150,110755,113391,116058,118756,121485,124245,127036,129858,132711,135595,138510,141456,144433,147441,150480,153550,156651,159783,162946,166140,169365,172621,175908,179226,182575,185955,189366,192808,196281,199785,203320,206886,210483,214111,217770,221460,225181,228933,232716,236530,240375,244251,248158,252096,256065,260065,264096,268158,272251,276375,280530,284716,288933,293181,297460,301770,306111,310483,314886,319320,323785,328281,332808,337366,341955,346575,351226,355908,360621,365365,370140,374946,379783,384651,389550,394480,399441,404433,409456,414510,419595,424711,429858,435036,440245,445485,450756,456058,461391,466755,472150,477576,483033,488521,494040,499590,505171,510783,516426,522100,527805,533541,539308,545106,550935,556795,562686,568608,574561,580545,586560,592606,598683,604791,610930,617100,623301,629533,635796,642090,648415,654771,661158,667576,674025,680505,687016,693558,700131,706735,713370,720036,726733,733461,740220,747010,753831,760683,767566,774480,781425,788401,795408,802446,809515,816615,823746,830908,838101,845325,852580,859866,867183,874531,881910,889320,896761,904233,911736,919270,926835,934431,942058,949716,957405
; a(n) = n*(31*n-29)/2, computed as n + 31*C(n,2).
mov $2,$0 ; $2 = n
bin $2,2 ; $2 = C(n,2) = n*(n-1)/2
mul $2,31 ; $2 = 31*n*(n-1)/2
add $0,$2 ; $0 = n + 31*n*(n-1)/2 = n*(31*n-29)/2
mov $1,$0 ; result in $1
|
/*BEGIN_LEGAL
Intel Open Source License
Copyright (c) 2002-2016 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer. Redistributions
in binary form must reproduce the above copyright notice, this list of
conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution. Neither the name of
the Intel Corporation nor the names of its contributors may be used to
endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL OR
ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
END_LEGAL */
//Child process application
#include <windows.h>
#include <stdio.h>
#include <iostream>
using namespace std;
int main(int argc, char * argv[])
{
CHAR * expectedArgv[4] = {"w_grand_child_process", "param1 param2", "param3", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"};
string currentArgv;
//Take into account that a path might be added to the executable name
currentArgv = argv[0];
string::size_type index = currentArgv.find(expectedArgv[0]);
if(index == string::npos)
{
//Got unexpected parameter
cout << "Got unexpected parameter: " << argv[0] << endl;
return (-1);
}
//All the rest should have exact match
for(int i = 1; i < argc; i++)
{
currentArgv = argv[i];
if(currentArgv.compare(expectedArgv[i]) != 0)
{
//Got unexpected parameter
cout << "Got unexpected parameter: " << argv[i] << endl;
return (-1);
}
}
return 0;
}
|
; A004984: a(n) = (2^n/n!)*Product_{k=0..n-1} (4*k - 1).
; Submitted by Jamie Morken(s4)
; 1,-2,-6,-28,-154,-924,-5852,-38456,-259578,-1788204,-12517428,-88759944,-636112932,-4599585816,-33511268088,-245749299312,-1812401082426,-13433090375628,-100001895018564,-747382583822952,-5605369378672140,-42173731515723720,-318219974164097160,-2407403282806648080,-18256141561283747940,-138746675865756484344,-1056609300823837842312,-8061537628507799833936,-61613180446452470159368,-471659519279739599151024,-3616056314478003593491184,-27761980736960156620996832,-213420226915381204023913146
; A004984: a(n) = (2^n/n!) * Product_{k=0..n-1} (4k-1).
mov $1,1   ; $1 = running product, starts at 1
mov $3,$0  ; $3 = loop counter (n iterations)
mov $0,10  ; seed; each pass first subtracts 8, giving factors 2,-6,-14,...
lpb $3
sub $0,8   ; next numerator factor (2 - 8*(i-1) on pass i)
mul $1,$0  ; fold factor into the product
sub $2,1   ; $2 counts down 0,-1,-2,... so it equals -i on pass i
div $1,$2  ; divide by -i; signs combine to give the sequence's values
sub $3,1
lpe
mov $0,$1  ; a(n): verified against the data (n=1 -> -2, n=2 -> -6, ...)
|
/*=========================================================================
Program: Visualization Toolkit
Module: vtkControlPointsItem.cxx
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
#include "vtkBrush.h"
#include "vtkCallbackCommand.h"
#include "vtkContext2D.h"
#include "vtkContextScene.h"
#include "vtkControlPointsItem.h"
#include "vtkObjectFactory.h"
#include "vtkPen.h"
#include "vtkPoints2D.h"
#include "vtkSmartPointer.h"
#include "vtkTransform2D.h"
#include <cassert>
//-----------------------------------------------------------------------------
// Construct the item: configure default pen/brush appearance, allocate the
// two point containers, and register a callback that re-computes the points
// whenever the observed object fires (see CallComputePoints).
vtkControlPointsItem::vtkControlPointsItem()
{
this->Pen->SetLineType(vtkPen::SOLID_LINE);
this->Pen->SetWidth(1.);
this->Pen->SetColorF(1., 1., 1.);
// Semi-transparent light-blue fill for the control-point markers.
this->Brush->SetColorF(0.85, 0.85, 1., 0.75);
this->Points = vtkPoints2D::New();
this->HighlightPoints = vtkPoints2D::New();
this->Callback = vtkCallbackCommand::New();
// ClientData carries 'this' so the static callback can reach the instance.
this->Callback->SetClientData(this);
this->Callback->SetCallback(
vtkControlPointsItem::CallComputePoints);
}
//-----------------------------------------------------------------------------
// Destructor: release the VTK objects created in the constructor.
// Each pointer is null-checked so a partially constructed instance tears
// down safely, and zeroed afterwards to avoid dangling use.
vtkControlPointsItem::~vtkControlPointsItem()
{
if (this->Points)
{
this->Points->Delete();
this->Points = 0;
}
if (this->HighlightPoints)
{
this->HighlightPoints->Delete();
this->HighlightPoints = 0;
}
if (this->Callback)
{
this->Callback->Delete();
this->Callback = 0;
}
}
//-----------------------------------------------------------------------------
// Standard VTK PrintSelf: delegate to the superclass (no extra state printed).
void vtkControlPointsItem::PrintSelf(ostream &os, vtkIndent indent)
{
this->Superclass::PrintSelf(os, indent);
}
//-----------------------------------------------------------------------------
// Report the bounding box of the control points as (xmin, xmax, ymin, ymax),
// forwarded from the underlying vtkPoints2D container.
void vtkControlPointsItem::GetBounds(double bounds[4])
{
this->Points->GetBounds(bounds);
}
//-----------------------------------------------------------------------------
// Paint the regular control points first, then the highlighted ones on top
// with a distinct pen/brush so they stand out. Always reports success.
bool vtkControlPointsItem::Paint(vtkContext2D* painter)
{
if (this->Points->GetNumberOfPoints())
{
painter->ApplyPen(this->Pen);
painter->ApplyBrush(this->Brush);
this->DrawPoints(painter, this->Points);
}
if (this->HighlightPoints->GetNumberOfPoints())
{
// Highlight style: brighter outline, more saturated translucent fill.
painter->GetPen()->SetLineType(vtkPen::SOLID_LINE);
painter->GetPen()->SetColorF(0.87, 0.87, 1.);
painter->GetBrush()->SetColorF(0.65, 0.65, 0.95, 0.55);
this->DrawPoints(painter, this->HighlightPoints);
}
return true;
}
//-----------------------------------------------------------------------------
// vtkCallbackCommand trampoline. 'receiver' is the ClientData registered in
// the constructor, i.e. the vtkControlPointsItem instance itself; sender,
// event id and call data are unused. Forwards to ComputePoints().
void vtkControlPointsItem::CallComputePoints(
  vtkObject* vtkNotUsed(sender), unsigned long vtkNotUsed(event),
  void* receiver, void* vtkNotUsed(params))
{
  // static_cast is the correct way to recover a pointer that was converted
  // to void* (the original reinterpret_cast also works here, but static_cast
  // expresses the round-trip conversion precisely).
  vtkControlPointsItem* item =
    static_cast<vtkControlPointsItem*>(receiver);
  item->ComputePoints();
}
//-----------------------------------------------------------------------------
// Hook invoked when the observed source changes; the base behavior simply
// marks this item modified so the scene repaints. Subclasses may extend it.
void vtkControlPointsItem::ComputePoints()
{
this->Modified();
}
//-----------------------------------------------------------------------------
// Draw each point of 'points' as a filled disc (wedge spanning 360 degrees)
// of radius 6 plus an outline arc, in screen space.
void vtkControlPointsItem::DrawPoints(vtkContext2D* painter, vtkPoints2D* points)
{
// The scene transform maps control-point coordinates to screen coordinates.
vtkTransform2D* sceneTransform = painter->GetTransform();
vtkSmartPointer<vtkTransform2D> translation =
vtkSmartPointer<vtkTransform2D>::New();
double point[2];
double transformedPoint[2];
const int count = points->GetNumberOfPoints();
for (int i = 0; i < count; ++i)
{
points->GetPoint(i, point);
sceneTransform->TransformPoints(point, transformedPoint, 1);
// Replace the painter transform with a pure translation to the point's
// screen position so the marker radius is independent of scene zoom;
// PushMatrix/PopMatrix restores the caller's transform each iteration.
painter->PushMatrix();
translation->Identity();
translation->Translate(transformedPoint[0], transformedPoint[1]);
painter->SetTransform(translation);
painter->DrawWedge(0.f, 0.f, 6.f, 0.f, 0.f, 360.f);
painter->DrawArc(0.f, 0.f, 6.f, 0.f, 360.f);
painter->PopMatrix();
}
}
|
SECTION code_clib
PUBLIC cleararea
EXTERN pixeladdress
EXTERN leftbitmask, rightbitmask
;
; $Id: clrarea2.asm,v 1.6 2017/01/02 21:51:24 aralbrec Exp $
;
; ***********************************************************************
;
; Clear specified graphics area in map.
;
; Design & programming by Gunther Strube, Copyright (C) InterLogic 1995
;
; Generic SMC version by Stefano Bodrato.
;
;
;
; IN: HL = (x,y)
; BC = (width,height)
;
; Registers changed after return:
; ......../IXIY same
; AFBCDEHL/.... different
;
.cleararea
; Self-modifying routine: before entering the per-row loop it patches its own
; operands — the left/right edge masks, the row byte count, and the current
; (y,x) coordinate — then iterates once per pixel row.
ld (coord+1),hl ; SMC: remember y,x
inc b
push bc ; remember height
push bc
push hl
call pixeladdress ; bitpos0 = pixeladdress(x,y)
pop hl
call leftbitmask ; LeftBitMask(bitpos0)
ld (bitmaskl1+1),a ; SMC: left-edge mask, single-byte-row case
ld (bitmaskl2+1),a ; SMC: left-edge mask, multi-byte-row case
pop bc
ld a,h
add b ; advance coordinate by width (b was pre-incremented above)
dec a
dec a
ld h,a
push de
call pixeladdress ; bitpos1 = pixeladdress(x+width-1,y)
call rightbitmask ; RightBitMask(bitpos1)
ld (bitmaskr1+1),a
ld (bitmaskr2+1),a ; SMC: right-edge mask (both row-shape cases)
pop hl
push hl ; adr0
ex de,hl
cp a ; cp a sets Z and clears carry before the sbc below
sbc hl,de ; (adr1-adr0)/8
ld a,l
ld (rowbytes1+1),a
ld (rowbytes2+1),a ; rowbytes = (adr1-adr0) div 8, no. of bytes in row
; 0 means that area is within same address
; FOR h = 1 TO height
pop hl ; adr0
.clear_height
xor a
.rowbytes1 cp 0 ; if rowbytes = 0
jr nz, clear_row ; area is within one byte...
ld a,(hl)
.bitmaskl1 and 0 ; preserve bits of leftmost side of byte
ld b,a
ld a,(hl)
.bitmaskr1 and 0 ; preserve bits of rightmost side of byte
or b ; merge preserved bits of left side
ld (hl),a ; (offset) = byte
jr clear_nextrow ; else
.clear_row ; clear area is defined as rows of bytes
ld a,(hl)
.bitmaskl2 and 0 ; preserve only leftmost bits (outside of area)
ld (hl),a ; (offset) = (offset) AND bitmask0
inc hl ; offset += 1 (8 bits)
.rowbytes2 ld b,0 ; r = rowbytes
dec b ; --r
jr z, row_cleared ; if ( r )
.clear_row_loop ; do
ld (hl),0 ; (offset) = 0
inc hl ; offset += 1 (8 bits)
djnz clear_row_loop ; while ( r-- != 0 )
.row_cleared ld a,(hl) ; byte = (adr1)
.bitmaskr2 and 0
ld (hl),a ; preserve only rightmost side of byte (outside area)
.clear_nextrow
.coord ld hl,0 ; SMC -> y,x (operand patched at entry and below)
inc l ; step to the next pixel row
ld (coord+1),hl ; SMC -> y,x
call pixeladdress
ex de,hl ; put adr0 in hl for next row
; END FOR h
.height pop bc
dec c ; height counter (NOTE(review): kept in C here — confirm
; against pixeladdress register usage)
push bc
jr nz, clear_height
pop bc
.end_cleararea ret
|
.global s_prepare_buffers
s_prepare_buffers:
// Generated memory-access stressor (speculative-execution test harness):
// touches the *_ht buffer regions with a fixed mix of stores, loads and a
// rep-move so later measurements start from a known cache state. Saves and
// restores every register it uses; returns no value.
push %r10
push %r11
push %r13
push %r15
push %rax
push %rcx
push %rdi
push %rdx
push %rsi
// 32-byte store of the 'hgfedcba' byte pattern into addresses_A_ht.
lea addresses_A_ht+0xa6e9, %r13
nop
nop
nop
xor $11385, %rdi
mov $0x6162636465666768, %r10
movq %r10, %xmm4
vmovups %ymm4, (%r13)
sub %rdi, %rdi
// 32-byte store into addresses_normal_ht; the line is flushed first.
lea addresses_normal_ht+0x9a49, %rdx
clflush (%rdx)
nop
nop
nop
xor %rax, %rax
mov $0x6162636465666768, %r11
movq %r11, %xmm7
vmovups %ymm7, (%rdx)
nop
nop
nop
dec %r13
// Single-byte load from addresses_A_ht.
lea addresses_A_ht+0x1130d, %rdi
cmp %rdx, %rdx
movb (%rdi), %r10b
nop
nop
nop
cmp %r11, %r11
// 16-byte load from addresses_WT_ht.
lea addresses_WT_ht+0xa559, %rdx
nop
nop
nop
sub $45133, %r15
movups (%rdx), %xmm7
vpextrq $0, %xmm7, %r13
nop
nop
sub %r15, %r15
// 8-byte store into addresses_WT_ht.
lea addresses_WT_ht+0xea09, %rax
nop
nop
nop
nop
nop
xor %rdx, %rdx
mov $0x6162636465666768, %r15
movq %r15, (%rax)
nop
nop
nop
nop
nop
and $57077, %r11
// rep movsl: copy 107 dwords between two addresses_D_ht windows.
lea addresses_D_ht+0x3ea9, %rsi
lea addresses_D_ht+0x1d2e9, %rdi
nop
nop
inc %r11
mov $107, %rcx
rep movsl
nop
nop
nop
nop
and $32127, %rax
// Aligned 16-byte store into addresses_D_ht; the mask rounds the address
// down to a 64-byte boundary so movaps cannot fault on alignment.
lea addresses_D_ht+0x13069, %rsi
nop
nop
dec %r15
mov $0x6162636465666768, %r13
movq %r13, %xmm6
and $0xffffffffffffffc0, %rsi
movaps %xmm6, (%rsi)
nop
xor $17051, %r10
pop %rsi
pop %rdx
pop %rdi
pop %rcx
pop %rax
pop %r15
pop %r13
pop %r11
pop %r10
ret
.global s_faulty_load
s_faulty_load:
// Generated victim routine: performs two stores, a deliberately faulting
// load from address 0, then a "faulty" 32-byte load whose low byte indexes
// the oracle probe array (cache side-channel measurement).
push %r12
push %r8
push %rax
push %rbx
push %rcx
push %rdi
// Store
lea addresses_A+0x15bdb, %r12
nop
nop
nop
nop
sub $3385, %rcx
movw $0x5152, (%r12)
// Exception!!!
nop
nop
nop
nop
nop
mov (0), %rax // load from absolute address 0 raises the fault
nop
nop
inc %rcx
// Store
lea addresses_normal+0xfd69, %rdi
nop
dec %r12
movb $0x51, (%rdi)
nop
and $25572, %rbx
// Faulty Load
lea addresses_WC+0x13ae9, %rcx
nop
nop
nop
nop
nop
dec %r12
vmovups (%rcx), %ymm7
vextracti128 $1, %ymm7, %xmm7
vpextrq $0, %xmm7, %r8
// Leak one byte of the loaded value: scale it by 4096 (one page per value)
// and touch the corresponding oracle slot.
lea oracles, %rdi
and $0xff, %r8
shlq $12, %r8
mov (%rdi,%r8,1), %r8
pop %rdi
pop %rcx
pop %rbx
pop %rax
pop %r8
pop %r12
ret
/*
<gen_faulty_load>
[REF]
{'OP': 'LOAD', 'src': {'size': 1, 'NT': False, 'type': 'addresses_WC', 'same': False, 'AVXalign': False, 'congruent': 0}}
{'OP': 'STOR', 'dst': {'size': 2, 'NT': False, 'type': 'addresses_A', 'same': False, 'AVXalign': False, 'congruent': 1}}
{'OP': 'STOR', 'dst': {'size': 1, 'NT': False, 'type': 'addresses_normal', 'same': False, 'AVXalign': False, 'congruent': 5}}
[Faulty Load]
{'OP': 'LOAD', 'src': {'size': 32, 'NT': False, 'type': 'addresses_WC', 'same': True, 'AVXalign': False, 'congruent': 0}}
<gen_prepare_buffer>
{'OP': 'STOR', 'dst': {'size': 32, 'NT': False, 'type': 'addresses_A_ht', 'same': False, 'AVXalign': False, 'congruent': 10}}
{'OP': 'STOR', 'dst': {'size': 32, 'NT': False, 'type': 'addresses_normal_ht', 'same': False, 'AVXalign': False, 'congruent': 4}}
{'OP': 'LOAD', 'src': {'size': 1, 'NT': False, 'type': 'addresses_A_ht', 'same': False, 'AVXalign': False, 'congruent': 2}}
{'OP': 'LOAD', 'src': {'size': 16, 'NT': False, 'type': 'addresses_WT_ht', 'same': False, 'AVXalign': False, 'congruent': 4}}
{'OP': 'STOR', 'dst': {'size': 8, 'NT': False, 'type': 'addresses_WT_ht', 'same': True, 'AVXalign': False, 'congruent': 4}}
{'OP': 'REPM', 'src': {'same': False, 'type': 'addresses_D_ht', 'congruent': 1}, 'dst': {'same': False, 'type': 'addresses_D_ht', 'congruent': 9}}
{'OP': 'STOR', 'dst': {'size': 16, 'NT': False, 'type': 'addresses_D_ht', 'same': False, 'AVXalign': True, 'congruent': 6}}
{'00': 149, '46': 21680}
46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 00 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 00 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 00 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 
46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46 46
*/
|
; A003589: a(n) has the property that the sequence b(n) = number of 2's between successive 3's is the same as the original sequence.
; 2,2,3,2,2,3,2,2,3,2,2,2,3,2,2,3,2,2,3,2,2,2,3,2,2,3,2,2,3,2,2,2,3,2,2,3,2,2,3,2,2,3,2,2,2,3,2,2,3,2,2,3,2,2,2,3,2,2,3,2
; Generated LODA program for A003589 (self-describing run-length pattern of
; 2s and 3s). NOTE(review): the constants 8, 7, 5 and 3 below encode an
; index-mapping fit found by the LODA miner; they are left untouched and
; not re-derived here.
mov $2,$0 ; save n
mov $5,2 ; outer loop: two evaluation passes
lpb $5
mov $0,$2
sub $5,1
add $0,$5
sub $0,1
mov $3,1
mov $4,$0
add $0,3
add $3,$4
add $3,$0
div $4,8
add $4,$3
mov $0,$4
div $0,7
mul $0,5
mov $6,$0
mov $7,$5
lpb $7 ; copy $6 into $1 only on the first outer pass
mov $1,$6
sub $7,1
lpe
lpe
lpb $2 ; for n > 0, take the difference of the two passes
sub $1,$6
mov $2,0
lpe
div $1,5
add $1,2 ; final map into the {2,3} alphabet
|
#include<cstdio>
#include<queue>
using namespace std;
// Binary tree node: payload value plus left/right child links.
struct Node {
int data;
Node *l; // left child (NULL if absent)
Node *r; // right child (NULL if absent)
};
int p[31], i[31],c[31],cnt=0; // p: postorder input, i: inorder input, c: level-order output, cnt: fill index into c
Node *root; // root of the reconstructed tree
queue<Node*> q; // work queue shared with BFS
// Rebuild a binary tree from its postorder slice p[pl..pr] and inorder
// slice i[il..ir]. The subtree root is the last postorder element; its
// position in the inorder slice splits the rest into left/right subtrees.
Node* create(int pl, int pr, int il, int ir) {
    if (pl > pr || il > ir)
        return NULL;

    int rootVal = p[pr];
    Node* subRoot = new Node;
    subRoot->data = rootVal;

    // Locate the root value inside the inorder window.
    int split = il;
    while (split <= ir && i[split] != rootVal)
        ++split;

    int leftSize = split - il;
    subRoot->l = create(pl, pl + leftSize - 1, il, split - 1);
    subRoot->r = create(pl + leftSize, pr - 1, split + 1, ir);
    return subRoot;
}
// Level-order traversal starting at n: append each visited node's value to
// the global output array c[] using the shared queue q.
void BFS(Node *n) {
    q.push(n);
    while (!q.empty()) {
        Node *cur = q.front();
        q.pop();
        c[cnt++] = cur->data;
        if (cur->l) q.push(cur->l);
        if (cur->r) q.push(cur->r);
    }
}
// Read N, the postorder sequence and the inorder sequence (both 1-based),
// rebuild the tree, then print its level-order traversal space-separated
// with no trailing space.
int main() {
int N;
scanf("%d", &N); // number of nodes
for (int i = 1; i <= N; i++) {
scanf("%d", &p[i]); // postorder sequence
}
for (int j = 1; j <= N; j++) {
scanf("%d", &i[j]); // inorder sequence
}
root=create(1,N,1,N);
BFS(root);
// NOTE(review): this loop variable i shadows the global inorder array i[];
// harmless here since the array is no longer needed, but easy to misread.
for (int i = 0; i < N; i++) {
printf("%d", c[i]);
if (i < N - 1)
printf(" ");
}
return 0;
}
// Reference implementation from "Suanfa Biji" (Algorithm Notes)
#include <cstdio>
#include <cstring>
#include <queue>
#include <algorithm>
using namespace std;
const int maxn = 50; // capacity: maximum number of nodes supported
// Binary tree node: payload value plus left/right child links.
struct node {
int data;
node* lchild; // left child (NULL if absent)
node* rchild; // right child (NULL if absent)
};
int pre[maxn], in[maxn], post[maxn]; // traversal buffers; pre[] is declared but not referenced in this chunk
int n; // number of nodes
// Reconstruct the tree whose postorder is post[postL..postR] and whose
// inorder is in[inL..inR]; returns NULL for an empty range.
node* create(int postL, int postR, int inL, int inR) {
    if (postL > postR) {
        return NULL;
    }
    int rootVal = post[postR];      // last postorder entry is the root
    node* subRoot = new node;
    subRoot->data = rootVal;
    // Find the root inside the inorder window to size the left subtree.
    int pos = inL;
    while (pos <= inR && in[pos] != rootVal) {
        ++pos;
    }
    int leftCount = pos - inL;
    subRoot->lchild = create(postL, postL + leftCount - 1, inL, pos - 1);
    subRoot->rchild = create(postL + leftCount, postR - 1, pos + 1, inR);
    return subRoot;
}
int num = 0;  // count of values printed so far (suppresses trailing space)
// Print the level-order traversal of the tree, values space-separated.
void BFS(node* root) {
    queue<node*> pending;
    pending.push(root);
    while (!pending.empty()) {
        node* cur = pending.front();
        pending.pop();
        printf("%d", cur->data);
        num++;
        if (num < n) printf(" ");
        if (cur->lchild != NULL) pending.push(cur->lchild);
        if (cur->rchild != NULL) pending.push(cur->rchild);
    }
}
// Read n, the postorder and inorder sequences (0-based), rebuild the tree
// and print its level-order traversal (BFS handles the spacing).
int main() {
scanf("%d", &n); // node count
for(int i = 0; i < n; i++) {
scanf("%d", &post[i]); // postorder traversal
}
for(int i = 0; i < n; i++) {
scanf("%d", &in[i]); // inorder traversal
}
node* root = create(0, n - 1, 0, n - 1);
BFS(root);
return 0;
}
|
/*
Copyright (c) 2018, Ford Motor Company
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the Ford Motor Company nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "sdl_rpc_plugin/commands/mobile/list_files_response.h"
#include "application_manager/application_impl.h"
#include "application_manager/rpc_service.h"
namespace sdl_rpc_plugin {
using namespace application_manager;
namespace commands {
// Constructor: forwards all collaborators to the CommandResponseImpl base;
// this class keeps no additional state of its own.
ListFilesResponse::ListFilesResponse(
const application_manager::commands::MessageSharedPtr& message,
ApplicationManager& application_manager,
rpc_service::RPCService& rpc_service,
HMICapabilities& hmi_capabilities,
policy::PolicyHandlerInterface& policy_handler)
: CommandResponseImpl(message,
application_manager,
rpc_service,
hmi_capabilities,
policy_handler) {}
ListFilesResponse::~ListFilesResponse() {} // nothing to release beyond the base class
// Execute the response command: forward the stored response message back to
// the mobile application via the RPC service.
void ListFilesResponse::Run() {
LOG4CXX_AUTO_TRACE(logger_);
rpc_service_.SendMessageToMobile(message_);
}
} // namespace commands
} // namespace sdl_rpc_plugin
|
_stressfs: file format elf32-i386
Disassembly of section .text:
00000000 <main>:
#include "fs.h"
#include "fcntl.h"
int
main(int argc, char *argv[])
{
0: 8d 4c 24 04 lea 0x4(%esp),%ecx
4: 83 e4 f0 and $0xfffffff0,%esp
int fd, i;
char path[] = "stressfs0";
7: b8 30 00 00 00 mov $0x30,%eax
{
c: ff 71 fc pushl -0x4(%ecx)
f: 55 push %ebp
10: 89 e5 mov %esp,%ebp
12: 57 push %edi
13: 56 push %esi
14: 53 push %ebx
15: 51 push %ecx
char data[512];
printf(1, "stressfs starting\n");
memset(data, 'a', sizeof(data));
16: 8d b5 e8 fd ff ff lea -0x218(%ebp),%esi
for(i = 0; i < 4; i++)
1c: 31 db xor %ebx,%ebx
{
1e: 81 ec 20 02 00 00 sub $0x220,%esp
char path[] = "stressfs0";
24: 66 89 85 e6 fd ff ff mov %ax,-0x21a(%ebp)
2b: c7 85 de fd ff ff 73 movl $0x65727473,-0x222(%ebp)
32: 74 72 65
printf(1, "stressfs starting\n");
35: 68 68 08 00 00 push $0x868
3a: 6a 01 push $0x1
char path[] = "stressfs0";
3c: c7 85 e2 fd ff ff 73 movl $0x73667373,-0x21e(%ebp)
43: 73 66 73
printf(1, "stressfs starting\n");
46: e8 c5 04 00 00 call 510 <printf>
memset(data, 'a', sizeof(data));
4b: 83 c4 0c add $0xc,%esp
4e: 68 00 02 00 00 push $0x200
53: 6a 61 push $0x61
55: 56 push %esi
56: e8 95 01 00 00 call 1f0 <memset>
5b: 83 c4 10 add $0x10,%esp
if(fork() > 0)
5e: e8 27 03 00 00 call 38a <fork>
63: 85 c0 test %eax,%eax
65: 0f 8f bf 00 00 00 jg 12a <main+0x12a>
for(i = 0; i < 4; i++)
6b: 83 c3 01 add $0x1,%ebx
6e: 83 fb 04 cmp $0x4,%ebx
71: 75 eb jne 5e <main+0x5e>
73: bf 04 00 00 00 mov $0x4,%edi
break;
printf(1, "write %d\n", i);
78: 83 ec 04 sub $0x4,%esp
7b: 53 push %ebx
7c: 68 7b 08 00 00 push $0x87b
path[8] += i;
fd = open(path, O_CREATE | O_RDWR);
81: bb 14 00 00 00 mov $0x14,%ebx
printf(1, "write %d\n", i);
86: 6a 01 push $0x1
88: e8 83 04 00 00 call 510 <printf>
path[8] += i;
8d: 89 f8 mov %edi,%eax
8f: 00 85 e6 fd ff ff add %al,-0x21a(%ebp)
fd = open(path, O_CREATE | O_RDWR);
95: 5f pop %edi
96: 58 pop %eax
97: 8d 85 de fd ff ff lea -0x222(%ebp),%eax
9d: 68 02 02 00 00 push $0x202
a2: 50 push %eax
a3: e8 2a 03 00 00 call 3d2 <open>
a8: 83 c4 10 add $0x10,%esp
ab: 89 c7 mov %eax,%edi
ad: 8d 76 00 lea 0x0(%esi),%esi
for(i = 0; i < 20; i++)
// printf(fd, "%d\n", i);
write(fd, data, sizeof(data));
b0: 83 ec 04 sub $0x4,%esp
b3: 68 00 02 00 00 push $0x200
b8: 56 push %esi
b9: 57 push %edi
ba: e8 f3 02 00 00 call 3b2 <write>
for(i = 0; i < 20; i++)
bf: 83 c4 10 add $0x10,%esp
c2: 83 eb 01 sub $0x1,%ebx
c5: 75 e9 jne b0 <main+0xb0>
close(fd);
c7: 83 ec 0c sub $0xc,%esp
ca: 57 push %edi
cb: e8 ea 02 00 00 call 3ba <close>
printf(1, "read\n");
d0: 58 pop %eax
d1: 5a pop %edx
d2: 68 85 08 00 00 push $0x885
d7: 6a 01 push $0x1
d9: e8 32 04 00 00 call 510 <printf>
fd = open(path, O_RDONLY);
de: 59 pop %ecx
df: 8d 85 de fd ff ff lea -0x222(%ebp),%eax
e5: 5b pop %ebx
e6: 6a 00 push $0x0
e8: 50 push %eax
e9: bb 14 00 00 00 mov $0x14,%ebx
ee: e8 df 02 00 00 call 3d2 <open>
f3: 83 c4 10 add $0x10,%esp
f6: 89 c7 mov %eax,%edi
f8: 90 nop
f9: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
for (i = 0; i < 20; i++)
read(fd, data, sizeof(data));
100: 83 ec 04 sub $0x4,%esp
103: 68 00 02 00 00 push $0x200
108: 56 push %esi
109: 57 push %edi
10a: e8 9b 02 00 00 call 3aa <read>
for (i = 0; i < 20; i++)
10f: 83 c4 10 add $0x10,%esp
112: 83 eb 01 sub $0x1,%ebx
115: 75 e9 jne 100 <main+0x100>
close(fd);
117: 83 ec 0c sub $0xc,%esp
11a: 57 push %edi
11b: e8 9a 02 00 00 call 3ba <close>
wait();
120: e8 75 02 00 00 call 39a <wait>
exit();
125: e8 68 02 00 00 call 392 <exit>
12a: 89 df mov %ebx,%edi
12c: e9 47 ff ff ff jmp 78 <main+0x78>
131: 66 90 xchg %ax,%ax
133: 66 90 xchg %ax,%ax
135: 66 90 xchg %ax,%ax
137: 66 90 xchg %ax,%ax
139: 66 90 xchg %ax,%ax
13b: 66 90 xchg %ax,%ax
13d: 66 90 xchg %ax,%ax
13f: 90 nop
00000140 <strcpy>:
#include "user.h"
#include "x86.h"
char*
strcpy(char *s, char *t)
{
140: 55 push %ebp
141: 89 e5 mov %esp,%ebp
143: 53 push %ebx
144: 8b 45 08 mov 0x8(%ebp),%eax
147: 8b 4d 0c mov 0xc(%ebp),%ecx
char *os;
os = s;
while((*s++ = *t++) != 0)
14a: 89 c2 mov %eax,%edx
14c: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
150: 83 c1 01 add $0x1,%ecx
153: 0f b6 59 ff movzbl -0x1(%ecx),%ebx
157: 83 c2 01 add $0x1,%edx
15a: 84 db test %bl,%bl
15c: 88 5a ff mov %bl,-0x1(%edx)
15f: 75 ef jne 150 <strcpy+0x10>
;
return os;
}
161: 5b pop %ebx
162: 5d pop %ebp
163: c3 ret
164: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
16a: 8d bf 00 00 00 00 lea 0x0(%edi),%edi
00000170 <strcmp>:
int
strcmp(const char *p, const char *q)
{
170: 55 push %ebp
171: 89 e5 mov %esp,%ebp
173: 53 push %ebx
174: 8b 55 08 mov 0x8(%ebp),%edx
177: 8b 4d 0c mov 0xc(%ebp),%ecx
while(*p && *p == *q)
17a: 0f b6 02 movzbl (%edx),%eax
17d: 0f b6 19 movzbl (%ecx),%ebx
180: 84 c0 test %al,%al
182: 75 1c jne 1a0 <strcmp+0x30>
184: eb 2a jmp 1b0 <strcmp+0x40>
186: 8d 76 00 lea 0x0(%esi),%esi
189: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
p++, q++;
190: 83 c2 01 add $0x1,%edx
while(*p && *p == *q)
193: 0f b6 02 movzbl (%edx),%eax
p++, q++;
196: 83 c1 01 add $0x1,%ecx
199: 0f b6 19 movzbl (%ecx),%ebx
while(*p && *p == *q)
19c: 84 c0 test %al,%al
19e: 74 10 je 1b0 <strcmp+0x40>
1a0: 38 d8 cmp %bl,%al
1a2: 74 ec je 190 <strcmp+0x20>
return (uchar)*p - (uchar)*q;
1a4: 29 d8 sub %ebx,%eax
}
1a6: 5b pop %ebx
1a7: 5d pop %ebp
1a8: c3 ret
1a9: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
1b0: 31 c0 xor %eax,%eax
return (uchar)*p - (uchar)*q;
1b2: 29 d8 sub %ebx,%eax
}
1b4: 5b pop %ebx
1b5: 5d pop %ebp
1b6: c3 ret
1b7: 89 f6 mov %esi,%esi
1b9: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
000001c0 <strlen>:
uint
strlen(char *s)
{
1c0: 55 push %ebp
1c1: 89 e5 mov %esp,%ebp
1c3: 8b 4d 08 mov 0x8(%ebp),%ecx
int n;
for(n = 0; s[n]; n++)
1c6: 80 39 00 cmpb $0x0,(%ecx)
1c9: 74 15 je 1e0 <strlen+0x20>
1cb: 31 d2 xor %edx,%edx
1cd: 8d 76 00 lea 0x0(%esi),%esi
1d0: 83 c2 01 add $0x1,%edx
1d3: 80 3c 11 00 cmpb $0x0,(%ecx,%edx,1)
1d7: 89 d0 mov %edx,%eax
1d9: 75 f5 jne 1d0 <strlen+0x10>
;
return n;
}
1db: 5d pop %ebp
1dc: c3 ret
1dd: 8d 76 00 lea 0x0(%esi),%esi
for(n = 0; s[n]; n++)
1e0: 31 c0 xor %eax,%eax
}
1e2: 5d pop %ebp
1e3: c3 ret
1e4: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
1ea: 8d bf 00 00 00 00 lea 0x0(%edi),%edi
000001f0 <memset>:
void*
memset(void *dst, int c, uint n)
{
1f0: 55 push %ebp
1f1: 89 e5 mov %esp,%ebp
1f3: 57 push %edi
1f4: 8b 55 08 mov 0x8(%ebp),%edx
}
static inline void
stosb(void *addr, int data, int cnt)
{
asm volatile("cld; rep stosb" :
1f7: 8b 4d 10 mov 0x10(%ebp),%ecx
1fa: 8b 45 0c mov 0xc(%ebp),%eax
1fd: 89 d7 mov %edx,%edi
1ff: fc cld
200: f3 aa rep stos %al,%es:(%edi)
stosb(dst, c, n);
return dst;
}
202: 89 d0 mov %edx,%eax
204: 5f pop %edi
205: 5d pop %ebp
206: c3 ret
207: 89 f6 mov %esi,%esi
209: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000210 <strchr>:
char*
strchr(const char *s, char c)
{
210: 55 push %ebp
211: 89 e5 mov %esp,%ebp
213: 53 push %ebx
214: 8b 45 08 mov 0x8(%ebp),%eax
217: 8b 5d 0c mov 0xc(%ebp),%ebx
for(; *s; s++)
21a: 0f b6 10 movzbl (%eax),%edx
21d: 84 d2 test %dl,%dl
21f: 74 1d je 23e <strchr+0x2e>
if(*s == c)
221: 38 d3 cmp %dl,%bl
223: 89 d9 mov %ebx,%ecx
225: 75 0d jne 234 <strchr+0x24>
227: eb 17 jmp 240 <strchr+0x30>
229: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
230: 38 ca cmp %cl,%dl
232: 74 0c je 240 <strchr+0x30>
for(; *s; s++)
234: 83 c0 01 add $0x1,%eax
237: 0f b6 10 movzbl (%eax),%edx
23a: 84 d2 test %dl,%dl
23c: 75 f2 jne 230 <strchr+0x20>
return (char*)s;
return 0;
23e: 31 c0 xor %eax,%eax
}
240: 5b pop %ebx
241: 5d pop %ebp
242: c3 ret
243: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
249: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000250 <gets>:
char*
gets(char *buf, int max)
{
250: 55 push %ebp
251: 89 e5 mov %esp,%ebp
253: 57 push %edi
254: 56 push %esi
255: 53 push %ebx
int i, cc;
char c;
for(i=0; i+1 < max; ){
256: 31 f6 xor %esi,%esi
258: 89 f3 mov %esi,%ebx
{
25a: 83 ec 1c sub $0x1c,%esp
25d: 8b 7d 08 mov 0x8(%ebp),%edi
for(i=0; i+1 < max; ){
260: eb 2f jmp 291 <gets+0x41>
262: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
cc = read(0, &c, 1);
268: 8d 45 e7 lea -0x19(%ebp),%eax
26b: 83 ec 04 sub $0x4,%esp
26e: 6a 01 push $0x1
270: 50 push %eax
271: 6a 00 push $0x0
273: e8 32 01 00 00 call 3aa <read>
if(cc < 1)
278: 83 c4 10 add $0x10,%esp
27b: 85 c0 test %eax,%eax
27d: 7e 1c jle 29b <gets+0x4b>
break;
buf[i++] = c;
27f: 0f b6 45 e7 movzbl -0x19(%ebp),%eax
283: 83 c7 01 add $0x1,%edi
286: 88 47 ff mov %al,-0x1(%edi)
if(c == '\n' || c == '\r')
289: 3c 0a cmp $0xa,%al
28b: 74 23 je 2b0 <gets+0x60>
28d: 3c 0d cmp $0xd,%al
28f: 74 1f je 2b0 <gets+0x60>
for(i=0; i+1 < max; ){
291: 83 c3 01 add $0x1,%ebx
294: 3b 5d 0c cmp 0xc(%ebp),%ebx
297: 89 fe mov %edi,%esi
299: 7c cd jl 268 <gets+0x18>
29b: 89 f3 mov %esi,%ebx
break;
}
buf[i] = '\0';
return buf;
}
29d: 8b 45 08 mov 0x8(%ebp),%eax
buf[i] = '\0';
2a0: c6 03 00 movb $0x0,(%ebx)
}
2a3: 8d 65 f4 lea -0xc(%ebp),%esp
2a6: 5b pop %ebx
2a7: 5e pop %esi
2a8: 5f pop %edi
2a9: 5d pop %ebp
2aa: c3 ret
2ab: 90 nop
2ac: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
2b0: 8b 75 08 mov 0x8(%ebp),%esi
2b3: 8b 45 08 mov 0x8(%ebp),%eax
2b6: 01 de add %ebx,%esi
2b8: 89 f3 mov %esi,%ebx
buf[i] = '\0';
2ba: c6 03 00 movb $0x0,(%ebx)
}
2bd: 8d 65 f4 lea -0xc(%ebp),%esp
2c0: 5b pop %ebx
2c1: 5e pop %esi
2c2: 5f pop %edi
2c3: 5d pop %ebp
2c4: c3 ret
2c5: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
2c9: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
000002d0 <stat>:
int
stat(char *n, struct stat *st)
{
2d0: 55 push %ebp
2d1: 89 e5 mov %esp,%ebp
2d3: 56 push %esi
2d4: 53 push %ebx
int fd;
int r;
fd = open(n, O_RDONLY);
2d5: 83 ec 08 sub $0x8,%esp
2d8: 6a 00 push $0x0
2da: ff 75 08 pushl 0x8(%ebp)
2dd: e8 f0 00 00 00 call 3d2 <open>
if(fd < 0)
2e2: 83 c4 10 add $0x10,%esp
2e5: 85 c0 test %eax,%eax
2e7: 78 27 js 310 <stat+0x40>
return -1;
r = fstat(fd, st);
2e9: 83 ec 08 sub $0x8,%esp
2ec: ff 75 0c pushl 0xc(%ebp)
2ef: 89 c3 mov %eax,%ebx
2f1: 50 push %eax
2f2: e8 f3 00 00 00 call 3ea <fstat>
close(fd);
2f7: 89 1c 24 mov %ebx,(%esp)
r = fstat(fd, st);
2fa: 89 c6 mov %eax,%esi
close(fd);
2fc: e8 b9 00 00 00 call 3ba <close>
return r;
301: 83 c4 10 add $0x10,%esp
}
304: 8d 65 f8 lea -0x8(%ebp),%esp
307: 89 f0 mov %esi,%eax
309: 5b pop %ebx
30a: 5e pop %esi
30b: 5d pop %ebp
30c: c3 ret
30d: 8d 76 00 lea 0x0(%esi),%esi
return -1;
310: be ff ff ff ff mov $0xffffffff,%esi
315: eb ed jmp 304 <stat+0x34>
317: 89 f6 mov %esi,%esi
319: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
00000320 <atoi>:
int
atoi(const char *s)
{
320: 55 push %ebp
321: 89 e5 mov %esp,%ebp
323: 53 push %ebx
324: 8b 4d 08 mov 0x8(%ebp),%ecx
int n;
n = 0;
while('0' <= *s && *s <= '9')
327: 0f be 11 movsbl (%ecx),%edx
32a: 8d 42 d0 lea -0x30(%edx),%eax
32d: 3c 09 cmp $0x9,%al
n = 0;
32f: b8 00 00 00 00 mov $0x0,%eax
while('0' <= *s && *s <= '9')
334: 77 1f ja 355 <atoi+0x35>
336: 8d 76 00 lea 0x0(%esi),%esi
339: 8d bc 27 00 00 00 00 lea 0x0(%edi,%eiz,1),%edi
n = n*10 + *s++ - '0';
340: 8d 04 80 lea (%eax,%eax,4),%eax
343: 83 c1 01 add $0x1,%ecx
346: 8d 44 42 d0 lea -0x30(%edx,%eax,2),%eax
while('0' <= *s && *s <= '9')
34a: 0f be 11 movsbl (%ecx),%edx
34d: 8d 5a d0 lea -0x30(%edx),%ebx
350: 80 fb 09 cmp $0x9,%bl
353: 76 eb jbe 340 <atoi+0x20>
return n;
}
355: 5b pop %ebx
356: 5d pop %ebp
357: c3 ret
358: 90 nop
359: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
00000360 <memmove>:
void*
memmove(void *vdst, void *vsrc, int n)
{
360: 55 push %ebp
361: 89 e5 mov %esp,%ebp
363: 56 push %esi
364: 53 push %ebx
365: 8b 5d 10 mov 0x10(%ebp),%ebx
368: 8b 45 08 mov 0x8(%ebp),%eax
36b: 8b 75 0c mov 0xc(%ebp),%esi
char *dst, *src;
dst = vdst;
src = vsrc;
while(n-- > 0)
36e: 85 db test %ebx,%ebx
370: 7e 14 jle 386 <memmove+0x26>
372: 31 d2 xor %edx,%edx
374: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
*dst++ = *src++;
378: 0f b6 0c 16 movzbl (%esi,%edx,1),%ecx
37c: 88 0c 10 mov %cl,(%eax,%edx,1)
37f: 83 c2 01 add $0x1,%edx
while(n-- > 0)
382: 39 d3 cmp %edx,%ebx
384: 75 f2 jne 378 <memmove+0x18>
return vdst;
}
386: 5b pop %ebx
387: 5e pop %esi
388: 5d pop %ebp
389: c3 ret
0000038a <fork>:
name: \
movl $SYS_ ## name, %eax; \
int $T_SYSCALL; \
ret
SYSCALL(fork)
38a: b8 01 00 00 00 mov $0x1,%eax
38f: cd 40 int $0x40
391: c3 ret
00000392 <exit>:
SYSCALL(exit)
392: b8 02 00 00 00 mov $0x2,%eax
397: cd 40 int $0x40
399: c3 ret
0000039a <wait>:
SYSCALL(wait)
39a: b8 03 00 00 00 mov $0x3,%eax
39f: cd 40 int $0x40
3a1: c3 ret
000003a2 <pipe>:
SYSCALL(pipe)
3a2: b8 04 00 00 00 mov $0x4,%eax
3a7: cd 40 int $0x40
3a9: c3 ret
000003aa <read>:
SYSCALL(read)
3aa: b8 05 00 00 00 mov $0x5,%eax
3af: cd 40 int $0x40
3b1: c3 ret
000003b2 <write>:
SYSCALL(write)
3b2: b8 10 00 00 00 mov $0x10,%eax
3b7: cd 40 int $0x40
3b9: c3 ret
000003ba <close>:
SYSCALL(close)
3ba: b8 15 00 00 00 mov $0x15,%eax
3bf: cd 40 int $0x40
3c1: c3 ret
000003c2 <kill>:
SYSCALL(kill)
3c2: b8 06 00 00 00 mov $0x6,%eax
3c7: cd 40 int $0x40
3c9: c3 ret
000003ca <exec>:
SYSCALL(exec)
3ca: b8 07 00 00 00 mov $0x7,%eax
3cf: cd 40 int $0x40
3d1: c3 ret
000003d2 <open>:
SYSCALL(open)
3d2: b8 0f 00 00 00 mov $0xf,%eax
3d7: cd 40 int $0x40
3d9: c3 ret
000003da <mknod>:
SYSCALL(mknod)
3da: b8 11 00 00 00 mov $0x11,%eax
3df: cd 40 int $0x40
3e1: c3 ret
000003e2 <unlink>:
SYSCALL(unlink)
3e2: b8 12 00 00 00 mov $0x12,%eax
3e7: cd 40 int $0x40
3e9: c3 ret
000003ea <fstat>:
SYSCALL(fstat)
3ea: b8 08 00 00 00 mov $0x8,%eax
3ef: cd 40 int $0x40
3f1: c3 ret
000003f2 <link>:
SYSCALL(link)
3f2: b8 13 00 00 00 mov $0x13,%eax
3f7: cd 40 int $0x40
3f9: c3 ret
000003fa <mkdir>:
SYSCALL(mkdir)
3fa: b8 14 00 00 00 mov $0x14,%eax
3ff: cd 40 int $0x40
401: c3 ret
00000402 <chdir>:
SYSCALL(chdir)
402: b8 09 00 00 00 mov $0x9,%eax
407: cd 40 int $0x40
409: c3 ret
0000040a <dup>:
SYSCALL(dup)
40a: b8 0a 00 00 00 mov $0xa,%eax
40f: cd 40 int $0x40
411: c3 ret
00000412 <getpid>:
SYSCALL(getpid)
412: b8 0b 00 00 00 mov $0xb,%eax
417: cd 40 int $0x40
419: c3 ret
0000041a <sbrk>:
SYSCALL(sbrk)
41a: b8 0c 00 00 00 mov $0xc,%eax
41f: cd 40 int $0x40
421: c3 ret
00000422 <sleep>:
SYSCALL(sleep)
422: b8 0d 00 00 00 mov $0xd,%eax
427: cd 40 int $0x40
429: c3 ret
0000042a <uptime>:
SYSCALL(uptime)
42a: b8 0e 00 00 00 mov $0xe,%eax
42f: cd 40 int $0x40
431: c3 ret
00000432 <cps>:
SYSCALL(cps)
432: b8 16 00 00 00 mov $0x16,%eax
437: cd 40 int $0x40
439: c3 ret
0000043a <chpr>:
SYSCALL(chpr)
43a: b8 17 00 00 00 mov $0x17,%eax
43f: cd 40 int $0x40
441: c3 ret
00000442 <setTickets>:
SYSCALL(setTickets)
442: b8 18 00 00 00 mov $0x18,%eax
447: cd 40 int $0x40
449: c3 ret
0000044a <thread_create>:
SYSCALL(thread_create)
44a: b8 19 00 00 00 mov $0x19,%eax
44f: cd 40 int $0x40
451: c3 ret
00000452 <thread_exit>:
SYSCALL(thread_exit)
452: b8 1a 00 00 00 mov $0x1a,%eax
457: cd 40 int $0x40
459: c3 ret
0000045a <thread_join>:
SYSCALL(thread_join)
45a: b8 1b 00 00 00 mov $0x1b,%eax
45f: cd 40 int $0x40
461: c3 ret
00000462 <gettid>:
SYSCALL(gettid)
462: b8 1c 00 00 00 mov $0x1c,%eax
467: cd 40 int $0x40
469: c3 ret
46a: 66 90 xchg %ax,%ax
46c: 66 90 xchg %ax,%ax
46e: 66 90 xchg %ax,%ax
00000470 <printint>:
write(fd, &c, 1);
}
static void
printint(int fd, int xx, int base, int sgn)
{
470: 55 push %ebp
471: 89 e5 mov %esp,%ebp
473: 57 push %edi
474: 56 push %esi
475: 53 push %ebx
476: 83 ec 3c sub $0x3c,%esp
char buf[16];
int i, neg;
uint x;
neg = 0;
if(sgn && xx < 0){
479: 85 d2 test %edx,%edx
{
47b: 89 45 c0 mov %eax,-0x40(%ebp)
neg = 1;
x = -xx;
47e: 89 d0 mov %edx,%eax
if(sgn && xx < 0){
480: 79 76 jns 4f8 <printint+0x88>
482: f6 45 08 01 testb $0x1,0x8(%ebp)
486: 74 70 je 4f8 <printint+0x88>
x = -xx;
488: f7 d8 neg %eax
neg = 1;
48a: c7 45 c4 01 00 00 00 movl $0x1,-0x3c(%ebp)
} else {
x = xx;
}
i = 0;
491: 31 f6 xor %esi,%esi
493: 8d 5d d7 lea -0x29(%ebp),%ebx
496: eb 0a jmp 4a2 <printint+0x32>
498: 90 nop
499: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
do{
buf[i++] = digits[x % base];
4a0: 89 fe mov %edi,%esi
4a2: 31 d2 xor %edx,%edx
4a4: 8d 7e 01 lea 0x1(%esi),%edi
4a7: f7 f1 div %ecx
4a9: 0f b6 92 94 08 00 00 movzbl 0x894(%edx),%edx
}while((x /= base) != 0);
4b0: 85 c0 test %eax,%eax
buf[i++] = digits[x % base];
4b2: 88 14 3b mov %dl,(%ebx,%edi,1)
}while((x /= base) != 0);
4b5: 75 e9 jne 4a0 <printint+0x30>
if(neg)
4b7: 8b 45 c4 mov -0x3c(%ebp),%eax
4ba: 85 c0 test %eax,%eax
4bc: 74 08 je 4c6 <printint+0x56>
buf[i++] = '-';
4be: c6 44 3d d8 2d movb $0x2d,-0x28(%ebp,%edi,1)
4c3: 8d 7e 02 lea 0x2(%esi),%edi
4c6: 8d 74 3d d7 lea -0x29(%ebp,%edi,1),%esi
4ca: 8b 7d c0 mov -0x40(%ebp),%edi
4cd: 8d 76 00 lea 0x0(%esi),%esi
4d0: 0f b6 06 movzbl (%esi),%eax
write(fd, &c, 1);
4d3: 83 ec 04 sub $0x4,%esp
4d6: 83 ee 01 sub $0x1,%esi
4d9: 6a 01 push $0x1
4db: 53 push %ebx
4dc: 57 push %edi
4dd: 88 45 d7 mov %al,-0x29(%ebp)
4e0: e8 cd fe ff ff call 3b2 <write>
while(--i >= 0)
4e5: 83 c4 10 add $0x10,%esp
4e8: 39 de cmp %ebx,%esi
4ea: 75 e4 jne 4d0 <printint+0x60>
putc(fd, buf[i]);
}
4ec: 8d 65 f4 lea -0xc(%ebp),%esp
4ef: 5b pop %ebx
4f0: 5e pop %esi
4f1: 5f pop %edi
4f2: 5d pop %ebp
4f3: c3 ret
4f4: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
neg = 0;
4f8: c7 45 c4 00 00 00 00 movl $0x0,-0x3c(%ebp)
4ff: eb 90 jmp 491 <printint+0x21>
501: eb 0d jmp 510 <printf>
503: 90 nop
504: 90 nop
505: 90 nop
506: 90 nop
507: 90 nop
508: 90 nop
509: 90 nop
50a: 90 nop
50b: 90 nop
50c: 90 nop
50d: 90 nop
50e: 90 nop
50f: 90 nop
00000510 <printf>:
// Print to the given fd. Only understands %d, %x, %p, %s.
void
printf(int fd, char *fmt, ...)
{
510: 55 push %ebp
511: 89 e5 mov %esp,%ebp
513: 57 push %edi
514: 56 push %esi
515: 53 push %ebx
516: 83 ec 2c sub $0x2c,%esp
int c, i, state;
uint *ap;
state = 0;
ap = (uint*)(void*)&fmt + 1;
for(i = 0; fmt[i]; i++){
519: 8b 75 0c mov 0xc(%ebp),%esi
51c: 0f b6 1e movzbl (%esi),%ebx
51f: 84 db test %bl,%bl
521: 0f 84 b3 00 00 00 je 5da <printf+0xca>
ap = (uint*)(void*)&fmt + 1;
527: 8d 45 10 lea 0x10(%ebp),%eax
52a: 83 c6 01 add $0x1,%esi
state = 0;
52d: 31 ff xor %edi,%edi
ap = (uint*)(void*)&fmt + 1;
52f: 89 45 d4 mov %eax,-0x2c(%ebp)
532: eb 2f jmp 563 <printf+0x53>
534: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
c = fmt[i] & 0xff;
if(state == 0){
if(c == '%'){
538: 83 f8 25 cmp $0x25,%eax
53b: 0f 84 a7 00 00 00 je 5e8 <printf+0xd8>
write(fd, &c, 1);
541: 8d 45 e2 lea -0x1e(%ebp),%eax
544: 83 ec 04 sub $0x4,%esp
547: 88 5d e2 mov %bl,-0x1e(%ebp)
54a: 6a 01 push $0x1
54c: 50 push %eax
54d: ff 75 08 pushl 0x8(%ebp)
550: e8 5d fe ff ff call 3b2 <write>
555: 83 c4 10 add $0x10,%esp
558: 83 c6 01 add $0x1,%esi
for(i = 0; fmt[i]; i++){
55b: 0f b6 5e ff movzbl -0x1(%esi),%ebx
55f: 84 db test %bl,%bl
561: 74 77 je 5da <printf+0xca>
if(state == 0){
563: 85 ff test %edi,%edi
c = fmt[i] & 0xff;
565: 0f be cb movsbl %bl,%ecx
568: 0f b6 c3 movzbl %bl,%eax
if(state == 0){
56b: 74 cb je 538 <printf+0x28>
state = '%';
} else {
putc(fd, c);
}
} else if(state == '%'){
56d: 83 ff 25 cmp $0x25,%edi
570: 75 e6 jne 558 <printf+0x48>
if(c == 'd'){
572: 83 f8 64 cmp $0x64,%eax
575: 0f 84 05 01 00 00 je 680 <printf+0x170>
printint(fd, *ap, 10, 1);
ap++;
} else if(c == 'x' || c == 'p'){
57b: 81 e1 f7 00 00 00 and $0xf7,%ecx
581: 83 f9 70 cmp $0x70,%ecx
584: 74 72 je 5f8 <printf+0xe8>
printint(fd, *ap, 16, 0);
ap++;
} else if(c == 's'){
586: 83 f8 73 cmp $0x73,%eax
589: 0f 84 99 00 00 00 je 628 <printf+0x118>
s = "(null)";
while(*s != 0){
putc(fd, *s);
s++;
}
} else if(c == 'c'){
58f: 83 f8 63 cmp $0x63,%eax
592: 0f 84 08 01 00 00 je 6a0 <printf+0x190>
putc(fd, *ap);
ap++;
} else if(c == '%'){
598: 83 f8 25 cmp $0x25,%eax
59b: 0f 84 ef 00 00 00 je 690 <printf+0x180>
write(fd, &c, 1);
5a1: 8d 45 e7 lea -0x19(%ebp),%eax
5a4: 83 ec 04 sub $0x4,%esp
5a7: c6 45 e7 25 movb $0x25,-0x19(%ebp)
5ab: 6a 01 push $0x1
5ad: 50 push %eax
5ae: ff 75 08 pushl 0x8(%ebp)
5b1: e8 fc fd ff ff call 3b2 <write>
5b6: 83 c4 0c add $0xc,%esp
5b9: 8d 45 e6 lea -0x1a(%ebp),%eax
5bc: 88 5d e6 mov %bl,-0x1a(%ebp)
5bf: 6a 01 push $0x1
5c1: 50 push %eax
5c2: ff 75 08 pushl 0x8(%ebp)
5c5: 83 c6 01 add $0x1,%esi
} else {
// Unknown % sequence. Print it to draw attention.
putc(fd, '%');
putc(fd, c);
}
state = 0;
5c8: 31 ff xor %edi,%edi
write(fd, &c, 1);
5ca: e8 e3 fd ff ff call 3b2 <write>
for(i = 0; fmt[i]; i++){
5cf: 0f b6 5e ff movzbl -0x1(%esi),%ebx
write(fd, &c, 1);
5d3: 83 c4 10 add $0x10,%esp
for(i = 0; fmt[i]; i++){
5d6: 84 db test %bl,%bl
5d8: 75 89 jne 563 <printf+0x53>
}
}
}
5da: 8d 65 f4 lea -0xc(%ebp),%esp
5dd: 5b pop %ebx
5de: 5e pop %esi
5df: 5f pop %edi
5e0: 5d pop %ebp
5e1: c3 ret
5e2: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
state = '%';
5e8: bf 25 00 00 00 mov $0x25,%edi
5ed: e9 66 ff ff ff jmp 558 <printf+0x48>
5f2: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
printint(fd, *ap, 16, 0);
5f8: 83 ec 0c sub $0xc,%esp
5fb: b9 10 00 00 00 mov $0x10,%ecx
600: 6a 00 push $0x0
602: 8b 7d d4 mov -0x2c(%ebp),%edi
605: 8b 45 08 mov 0x8(%ebp),%eax
608: 8b 17 mov (%edi),%edx
60a: e8 61 fe ff ff call 470 <printint>
ap++;
60f: 89 f8 mov %edi,%eax
611: 83 c4 10 add $0x10,%esp
state = 0;
614: 31 ff xor %edi,%edi
ap++;
616: 83 c0 04 add $0x4,%eax
619: 89 45 d4 mov %eax,-0x2c(%ebp)
61c: e9 37 ff ff ff jmp 558 <printf+0x48>
621: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
s = (char*)*ap;
628: 8b 45 d4 mov -0x2c(%ebp),%eax
62b: 8b 08 mov (%eax),%ecx
ap++;
62d: 83 c0 04 add $0x4,%eax
630: 89 45 d4 mov %eax,-0x2c(%ebp)
if(s == 0)
633: 85 c9 test %ecx,%ecx
635: 0f 84 8e 00 00 00 je 6c9 <printf+0x1b9>
while(*s != 0){
63b: 0f b6 01 movzbl (%ecx),%eax
state = 0;
63e: 31 ff xor %edi,%edi
s = (char*)*ap;
640: 89 cb mov %ecx,%ebx
while(*s != 0){
642: 84 c0 test %al,%al
644: 0f 84 0e ff ff ff je 558 <printf+0x48>
64a: 89 75 d0 mov %esi,-0x30(%ebp)
64d: 89 de mov %ebx,%esi
64f: 8b 5d 08 mov 0x8(%ebp),%ebx
652: 8d 7d e3 lea -0x1d(%ebp),%edi
655: 8d 76 00 lea 0x0(%esi),%esi
write(fd, &c, 1);
658: 83 ec 04 sub $0x4,%esp
s++;
65b: 83 c6 01 add $0x1,%esi
65e: 88 45 e3 mov %al,-0x1d(%ebp)
write(fd, &c, 1);
661: 6a 01 push $0x1
663: 57 push %edi
664: 53 push %ebx
665: e8 48 fd ff ff call 3b2 <write>
while(*s != 0){
66a: 0f b6 06 movzbl (%esi),%eax
66d: 83 c4 10 add $0x10,%esp
670: 84 c0 test %al,%al
672: 75 e4 jne 658 <printf+0x148>
674: 8b 75 d0 mov -0x30(%ebp),%esi
state = 0;
677: 31 ff xor %edi,%edi
679: e9 da fe ff ff jmp 558 <printf+0x48>
67e: 66 90 xchg %ax,%ax
printint(fd, *ap, 10, 1);
680: 83 ec 0c sub $0xc,%esp
683: b9 0a 00 00 00 mov $0xa,%ecx
688: 6a 01 push $0x1
68a: e9 73 ff ff ff jmp 602 <printf+0xf2>
68f: 90 nop
write(fd, &c, 1);
690: 83 ec 04 sub $0x4,%esp
693: 88 5d e5 mov %bl,-0x1b(%ebp)
696: 8d 45 e5 lea -0x1b(%ebp),%eax
699: 6a 01 push $0x1
69b: e9 21 ff ff ff jmp 5c1 <printf+0xb1>
putc(fd, *ap);
6a0: 8b 7d d4 mov -0x2c(%ebp),%edi
write(fd, &c, 1);
6a3: 83 ec 04 sub $0x4,%esp
putc(fd, *ap);
6a6: 8b 07 mov (%edi),%eax
write(fd, &c, 1);
6a8: 6a 01 push $0x1
ap++;
6aa: 83 c7 04 add $0x4,%edi
putc(fd, *ap);
6ad: 88 45 e4 mov %al,-0x1c(%ebp)
write(fd, &c, 1);
6b0: 8d 45 e4 lea -0x1c(%ebp),%eax
6b3: 50 push %eax
6b4: ff 75 08 pushl 0x8(%ebp)
6b7: e8 f6 fc ff ff call 3b2 <write>
ap++;
6bc: 89 7d d4 mov %edi,-0x2c(%ebp)
6bf: 83 c4 10 add $0x10,%esp
state = 0;
6c2: 31 ff xor %edi,%edi
6c4: e9 8f fe ff ff jmp 558 <printf+0x48>
s = "(null)";
6c9: bb 8b 08 00 00 mov $0x88b,%ebx
while(*s != 0){
6ce: b8 28 00 00 00 mov $0x28,%eax
6d3: e9 72 ff ff ff jmp 64a <printf+0x13a>
6d8: 66 90 xchg %ax,%ax
6da: 66 90 xchg %ax,%ax
6dc: 66 90 xchg %ax,%ax
6de: 66 90 xchg %ax,%ax
000006e0 <free>:
static Header base;
static Header *freep;
void
free(void *ap)
{
6e0: 55 push %ebp
Header *bp, *p;
bp = (Header*)ap - 1;
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
6e1: a1 44 0b 00 00 mov 0xb44,%eax
{
6e6: 89 e5 mov %esp,%ebp
6e8: 57 push %edi
6e9: 56 push %esi
6ea: 53 push %ebx
6eb: 8b 5d 08 mov 0x8(%ebp),%ebx
bp = (Header*)ap - 1;
6ee: 8d 4b f8 lea -0x8(%ebx),%ecx
6f1: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
for(p = freep; !(bp > p && bp < p->s.ptr); p = p->s.ptr)
6f8: 39 c8 cmp %ecx,%eax
6fa: 8b 10 mov (%eax),%edx
6fc: 73 32 jae 730 <free+0x50>
6fe: 39 d1 cmp %edx,%ecx
700: 72 04 jb 706 <free+0x26>
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
702: 39 d0 cmp %edx,%eax
704: 72 32 jb 738 <free+0x58>
break;
if(bp + bp->s.size == p->s.ptr){
706: 8b 73 fc mov -0x4(%ebx),%esi
709: 8d 3c f1 lea (%ecx,%esi,8),%edi
70c: 39 fa cmp %edi,%edx
70e: 74 30 je 740 <free+0x60>
bp->s.size += p->s.ptr->s.size;
bp->s.ptr = p->s.ptr->s.ptr;
} else
bp->s.ptr = p->s.ptr;
710: 89 53 f8 mov %edx,-0x8(%ebx)
if(p + p->s.size == bp){
713: 8b 50 04 mov 0x4(%eax),%edx
716: 8d 34 d0 lea (%eax,%edx,8),%esi
719: 39 f1 cmp %esi,%ecx
71b: 74 3a je 757 <free+0x77>
p->s.size += bp->s.size;
p->s.ptr = bp->s.ptr;
} else
p->s.ptr = bp;
71d: 89 08 mov %ecx,(%eax)
freep = p;
71f: a3 44 0b 00 00 mov %eax,0xb44
}
724: 5b pop %ebx
725: 5e pop %esi
726: 5f pop %edi
727: 5d pop %ebp
728: c3 ret
729: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
if(p >= p->s.ptr && (bp > p || bp < p->s.ptr))
730: 39 d0 cmp %edx,%eax
732: 72 04 jb 738 <free+0x58>
734: 39 d1 cmp %edx,%ecx
736: 72 ce jb 706 <free+0x26>
{
738: 89 d0 mov %edx,%eax
73a: eb bc jmp 6f8 <free+0x18>
73c: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
bp->s.size += p->s.ptr->s.size;
740: 03 72 04 add 0x4(%edx),%esi
743: 89 73 fc mov %esi,-0x4(%ebx)
bp->s.ptr = p->s.ptr->s.ptr;
746: 8b 10 mov (%eax),%edx
748: 8b 12 mov (%edx),%edx
74a: 89 53 f8 mov %edx,-0x8(%ebx)
if(p + p->s.size == bp){
74d: 8b 50 04 mov 0x4(%eax),%edx
750: 8d 34 d0 lea (%eax,%edx,8),%esi
753: 39 f1 cmp %esi,%ecx
755: 75 c6 jne 71d <free+0x3d>
p->s.size += bp->s.size;
757: 03 53 fc add -0x4(%ebx),%edx
freep = p;
75a: a3 44 0b 00 00 mov %eax,0xb44
p->s.size += bp->s.size;
75f: 89 50 04 mov %edx,0x4(%eax)
p->s.ptr = bp->s.ptr;
762: 8b 53 f8 mov -0x8(%ebx),%edx
765: 89 10 mov %edx,(%eax)
}
767: 5b pop %ebx
768: 5e pop %esi
769: 5f pop %edi
76a: 5d pop %ebp
76b: c3 ret
76c: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
00000770 <malloc>:
return freep;
}
void*
malloc(uint nbytes)
{
770: 55 push %ebp
771: 89 e5 mov %esp,%ebp
773: 57 push %edi
774: 56 push %esi
775: 53 push %ebx
776: 83 ec 0c sub $0xc,%esp
Header *p, *prevp;
uint nunits;
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
779: 8b 45 08 mov 0x8(%ebp),%eax
if((prevp = freep) == 0){
77c: 8b 15 44 0b 00 00 mov 0xb44,%edx
nunits = (nbytes + sizeof(Header) - 1)/sizeof(Header) + 1;
782: 8d 78 07 lea 0x7(%eax),%edi
785: c1 ef 03 shr $0x3,%edi
788: 83 c7 01 add $0x1,%edi
if((prevp = freep) == 0){
78b: 85 d2 test %edx,%edx
78d: 0f 84 9d 00 00 00 je 830 <malloc+0xc0>
793: 8b 02 mov (%edx),%eax
795: 8b 48 04 mov 0x4(%eax),%ecx
base.s.ptr = freep = prevp = &base;
base.s.size = 0;
}
for(p = prevp->s.ptr; ; prevp = p, p = p->s.ptr){
if(p->s.size >= nunits){
798: 39 cf cmp %ecx,%edi
79a: 76 6c jbe 808 <malloc+0x98>
79c: 81 ff 00 10 00 00 cmp $0x1000,%edi
7a2: bb 00 10 00 00 mov $0x1000,%ebx
7a7: 0f 43 df cmovae %edi,%ebx
p = sbrk(nu * sizeof(Header));
7aa: 8d 34 dd 00 00 00 00 lea 0x0(,%ebx,8),%esi
7b1: eb 0e jmp 7c1 <malloc+0x51>
7b3: 90 nop
7b4: 8d 74 26 00 lea 0x0(%esi,%eiz,1),%esi
for(p = prevp->s.ptr; ; prevp = p, p = p->s.ptr){
7b8: 8b 02 mov (%edx),%eax
if(p->s.size >= nunits){
7ba: 8b 48 04 mov 0x4(%eax),%ecx
7bd: 39 f9 cmp %edi,%ecx
7bf: 73 47 jae 808 <malloc+0x98>
p->s.size = nunits;
}
freep = prevp;
return (void*)(p + 1);
}
if(p == freep)
7c1: 39 05 44 0b 00 00 cmp %eax,0xb44
7c7: 89 c2 mov %eax,%edx
7c9: 75 ed jne 7b8 <malloc+0x48>
p = sbrk(nu * sizeof(Header));
7cb: 83 ec 0c sub $0xc,%esp
7ce: 56 push %esi
7cf: e8 46 fc ff ff call 41a <sbrk>
if(p == (char*)-1)
7d4: 83 c4 10 add $0x10,%esp
7d7: 83 f8 ff cmp $0xffffffff,%eax
7da: 74 1c je 7f8 <malloc+0x88>
hp->s.size = nu;
7dc: 89 58 04 mov %ebx,0x4(%eax)
free((void*)(hp + 1));
7df: 83 ec 0c sub $0xc,%esp
7e2: 83 c0 08 add $0x8,%eax
7e5: 50 push %eax
7e6: e8 f5 fe ff ff call 6e0 <free>
return freep;
7eb: 8b 15 44 0b 00 00 mov 0xb44,%edx
if((p = morecore(nunits)) == 0)
7f1: 83 c4 10 add $0x10,%esp
7f4: 85 d2 test %edx,%edx
7f6: 75 c0 jne 7b8 <malloc+0x48>
return 0;
}
}
7f8: 8d 65 f4 lea -0xc(%ebp),%esp
return 0;
7fb: 31 c0 xor %eax,%eax
}
7fd: 5b pop %ebx
7fe: 5e pop %esi
7ff: 5f pop %edi
800: 5d pop %ebp
801: c3 ret
802: 8d b6 00 00 00 00 lea 0x0(%esi),%esi
if(p->s.size == nunits)
808: 39 cf cmp %ecx,%edi
80a: 74 54 je 860 <malloc+0xf0>
p->s.size -= nunits;
80c: 29 f9 sub %edi,%ecx
80e: 89 48 04 mov %ecx,0x4(%eax)
p += p->s.size;
811: 8d 04 c8 lea (%eax,%ecx,8),%eax
p->s.size = nunits;
814: 89 78 04 mov %edi,0x4(%eax)
freep = prevp;
817: 89 15 44 0b 00 00 mov %edx,0xb44
}
81d: 8d 65 f4 lea -0xc(%ebp),%esp
return (void*)(p + 1);
820: 83 c0 08 add $0x8,%eax
}
823: 5b pop %ebx
824: 5e pop %esi
825: 5f pop %edi
826: 5d pop %ebp
827: c3 ret
828: 90 nop
829: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
base.s.ptr = freep = prevp = &base;
830: c7 05 44 0b 00 00 48 movl $0xb48,0xb44
837: 0b 00 00
83a: c7 05 48 0b 00 00 48 movl $0xb48,0xb48
841: 0b 00 00
base.s.size = 0;
844: b8 48 0b 00 00 mov $0xb48,%eax
849: c7 05 4c 0b 00 00 00 movl $0x0,0xb4c
850: 00 00 00
853: e9 44 ff ff ff jmp 79c <malloc+0x2c>
858: 90 nop
859: 8d b4 26 00 00 00 00 lea 0x0(%esi,%eiz,1),%esi
prevp->s.ptr = p->s.ptr;
860: 8b 08 mov (%eax),%ecx
862: 89 0a mov %ecx,(%edx)
864: eb b1 jmp 817 <malloc+0xa7>
|
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// THREADS.CPP
//
//
//
#include "common.h"
#include "frames.h"
#include "threads.h"
#include "stackwalk.h"
#include "excep.h"
#include "comsynchronizable.h"
#include "log.h"
#include "gcheaputilities.h"
#include "mscoree.h"
#include "dbginterface.h"
#include "corprof.h" // profiling
#include "eeprofinterfaces.h"
#include "eeconfig.h"
#include "perfcounters.h"
#include "corhost.h"
#include "win32threadpool.h"
#include "jitinterface.h"
#include "eventtrace.h"
#include "comutilnative.h"
#include "finalizerthread.h"
#include "threadsuspend.h"
#include "wrappers.h"
#include "nativeoverlapped.h"
#include "mdaassistants.h"
#include "appdomain.inl"
#include "vmholder.h"
#include "exceptmacros.h"
#include "win32threadpool.h"
#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#include "interoputil.h"
#include "interoputil.inl"
#endif // FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
#include "olecontexthelpers.h"
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
#ifdef FEATURE_PERFTRACING
#include "eventpipebuffermanager.h"
#endif // FEATURE_PERFTRACING
// Global thread-store singleton (DAC-visible) plus two pieces of shared
// state used during suspension/stack-crawl: a scratch OS CONTEXT and the
// event threads wait on while a stack crawl is in progress.
SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore);
CONTEXT *ThreadStore::s_pOSContext = NULL;
CLREvent *ThreadStore::s_pWaitForStackCrawlEvent;
// Look up the ThreadLocalModule for the given module index.
// Returns NULL (rather than asserting) when the index is outside the
// currently allocated TLM table, i.e. the module has no TLM yet.
PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(ModuleIndex index)
{
    WRAPPER_NO_CONTRACT;
    SUPPORTS_DAC;

    return (index.m_dwIndex < m_TLMTableSize)
        ? m_pTLMTable[index.m_dwIndex].pTLM
        : NULL;
}
// Convenience overload: resolve the module that owns this MethodTable's
// statics, then forward to the ModuleIndex-based lookup above.
PTR_ThreadLocalModule ThreadLocalBlock::GetTLMIfExists(MethodTable* pMT)
{
    WRAPPER_NO_CONTRACT;

    return GetTLMIfExists(pMT->GetModuleForStatics()->GetModuleIndex());
}
#ifndef DACCESS_COMPILE
// Process-wide Thread statics: finalized-thread cleanup flag, overflow
// counter for the per-thread threadpool completion counts, and the lock
// used by the managed deadlock-detection support.
BOOL Thread::s_fCleanFinalizedThread = FALSE;
Volatile<LONG> Thread::s_threadPoolCompletionCountOverflow = 0;
CrstStatic g_DeadlockAwareCrst;
#if defined(_DEBUG)
// Debug helper: does thread handle h refer to the OS thread with id osId?
// Best-effort only — when the OS cannot report an id for the handle we
// return TRUE so callers do not raise false alarms.
BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId )
{
#ifndef FEATURE_PAL
    LIMITED_METHOD_CONTRACT;

    // GetThreadId may legitimately fail and return 0; in that case we
    // cannot decide whether the two match, so we skip the check.
    const DWORD dwId = GetThreadId(h);
    if (dwId == 0)
        return TRUE;
    return (dwId == osId) ? TRUE : FALSE;
#else // !FEATURE_PAL
    // No GetThreadId equivalent under the PAL; always report a match.
    return TRUE;
#endif // !FEATURE_PAL
}
#endif // _DEBUG
#ifdef _DEBUG_IMPL
// Debug-only GC-mode assertion helpers. Each specialization opens a scan
// scope and records the GC mode the surrounding code claims to be in:
// <TRUE> asserts cooperative mode, <FALSE> asserts preemptive mode.
// AutoCleanupGCAssert checks on construction (and re-checks on destruction
// elsewhere); GCAssert::BeginGCAssert is the explicit begin-check form.
template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_COOPERATIVE;
}
template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_PREEMPTIVE;
}
template<> void GCAssert<TRUE>::BeginGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_COOPERATIVE;
}
template<> void GCAssert<FALSE>::BeginGCAssert()
{
SCAN_SCOPE_BEGIN;
STATIC_CONTRACT_MODE_PREEMPTIVE;
}
#endif
// #define NEW_TLS 1
#ifdef _DEBUG
// Debug-only setter for the thread's explicit Frame chain head.
// Before installing pFrame it validates (when fAssertOnFailFast is set):
//   1. pFrame is already linked somewhere in the current chain, and every
//      ContextTransitionFrame between the old head and pFrame returns to
//      the thread's current domain;
//   2. after installing, the chain from pFrame to FRAME_TOP ((Frame*)-1)
//      is strictly ascending in stack addresses, stays below the cached
//      stack base, and every frame has a sane frame type.
void Thread::SetFrame(Frame *pFrame)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
DEBUG_ONLY;
MODE_COOPERATIVE;
// It only makes sense for a Thread to call SetFrame on itself.
PRECONDITION(this == GetThread());
PRECONDITION(CheckPointer(pFrame));
}
CONTRACTL_END;
if (g_pConfig->fAssertOnFailFast())
{
// Pass 1: confirm pFrame is reachable from the current chain head.
Frame *pWalk = m_pFrame;
BOOL fExist = FALSE;
while (pWalk != (Frame*) -1)
{
if (pWalk == pFrame)
{
fExist = TRUE;
break;
}
pWalk = pWalk->m_Next;
}
// Pass 2: frames being popped (between old head and pFrame) that are
// context transitions must return to this thread's current domain.
pWalk = m_pFrame;
while (fExist && pWalk != pFrame && pWalk != (Frame*)-1)
{
if (pWalk->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
{
_ASSERTE (((ContextTransitionFrame *)pWalk)->GetReturnDomain() == m_pDomain);
}
pWalk = pWalk->m_Next;
}
}
m_pFrame = pFrame;
// If stack overrun corruptions are expected, then skip this check
// as the Frame chain may have been corrupted.
if (g_pConfig->fAssertOnFailFast() == false)
return;
// Sanity-walk the new chain: each frame must sit above the current SP,
// below the cached stack base, and carry a valid frame type.
Frame* espVal = (Frame*)GetCurrentSP();
while (pFrame != (Frame*) -1)
{
// stopFrame is a debugger aid: set it in a debugger to break when a
// particular frame address is installed.
static Frame* stopFrame = 0;
if (pFrame == stopFrame)
_ASSERTE(!"SetFrame frame == stopFrame");
_ASSERTE(espVal < pFrame);
_ASSERTE(pFrame < m_CacheStackBase);
_ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT);
pFrame = pFrame->m_Next;
}
}
#endif // _DEBUG
//************************************************************************
// PRIVATE GLOBALS
//************************************************************************
extern unsigned __int64 getTimeStamp();
extern unsigned __int64 getTickFrequency();
// Return the tick frequency, computing it once and memoizing the result.
// (unsigned __int64)-1 is the "not yet cached" sentinel.
// NOTE(review): the lazy initialization is unsynchronized; concurrent first
// calls may each invoke getTickFrequency(). The frequency is the same value
// every time, so this looks benign — confirm.
unsigned __int64 tgetFrequency() {
    static unsigned __int64 s_cachedFreq = (unsigned __int64) -1;

    if (s_cachedFreq == (unsigned __int64) -1)
        s_cachedFreq = getTickFrequency();
    return s_cachedFreq;
}
#endif // #ifndef DACCESS_COMPILE
// Stack-walk callback for DetectHandleILStubsForDebugger: stop at the first
// crawl frame that has a managed function and report, via the bool* cookie,
// whether that function is an IL stub.
static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData)
{
    WRAPPER_NO_CONTRACT;

    MethodDesc *pFunc = pCF->GetFunction();
    if (pFunc == NULL)
        return SWA_CONTINUE;        // no function yet — keep walking

    *(bool *)pData = pFunc->IsILStub();
    return SWA_ABORT;               // first function frame is sufficient
}
// This is really just a heuristic to detect if we are executing in an M2U IL stub or
// one of the marshaling methods it calls. It doesn't deal with U2M IL stubs.
// We loop through the frame chain looking for an uninitialized TransitionFrame.
// If there is one, then we are executing in an M2U IL stub or one of the methods it calls.
// On the other hand, if there is an initialized TransitionFrame, then we are not.
// Also, if there is an HMF on the stack, then we stop. This could be the case where
// an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to
// stop in those cases. Some examples are COMException..ctor and custom marshalers.
//
// X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with
// inlined P/Invoke when judging just from the frame chain. We use stack walk to decide
// this case.
// Heuristic (see the comment block above): returns true when this thread
// appears to be executing inside an M2U IL stub or one of the marshaling
// methods it calls, judged by walking the explicit Frame chain and, for
// ambiguous x86 InlinedCallFrame cases, doing a quick stack walk.
bool Thread::DetectHandleILStubsForDebugger()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
Frame* pFrame = GetFrame();
if (pFrame != NULL)
{
while (pFrame != FRAME_TOP)
{
// Check for HMF's. See the comment at the beginning of this function.
if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr())
{
break;
}
// If there is an entry frame (i.e. U2M managed), we should break.
else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY)
{
break;
}
// Check for M2U transition frames. See the comment at the beginning of this function.
else if (pFrame->GetFrameType() == Frame::TYPE_EXIT)
{
if (pFrame->GetReturnAddress() == NULL)
{
// If the return address is NULL, then the frame has not been initialized yet.
// We may see InlinedCallFrame in ordinary methods as well. Have to do
// stack walk to find out if this is really an IL stub.
bool fInILStub = false;
StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback,
&fInILStub,
QUICKUNWIND,
dac_cast<PTR_Frame>(pFrame));
// Uninitialized exit frame whose nearest function is an IL stub:
// we are inside the stub (or a method it called).
if (fInILStub) return true;
}
else
{
// The frame is fully initialized.
return false;
}
}
pFrame = pFrame->Next();
}
}
// Reached FRAME_TOP (or no frames at all) without evidence of a stub.
return false;
}
// Per-OS-thread runtime TLS block. Declared thread-local with the
// compiler-appropriate keyword (__declspec(thread) for MSVC, __thread for
// clang/gcc) and with C linkage so assembly helpers can reference it.
extern "C" {
#ifndef __llvm__
__declspec(thread)
#else // !__llvm__
__thread
#endif // !__llvm__
ThreadLocalInfo gCurrentThreadInfo =
{
NULL, // m_pThread
NULL, // m_pAppDomain
NULL, // m_EETlsData
};
} // extern "C"
// index into TLS Array. Definition added by compiler
EXTERN_C UINT32 _tls_index;
#ifndef DACCESS_COMPILE
// Publish t as the current thread's Thread* in the runtime TLS block.
// Pass NULL to disassociate. Always returns TRUE.
BOOL SetThread(Thread* t)
{
LIMITED_METHOD_CONTRACT
gCurrentThreadInfo.m_pThread = t;
return TRUE;
}
// Publish ad as the current thread's AppDomain* in the runtime TLS block.
// Pass NULL to disassociate. Always returns TRUE.
BOOL SetAppDomain(AppDomain* ad)
{
LIMITED_METHOD_CONTRACT
gCurrentThreadInfo.m_pAppDomain = ad;
return TRUE;
}
// Alert this thread by queuing a user-mode APC to it, which wakes it from
// an alertable wait. Returns the (nonzero on success) result of
// QueueUserAPC, or FALSE when the thread handle is not usable.
BOOL Thread::Alert ()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    BOOL fQueued = FALSE;
    {
        HANDLE hThread = GetThreadHandle();
        // Skip threads with no real OS handle (never started / switched out).
        if (hThread != INVALID_HANDLE_VALUE && hThread != SWITCHOUT_HANDLE_VALUE)
        {
            fQueued = ::QueueUserAPC(UserInterruptAPC, hThread, APC_Code);
        }
    }
    return fQueued;
}
// Wait for this thread to exit. Thin wrapper over JoinEx that translates
// the BOOL alertable flag into the WaitMode enumeration.
DWORD Thread::Join(DWORD timeout, BOOL alertable)
{
    WRAPPER_NO_CONTRACT;

    WaitMode mode = WaitMode_None;
    if (alertable)
        mode = WaitMode_Alertable;
    return JoinEx(timeout, mode);
}
// Wait for this thread's handle with the given timeout and wait mode.
// Returns a WAIT_* code; WAIT_FAILED when the thread has no usable handle.
// When called on a runtime thread, the wait is routed through
// DoAppropriateWait so it can pump/forward as required; otherwise it is a
// plain WaitForSingleObjectEx.
DWORD Thread::JoinEx(DWORD timeout, WaitMode mode)
{
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE;
Thread *pCurThread = GetThread();
_ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread());
{
// We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be
// forwarded to a SynchronizationContext if needed.
mode = (WaitMode)(mode & ~WaitMode_InDeadlock);
HANDLE handle = GetThreadHandle();
if (handle == INVALID_HANDLE_VALUE || handle == SWITCHOUT_HANDLE_VALUE) {
return WAIT_FAILED;
}
if (pCurThread) {
return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode);
}
else {
return WaitForSingleObjectEx(handle,timeout,alertable);
}
}
}
extern INT32 MapFromNTPriority(INT32 NTPriority);
// Set the OS priority of this thread and, on success, mirror the value into
// the managed Thread object (translated via MapFromNTPriority). When the
// thread has not started yet (no handle), the call is recorded as success
// and the priority is applied at start time. Returns the success flag.
BOOL Thread::SetThreadPriority(
int nPriority // thread priority level
)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
BOOL fRet;
{
if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
// When the thread starts running, we will set the thread priority.
fRet = TRUE;
}
else
fRet = ::SetThreadPriority(GetThreadHandle(), nPriority);
}
if (fRet)
{
// Switch to cooperative mode to touch the managed exposed object.
GCX_COOP();
THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
if (pObject != NULL)
{
// TODO: managed ThreadPriority only supports up to 4.
pObject->SetPriority (MapFromNTPriority(nPriority));
}
}
return fRet;
}
// Return this thread's OS priority via ::GetThreadPriority.
// NOTE(review): when there is no thread handle the code returns FALSE (0),
// which is also a legitimate priority value (THREAD_PRIORITY_NORMAL on
// Windows); callers cannot distinguish "no handle" from "normal priority".
// Looks intentional in the original source — confirm before changing.
int Thread::GetThreadPriority()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
int nRetVal = -1;
if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
nRetVal = FALSE;
}
else
nRetVal = ::GetThreadPriority(GetThreadHandle());
return nRetVal;
}
// Assign this thread to a CPU group chosen by CPUGroupInfo, to spread
// runtime threads across groups. No-op unless both GC CPU groups and
// "thread use all CPU groups" are enabled, or when affinity was already
// set or the thread has no handle. Records the chosen group/mask in
// m_wCPUGroup/m_pAffinityMask so ClearThreadCPUGroupAffinity can undo it.
void Thread::ChooseThreadCPUGroupAffinity()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
return;
//Borrow the ThreadStore Lock here: Lock ThreadStore before distributing threads
ThreadStoreLockHolder TSLockHolder(TRUE);
// this thread already has CPU group affinity set
if (m_pAffinityMask != 0)
return;
if (GetThreadHandle() == INVALID_HANDLE_VALUE)
return;
GROUP_AFFINITY groupAffinity;
CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity);
CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL);
// Remember what we set so it can be cleared/rebalanced later.
m_wCPUGroup = groupAffinity.Group;
m_pAffinityMask = groupAffinity.Mask;
}
// Undo ChooseThreadCPUGroupAffinity: release this thread's CPU group
// assignment back to CPUGroupInfo's bookkeeping and zero the cached
// group/mask. No-op when CPU groups are disabled or no affinity was set.
void Thread::ClearThreadCPUGroupAffinity()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
return;
// Same lock discipline as ChooseThreadCPUGroupAffinity.
ThreadStoreLockHolder TSLockHolder(TRUE);
// this thread does not have CPU group affinity set
if (m_pAffinityMask == 0)
return;
GROUP_AFFINITY groupAffinity;
groupAffinity.Group = m_wCPUGroup;
groupAffinity.Mask = m_pAffinityMask;
CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity);
m_wCPUGroup = 0;
m_pAffinityMask = 0;
}
// Resume a thread that was created suspended. Must be called by the thread
// that created it (asserted in debug builds). Returns ::ResumeThread's
// result: the previous suspend count, or (DWORD)-1 on failure.
DWORD Thread::StartThread()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
}
CONTRACTL_END;
DWORD dwRetVal = (DWORD) -1;
#ifdef _DEBUG
// Only the creating thread may start the thread; clear the record so a
// second StartThread would assert.
_ASSERTE (m_Creater.IsCurrentThread());
m_Creater.Clear();
#endif
_ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE &&
GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
dwRetVal = ::ResumeThread(GetThreadHandle());
return dwRetVal;
}
// Class static data:
// Debugger sync countdown, detach counters, a round-robin offset counter,
// and the count of threads currently at GC-unsafe places.
LONG Thread::m_DebugWillSyncCount = -1;
LONG Thread::m_DetachCount = 0;
LONG Thread::m_ActiveDetachCount = 0;
int Thread::m_offset_counter = 0;
Volatile<LONG> Thread::m_threadsAtUnsafePlaces = 0;
//-------------------------------------------------------------------------
// Public function: SetupThreadNoThrow()
// Creates Thread for current thread if not previously created.
// Returns NULL for failure (usually due to out-of-memory.)
//-------------------------------------------------------------------------
// Non-throwing wrapper over SetupThread: create (or fetch) the Thread
// object for the current OS thread. On failure returns NULL and, when pHR
// is non-NULL, stores the failure HRESULT (E_OUTOFMEMORY when the
// exception object itself could not be materialized).
Thread* SetupThreadNoThrow(HRESULT *pHR)
{
CONTRACTL {
NOTHROW;
SO_TOLERANT;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
HRESULT hr = S_OK;
Thread *pThread = GetThread();
// Fast path: this OS thread already has a Thread object.
if (pThread != NULL)
{
return pThread;
}
EX_TRY
{
pThread = SetupThread();
}
EX_CATCH
{
// We failed SetupThread. GET_EXCEPTION() may depend on Thread object.
if (__pException == NULL)
{
hr = E_OUTOFMEMORY;
}
else
{
hr = GET_EXCEPTION()->GetHR();
}
}
EX_END_CATCH(SwallowAllExceptions);
if (pHR)
{
*pHR = hr;
}
return pThread;
}
// Tear down a Thread object for the current OS thread: clear the TLS
// Thread/AppDomain pointers, then either run the full DetachThread path
// (when the thread is registered in the ThreadStore) or mark it dead and
// delete it directly.
void DeleteThread(Thread* pThread)
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
//_ASSERTE (pThread == GetThread());
SetThread(NULL);
SetAppDomain(NULL);
if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore))
{
pThread->DetachThread(FALSE);
}
else
{
#ifdef FEATURE_COMINTEROP
pThread->RevokeApartmentSpy();
#endif // FEATURE_COMINTEROP
FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead);
// ~Thread() calls SafeSetThrowables which has a conditional contract
// which says that if you call it with a NULL throwable then it is
// MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that
// and assumes that we're violating the MODE_COOPERATIVE.
CONTRACT_VIOLATION(ModeViolation);
delete pThread;
}
}
void EnsurePreemptive()
{
WRAPPER_NO_CONTRACT;
Thread *pThread = GetThread();
if (pThread && pThread->PreemptiveGCDisabled())
{
pThread->EnablePreemptiveGC();
}
}
typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException;
// Attaches the calling OS thread to the runtime. Returns the existing Thread
// if one is already attached; otherwise either adopts a pending unstarted
// Thread whose OS thread id matches (the DLL_THREAD_ATTACH race), or creates,
// initializes, and publishes a brand-new Thread. Throws on failure (OOM etc.).
// On success the new Thread is installed in TLS, added to the ThreadStore,
// marked fully initialized, and the debugger/profiler are notified.
Thread* SetupThread(BOOL fInternal)
{
    CONTRACTL {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
        SO_TOLERANT;
    }
    CONTRACTL_END;

    Thread* pThread;
    if ((pThread = GetThread()) != NULL)
        return pThread;

#ifdef FEATURE_STACK_PROBE
    RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), NULL);
#endif //FEATURE_STACK_PROBE

    CONTRACT_VIOLATION(SOToleranceViolation);

    // For interop debugging, we must mark that we're in a can't-stop region
    // b.c we may take Crsts here that may block the helper thread.
    // We're especially fragile here b/c we don't have a Thread object yet
    CantStopHolder hCantStop;

    // If anything below throws, make sure we leave in preemptive mode.
    EnsurePreemptiveModeIfException ensurePreemptive;

#ifdef _DEBUG
    CHECK chk;
    if (g_pConfig->SuppressChecks())
    {
        // EnterAssert will suppress any checks
        chk.EnterAssert();
    }
#endif

    // Normally, HasStarted is called from the thread's entrypoint to introduce it to
    // the runtime.  But sometimes that thread is used for DLL_THREAD_ATTACH notifications
    // that call into managed code.  In that case, a call to SetupThread here must
    // find the correct Thread object and install it into TLS.
    if (ThreadStore::s_pThreadStore->m_PendingThreadCount != 0)
    {
        DWORD  ourOSThreadId = ::GetCurrentThreadId();
        {
            ThreadStoreLockHolder TSLockHolder;
            _ASSERTE(pThread == NULL);
            // Scan unstarted (but not fail-started) threads for our OS thread id.
            while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL)
            {
                if (pThread->GetOSThreadId() == ourOSThreadId)
                {
                    break;
                }
            }

            if (pThread != NULL)
            {
                STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load());
            }
        }

        // It's perfectly reasonable to not find this guy.  It's just an unrelated
        // thread spinning up.
        if (pThread)
        {
            // Tag threadpool-owned special threads so they show up correctly.
            if (IsThreadPoolWorkerSpecialThread())
            {
                FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
                pThread->SetBackground(TRUE);
            }
            else if (IsThreadPoolIOCompletionSpecialThread())
            {
                FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
                pThread->SetBackground(TRUE);
            }
            else if (IsTimerSpecialThread() || IsWaitSpecialThread())
            {
                FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
                pThread->SetBackground(TRUE);
            }

            BOOL fStatus = pThread->HasStarted();
            ensurePreemptive.SuppressRelease();
            return fStatus ? pThread : NULL;
        }
    }

    // First time we've seen this thread in the runtime:
    pThread = new Thread();

// What state are we in here? COOP???

    Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread);

    CExecutionEngine::SetupTLSForThread(pThread);

    // A host can deny a thread entering runtime by returning a NULL IHostTask.
    // But we do want threads used by threadpool.
    if (IsThreadPoolWorkerSpecialThread() ||
        IsThreadPoolIOCompletionSpecialThread() ||
        IsTimerSpecialThread() ||
        IsWaitSpecialThread())
    {
        fInternal = TRUE;
    }

    if (!pThread->InitThread(fInternal) ||
        !pThread->PrepareApartmentAndContext())
        ThrowOutOfMemory();

    // reset any unstarted bits on the thread object
    FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted);
    FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin);

    ThreadStore::AddThread(pThread);

    BOOL fOK = SetThread(pThread);
    _ASSERTE (fOK);
    fOK = SetAppDomain(pThread->GetDomain());
    _ASSERTE (fOK);

#ifdef FEATURE_INTEROP_DEBUGGING
    // Ensure that debugger word slot is allocated
    UnsafeTlsSetValue(g_debuggerWordTLSIndex, 0);
#endif

    // We now have a Thread object visible to the RS. unmark special status.
    hCantStop.Release();

    pThread->SetupThreadForHost();

    threadHolder.SuppressRelease();

    FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized);

#ifdef DEBUGGING_SUPPORTED
    //
    // If we're debugging, let the debugger know that this
    // thread is up and running now.
    //
    if (CORDebuggerAttached())
    {
        g_pDebugInterface->ThreadCreated(pThread);
    }
    else
    {
        LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId()));
    }
#endif // DEBUGGING_SUPPORTED

#ifdef PROFILING_SUPPORTED
    // If a profiler is present, then notify the profiler that a
    // thread has been created.
    if (!IsGCSpecialThread())
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
        {
            GCX_PREEMP();
            g_profControlBlock.pProfInterface->ThreadCreated(
                (ThreadID)pThread);
        }

        DWORD osThreadId = ::GetCurrentThreadId();
        g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
            (ThreadID)pThread, osThreadId);
        END_PIN_PROFILER();
    }
#endif // PROFILING_SUPPORTED

    _ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking
    pThread->SetBackground(TRUE);

    ensurePreemptive.SuppressRelease();

    // Tag threadpool-owned special threads (new-thread path).
    if (IsThreadPoolWorkerSpecialThread())
    {
        FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
    }
    else if (IsThreadPoolIOCompletionSpecialThread())
    {
        FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
    }
    else if (IsTimerSpecialThread() || IsWaitSpecialThread())
    {
        FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
    }

#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
    if (g_fEnableARM)
    {
        pThread->QueryThreadProcessorUsage();
    }
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
#ifdef FEATURE_EVENT_TRACE
    ETW::ThreadLog::FireThreadCreated(pThread);
#endif // FEATURE_EVENT_TRACE

    return pThread;
}
//-------------------------------------------------------------------------
// Public function: SetupUnstartedThread()
// This sets up a Thread object for an exposed System.Thread that
// has not been started yet. This allows us to properly enumerate all threads
// in the ThreadStore, so we can report on even unstarted threads. Clearly
// there is no physical thread to match, yet.
//
// When there is, complete the setup with code:Thread::HasStarted()
//-------------------------------------------------------------------------
// Allocates a Thread for a managed System.Thread that has not started yet,
// flags it as unstarted and runtime-owned, and publishes it in the
// ThreadStore so enumeration sees it. Completed later by Thread::HasStarted.
Thread* SetupUnstartedThread(BOOL bRequiresTSL)
{
    CONTRACTL {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    Thread* pNewThread = new Thread();

    // Mark unstarted + we-own before the store can hand it out.
    FastInterlockOr((ULONG *) &pNewThread->m_State,
                    (Thread::TS_Unstarted | Thread::TS_WeOwn));

    ThreadStore::AddThread(pNewThread, bRequiresTSL);

    return pNewThread;
}
//-------------------------------------------------------------------------
// Public function: DestroyThread()
// Destroys the specified Thread object, for a thread which is about to die.
//-------------------------------------------------------------------------
// Destroys the specified Thread object, for a thread which is about to die.
// Must be called on the dying thread itself. Resets abort state, pops any
// stale exception-handling trackers, releases event-pipe buffer ownership,
// and (unless the EE is shutting down) reports the thread dead and runs
// OnThreadTerminate.
void DestroyThread(Thread *th)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE (th == GetThread());

    _ASSERTE(g_fEEShutDown || th->m_dwLockCount == 0 || th->m_fRudeAborted);

#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
    if (g_fEnableARM)
    {
        // Charge the remaining CPU usage of this thread to its domain.
        AppDomain* pDomain = th->GetDomain();
        pDomain->UpdateProcessorUsage(th->QueryThreadProcessorUsage());
        FireEtwThreadTerminated((ULONGLONG)th, (ULONGLONG)pDomain, GetClrInstanceId());
    }
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING

    th->FinishSOWork();

    GCX_PREEMP_NO_DTOR();

    if (th->IsAbortRequested()) {
        // Reset trapping count.
        th->UnmarkThreadForAbort(Thread::TAR_ALL);
    }

    // Clear any outstanding stale EH state that maybe still active on the thread.
#ifdef WIN64EXCEPTIONS
    ExceptionTracker::PopTrackers((void*)-1);
#else // !WIN64EXCEPTIONS
#ifdef _TARGET_X86_
    PTR_ThreadExceptionState pExState = th->GetExceptionState();
    if (pExState->IsExceptionInProgress())
    {
        GCX_COOP();
        pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
    }
#else // !_TARGET_X86_
#error Unsupported platform
#endif // _TARGET_X86_
#endif // WIN64EXCEPTIONS

#ifdef FEATURE_PERFTRACING
    // Before the thread dies, mark its buffers as no longer owned
    // so that they can be cleaned up after the thread dies.
    EventPipeBufferList *pBufferList = th->GetEventPipeBufferList();
    if(pBufferList != NULL)
    {
        pBufferList->SetOwnedByThread(false);
    }
#endif // FEATURE_PERFTRACING

    if (g_fEEShutDown == 0)
    {
        th->SetThreadState(Thread::TS_ReportDead);
        th->OnThreadTerminate(FALSE);
    }
}
//-------------------------------------------------------------------------
// Public function: DetachThread()
// Marks the thread as needing to be destroyed, but doesn't destroy it yet.
//-------------------------------------------------------------------------
// Detaches the current Thread from its OS thread (DLL_THREAD_DETACH path or
// explicit detach). Marks the thread TS_Detached | TS_ReportDead and clears
// TLS; the Thread object itself is cleaned up later by the finalizer thread.
// fDLLThreadDetach indicates we are inside DLL_THREAD_DETACH, in which case
// COM-related teardown that COM performs itself is skipped.
HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
{
    // !!! Can not use contract here.
    // !!! Contract depends on Thread object for GC_TRIGGERS.
    // !!! At the end of this function, we call InternalSwitchOut,
    // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;

    // @todo .  We need to probe here, but can't introduce destructors etc.
    BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);

    // Clear any outstanding stale EH state that maybe still active on the thread.
#ifdef WIN64EXCEPTIONS
    ExceptionTracker::PopTrackers((void*)-1);
#else // !WIN64EXCEPTIONS
#ifdef _TARGET_X86_
    PTR_ThreadExceptionState pExState = GetExceptionState();
    if (pExState->IsExceptionInProgress())
    {
        GCX_COOP();
        pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
    }
#else // !_TARGET_X86_
#error Unsupported platform
#endif // _TARGET_X86_
#endif // WIN64EXCEPTIONS

#ifdef FEATURE_COMINTEROP
    IErrorInfo *pErrorInfo;
    // Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH,
    // otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData).
    if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK)
    {
        // if this is our IErrorInfo, release it now - we don't want ole32 to do it later as
        // part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point
        if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo))
        {
            // if it's not our IErrorInfo, put it back
            SetErrorInfo(0, pErrorInfo);
        }
        pErrorInfo->Release();
    }

    // Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH
    // (COM will do it or may have already done it automatically in that case).
    if (!fDLLThreadDetach)
    {
        RevokeApartmentSpy();
    }
#endif // FEATURE_COMINTEROP

    _ASSERTE(!PreemptiveGCDisabled());
    _ASSERTE(g_fEEShutDown || m_dwLockCount == 0 || m_fRudeAborted);

    _ASSERTE ((m_State & Thread::TS_Detached) == 0);

    _ASSERTE (this == GetThread());

#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
    if (g_fEnableARM && m_pDomain)
    {
        // Charge remaining CPU usage to the domain before the thread goes away.
        m_pDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
        FireEtwThreadTerminated((ULONGLONG)this, (ULONGLONG)m_pDomain, GetClrInstanceId());
    }
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING

    FinishSOWork();

    FastInterlockIncrement(&Thread::m_DetachCount);

    if (IsAbortRequested()) {
        // Reset trapping count.
        UnmarkThreadForAbort(Thread::TAR_ALL);
    }

    if (!IsBackground())
    {
        FastInterlockIncrement(&Thread::m_ActiveDetachCount);
        ThreadStore::CheckForEEShutdown();
    }

    END_CONTRACT_VIOLATION;

    // Swap the handle to the switched-out sentinel, then wait for any
    // concurrent user of the real handle to drain before retiring it.
    HANDLE hThread = GetThreadHandle();
    SetThreadHandle (SWITCHOUT_HANDLE_VALUE);
    while (m_dwThreadHandleBeingUsed > 0)
    {
        // Another thread is using the handle now.
#undef Sleep
        // We can not call __SwitchToThread since we can not go back to host.
        ::Sleep(10);
#define Sleep(a) Dont_Use_Sleep(a)
    }

    if (m_WeOwnThreadHandle && m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
    {
        // Stash the handle so it can be closed during final cleanup.
        m_ThreadHandleForClose = hThread;
    }

    // We need to make sure that TLS are touched last here.
    SetThread(NULL);
    SetAppDomain(NULL);

#ifdef ENABLE_CONTRACTS_DATA
    m_pClrDebugState = NULL;
#endif //ENABLE_CONTRACTS_DATA

#ifdef FEATURE_PERFTRACING
    // Before the thread dies, mark its buffers as no longer owned
    // so that they can be cleaned up after the thread dies.
    EventPipeBufferList *pBufferList = m_pEventPipeBufferList.Load();
    if(pBufferList != NULL)
    {
        pBufferList->SetOwnedByThread(false);
    }
#endif // FEATURE_PERFTRACING

    FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));

    // Do not touch Thread object any more.  It may be destroyed.

    // These detached threads will be cleaned up by finalizer thread.  But if the process uses
    // little managed heap, it will be a while before GC happens, and finalizer thread starts
    // working on detached thread.  So we wake up finalizer thread to clean up resources.
    //
    // (It's possible that this is the startup thread, and startup failed, and so the finalization
    //  machinery isn't fully initialized.  Hence this check.)
    if (g_fEEStarted)
        FinalizerThread::EnableFinalization();

    return S_OK;
}
// Returns this runtime instance's TLS index, used as a unique runtime id.
DWORD GetRuntimeId()
{
    LIMITED_METHOD_CONTRACT;

    return _tls_index;
}
//---------------------------------------------------------------------------
// Creates new Thread for reverse p-invoke calls.
//---------------------------------------------------------------------------
// Attaches the current OS thread to the runtime for a reverse p/invoke call.
// On failure (shutdown, or SetupThread failure), raises an SEH exception
// (EXCEPTION_EXX) carrying the HRESULT, because a managed exception cannot
// be delivered to the unmanaged caller at this point.
Thread* WINAPI CreateThreadBlockThrow()
{

    WRAPPER_NO_CONTRACT;

    // This is a workaround to disable our check for throwing exception in SetupThread.
    // We want to throw an exception for reverse p-invoke, and our assertion may fire if
    // an unmanaged caller does not set up an exception handler.
    CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code.
    Thread* pThread = NULL;
    BEGIN_ENTRYPOINT_THROWS;

    if (!CanRunManagedCode())
    {
        // CLR is shutting down - someone's DllMain detach event may be calling back into managed code.
        // It is misleading to use our COM+ exception code, since this is not a managed exception.
        ULONG_PTR arg = E_PROCESS_SHUTDOWN_REENTRY;
        RaiseException(EXCEPTION_EXX, 0, 1, &arg);
    }

    HRESULT hr = S_OK;
    pThread = SetupThreadNoThrow(&hr);
    if (pThread == NULL)
    {
        // Creating Thread failed, and we need to throw an exception to report status.
        // It is misleading to use our COM+ exception code, since this is not a managed exception.
        ULONG_PTR arg = hr;
        RaiseException(EXCEPTION_EXX, 0, 1, &arg);
    }
    END_ENTRYPOINT_THROWS;

    return pThread;
}
#ifdef _DEBUG
// Debug-only: size of the objref validation hash table; randomized at
// startup in InitThreadManager to perturb hash collisions across runs.
DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
#endif
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
//---------------------------------------------------------------------------
// One-time initialization. Called during Dll initialization. So
// be careful what you do in here!
//---------------------------------------------------------------------------
// One-time process-wide initialization of the thread manager: makes the
// patched-helper code page writable, records the TLS location of the current
// thread info for the debugger, allocates the interop-debugging TLS slot,
// initializes the ThreadStore, the deadlock-aware crst, and thread suspension.
// Called during DLL initialization, so keep side effects minimal.
void InitThreadManager()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    InitializeYieldProcessorNormalizedCrst();

    // All patched helpers should fit into one page.
    // If you hit this assert on retail build, there is most likely problem with BBT script.
    _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());

    // I am using virtual protect to cover the entire range that this code falls in.
    //

    // We could reset it to non-writeable inbetween GCs and such, but then we'd have to keep on re-writing back and forth,
    // so instead we'll leave it writable from here forward.

    DWORD oldProt;
    if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
                           PAGE_EXECUTE_READWRITE, &oldProt))
    {
        _ASSERTE(!"ClrVirtualProtect of code page failed");
        COMPlusThrowWin32();
    }

#ifndef FEATURE_PAL
    _ASSERTE(GetThread() == NULL);

    // Compute where gCurrentThreadInfo lives relative to this module's TLS
    // block so out-of-process tooling can find it from the TEB.
    PTEB Teb = NtCurrentTeb();
    BYTE** tlsArray = (BYTE**)Teb->ThreadLocalStoragePointer;
    BYTE* tlsData = (BYTE*)tlsArray[_tls_index];

    size_t offsetOfCurrentThreadInfo = (BYTE*)&gCurrentThreadInfo - tlsData;

    _ASSERTE(offsetOfCurrentThreadInfo < 0x8000);
    _ASSERTE(_tls_index < 0x10000);

    // Save gCurrentThreadInfo location for debugger
    // (packed: low 16 bits = TLS index, next 15 bits = offset, high bit = valid flag)
    g_TlsIndex = (DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000);

    _ASSERTE(g_TrapReturningThreads == 0);
#endif // !FEATURE_PAL

#ifdef FEATURE_INTEROP_DEBUGGING
    g_debuggerWordTLSIndex = UnsafeTlsAlloc();
    if (g_debuggerWordTLSIndex == TLS_OUT_OF_INDEXES)
        COMPlusThrowWin32();
#endif

    __ClrFlsGetBlock = CExecutionEngine::GetTlsData;

    IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force));

    ThreadStore::InitThreadStore();

    // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
    // If you remove this flag, we will switch to preemptive mode when entering
    // g_DeadlockAwareCrst, which means all functions that enter it will become
    // GC_TRIGGERS.  (This includes all uses of CrstHolder.)  So be sure
    // to update the contracts if you remove this flag.
    g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE);

#ifdef _DEBUG
    // Randomize OBJREF_HASH to handle hash collision.
    Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10);
#endif // _DEBUG

    ThreadSuspend::Initialize();
}
//************************************************************************
// Thread members
//************************************************************************
#if defined(_DEBUG) && defined(TRACK_SYNC)
// One outstanding synchronization held by this thread:
// One outstanding synchronization held by this thread: the lock plus the
// instruction pointer of the code that acquired it.
struct Dbg_TrackSyncEntry
{
    UINT_PTR     m_caller;       // IP of the acquirer
    AwareLock   *m_pAwareLock;   // the lock that was taken

    // Matches both the acquiring caller and the lock.
    BOOL Equiv (UINT_PTR caller, void *pAwareLock)
    {
        LIMITED_METHOD_CONTRACT;
        if (m_caller != caller)
            return FALSE;
        return m_pAwareLock == pAwareLock;
    }

    // Matches the lock alone, regardless of which caller took it.
    BOOL Equiv (void *pAwareLock)
    {
        LIMITED_METHOD_CONTRACT;
        return m_pAwareLock == pAwareLock;
    }
};
// Each thread has a stack that tracks all enter and leave requests
// Each thread has a stack that tracks all enter and leave requests
struct Dbg_TrackSyncStack : public Dbg_TrackSync
{
    enum
    {
        MAX_TRACK_SYNC = 20,        // adjust stack depth as necessary
    };

    void EnterSync  (UINT_PTR caller, void *pAwareLock);
    void LeaveSync  (UINT_PTR caller, void *pAwareLock);

    Dbg_TrackSyncEntry  m_Stack [MAX_TRACK_SYNC];   // outstanding acquisitions, oldest first
    UINT_PTR            m_StackPointer;             // index of next free slot
    BOOL                m_Active;                   // FALSE once tracking is disabled (e.g. on overflow)

    Dbg_TrackSyncStack() : m_StackPointer(0),
                           m_Active(TRUE)
    {
        LIMITED_METHOD_CONTRACT;
    }
};
// ensure that registers are preserved across this call
#ifdef _MSC_VER
#pragma optimize("", off)
#endif
// A pain to do all this from ASM, but watch out for trashed registers
// Called from assembly stubs: records a lock acquisition on the current
// thread's sync-tracking stack. Registers are deliberately preserved around
// this call (optimizations disabled above).
EXTERN_C void EnterSyncHelper (UINT_PTR caller, void *pAwareLock)
{
    BEGIN_ENTRYPOINT_THROWS;
    WRAPPER_NO_CONTRACT;
    GetThread()->m_pTrackSync->EnterSync(caller, pAwareLock);
    END_ENTRYPOINT_THROWS;

}
// Called from assembly stubs: records a lock release on the current
// thread's sync-tracking stack.
EXTERN_C void LeaveSyncHelper (UINT_PTR caller, void *pAwareLock)
{
    BEGIN_ENTRYPOINT_THROWS;
    WRAPPER_NO_CONTRACT;
    GetThread()->m_pTrackSync->LeaveSync(caller, pAwareLock);
    END_ENTRYPOINT_THROWS;

}
#ifdef _MSC_VER
#pragma optimize("", on)
#endif
// Records a lock acquisition (caller IP + lock) on the tracking stack.
//
// BUG FIX: the original only checked for overflow *inside* the m_Active
// test, but performed the push unconditionally. Once tracking was disabled
// (m_Active == FALSE, set on overflow below), every subsequent EnterSync
// still wrote to m_Stack[m_StackPointer] and incremented the pointer —
// and since LeaveSync does nothing while inactive, the pointer grew without
// bound, writing past the end of m_Stack[MAX_TRACK_SYNC]. Now we return
// early when tracking is off, and only push after the bounds check passes.
void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock)
{
    LIMITED_METHOD_CONTRACT;

    STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
                    caller,
                    ((AwareLock*)pAwareLock)->GetRecursionLevel(),
                    ((AwareLock*)pAwareLock)->GetLockState(),
                    ((AwareLock*)pAwareLock)->GetHoldingThread());

    if (!m_Active)
    {
        // Tracking has been disabled; do not touch the stack.
        return;
    }

    if (m_StackPointer >= MAX_TRACK_SYNC)
    {
        _ASSERTE(!"Overflowed synchronization stack checking. Disabling");
        m_Active = FALSE;
        return;
    }

    m_Stack[m_StackPointer].m_caller = caller;
    m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock;

    m_StackPointer++;
}
// Records a lock release. Normally the released lock is the top of the
// tracking stack; out-of-order releases are tolerated (with an assert) by
// searching down the stack and compacting, since some code legitimately
// releases locks out of order.
void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock)
{
    WRAPPER_NO_CONTRACT;

    STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
                    caller,
                    ((AwareLock*)pAwareLock)->GetRecursionLevel(),
                    ((AwareLock*)pAwareLock)->GetLockState(),
                    ((AwareLock*)pAwareLock)->GetHoldingThread());

    if (m_Active)
    {
        if (m_StackPointer == 0)
            _ASSERTE(!"Underflow in leaving synchronization");
        else
        if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock))
        {
            // Common case: releasing the most recently acquired lock.
            m_StackPointer--;
        }
        else
        {
            // Out-of-order release: find the entry lower in the stack and
            // shift everything above it down one slot.
            for (int i=m_StackPointer - 2; i>=0; i--)
            {
                if (m_Stack[i].Equiv(pAwareLock))
                {
                    _ASSERTE(!"Locks are released out of order.  This might be okay...");
                    memcpy(&m_Stack[i], &m_Stack[i+1],
                           sizeof(m_Stack[0]) * (m_StackPointer - i - 1));

                    return;
                }
            }
            _ASSERTE(!"Trying to release a synchronization lock which isn't held");
        }
    }
}
#endif // TRACK_SYNC
// Seed for per-thread hash-code generation; advanced by an LCG in the
// Thread constructor so each new thread gets a different starting value.
static DWORD dwHashCodeSeed = 123456789;
#ifdef _DEBUG
// Debug-only sanity check that an AppDomain pointer is still safe to use,
// given the caller's claim (ADValidityKind) about why it should be valid.
// Asserts if none of the claimed conditions hold.
void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind)
{
    CONTRACTL
    {
        NOTHROW;
        FORBID_FAULT;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    //
    // Note: this apparently checks if any one of the supplied conditions is satisfied, rather
    // than checking that *all* of them are satisfied.  One would have expected it to assert all of the
    // conditions but it does not.
    //

    CONTRACT_VIOLATION(FaultViolation);
    if (::GetAppDomain()==pDomain)
        return;
    if ((ADValidityKind &  ADV_DEFAULTAD) &&
        pDomain->IsDefaultDomain())
       return;
    if ((ADValidityKind &  ADV_ITERATOR) &&
        pDomain->IsHeldByIterator())
       return;
    if ((ADValidityKind &  ADV_CREATING) &&
        pDomain->IsBeingCreated())
       return;
    if ((ADValidityKind &  ADV_COMPILATION) &&
        pDomain->IsCompilationDomain())
       return;
    if ((ADValidityKind &  ADV_FINALIZER) &&
        IsFinalizerThread())
       return;
    if ((ADValidityKind &  ADV_RUNNINGIN) &&
        pDomain->IsRunningIn(GetThread()))
       return;
    if ((ADValidityKind &  ADV_REFTAKER) &&
        pDomain->IsHeldByRefTaker())
       return;

    _ASSERTE(!"Appdomain* can be invalid");
}
#endif
//--------------------------------------------------------------------
// Thread construction
//--------------------------------------------------------------------
// Thread constructor: initializes every field to a safe default, allocates
// the GC handles for the exposed managed Thread object, the OS CONTEXT, and
// (last of all) publishes a thin-lock thread id. Field initialization order
// matters in places; holders guard the allocations until construction
// completes, then are suppressed at the end.
Thread::Thread()
{
    CONTRACTL {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    m_pFrame                = FRAME_TOP;

    m_fPreemptiveGCDisabled = 0;

#ifdef _DEBUG
    m_ulForbidTypeLoad      = 0;
    m_GCOnTransitionsOK     = TRUE;
#endif

#ifdef ENABLE_CONTRACTS
    m_pClrDebugState = NULL;
    m_ulEnablePreemptiveGCCount  = 0;
#endif

    m_dwLockCount = 0;
    m_dwBeginLockCount = 0;

#ifdef _DEBUG
    dbg_m_cSuspendedThreads = 0;
    dbg_m_cSuspendedThreadsWithoutOSLock = 0;
    m_Creater.Clear();
    m_dwUnbreakableLockCount = 0;
#endif

    m_dwForbidSuspendThread = 0;

    // Initialize lock state
    m_pHead = &m_embeddedEntry;         // circular list with the embedded entry as sentinel
    m_embeddedEntry.pNext = m_pHead;
    m_embeddedEntry.pPrev = m_pHead;
    m_embeddedEntry.dwLLockID = 0;
    m_embeddedEntry.dwULockID = 0;
    m_embeddedEntry.wReaderLevel = 0;

    m_pBlockingLock = NULL;

    m_alloc_context.init();
    m_thAllocContextObj = 0;

    m_UserInterrupt = 0;
    m_WaitEventLink.m_Next = NULL;
    m_WaitEventLink.m_LinkSB.m_pNext = NULL;
    m_ThreadHandle = INVALID_HANDLE_VALUE;
    m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
    m_ThreadHandleForResume = INVALID_HANDLE_VALUE;
    m_WeOwnThreadHandle = FALSE;

#ifdef _DEBUG
    m_ThreadId = UNINITIALIZED_THREADID;
#endif //_DEBUG

    // Initialize this variable to a very different start value for each thread
    // Using linear congruential generator from Knuth Vol. 2, p. 102, line 24
    dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1;
    m_dwHashCodeSeed = dwHashCodeSeed;

    m_hijackLock = FALSE;

    m_OSThreadId = 0;
    m_Priority = INVALID_THREAD_PRIORITY;
    m_ExternalRefCount = 1;
    m_UnmanagedRefCount = 0;
    m_State = TS_Unstarted;
    m_StateNC = TSNC_Unknown;

    // It can't be a LongWeakHandle because we zero stuff out of the exposed
    // object as it is finalized.  At that point, calls to GetCurrentThread()
    // had better get a new one!
    m_ExposedObject = CreateGlobalShortWeakHandle(NULL);

    GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject);

    m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL);
    GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject);

    m_LastThrownObjectHandle = NULL;
    m_ltoIsUnhandled = FALSE;

    m_AbortReason = NULL;

    m_debuggerFilterContext = NULL;
    m_debuggerCantStop = 0;
    m_fInteropDebuggingHijacked = FALSE;
    m_profilerCallbackState = 0;
#ifdef FEATURE_PROFAPI_ATTACH_DETACH
    m_dwProfilerEvacuationCounter = 0;
#endif // FEATURE_PROFAPI_ATTACH_DETACH

    m_pProfilerFilterContext = NULL;

    m_CacheStackBase = 0;
    m_CacheStackLimit = 0;
    m_CacheStackSufficientExecutionLimit = 0;

    m_LastAllowableStackAddress= 0;
    m_ProbeLimit = 0;

#ifdef _DEBUG
    m_pCleanedStackBase = NULL;
#endif

#ifdef STACK_GUARDS_DEBUG
    m_pCurrentStackGuard = NULL;
#endif

#ifdef FEATURE_HIJACK
    // Poison the hijack return-address fields so stale use is obvious.
    m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC;
    m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC;

#ifndef PLATFORM_UNIX
    X86_ONLY(m_LastRedirectIP = 0);
    X86_ONLY(m_SpinCount = 0);
#endif // PLATFORM_UNIX
#endif // FEATURE_HIJACK

#if defined(_DEBUG) && defined(TRACK_SYNC)
    m_pTrackSync = new Dbg_TrackSyncStack;
    NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync));
#endif  // TRACK_SYNC

    m_RequestedStackSize = 0;
    m_PreventAsync = 0;
    m_PreventAbort = 0;
    m_nNestedMarshalingExceptions = 0;
    m_pDomain = NULL;
#ifdef FEATURE_COMINTEROP
    m_fDisableComObjectEagerCleanup = false;
#endif //FEATURE_COMINTEROP
    m_fHasDeadThreadBeenConsideredForGCTrigger = false;
    m_TraceCallCount = 0;
    m_ThrewControlForThread = 0;
    m_OSContext = NULL;
    m_ThreadTasks = (ThreadTasks)0;
    m_pLoadLimiter= NULL;
    m_pLoadingFile = NULL;

    // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed.
    _ASSERTE((((size_t) &m_State) & 3) == 0);
    _ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0);

    // Track perf counter for the logical thread object.
    COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical++);

    // On all callbacks, call the trap code, which we now have
    // wired to cause a GC.  Thus we will do a GC on all Transition Frame Transitions (and more).
   if (GCStress<cfg_transition>::IsEnabled())
   {
        m_State = (ThreadState) (m_State | TS_GCOnTransitions);
   }

    m_AbortType = EEPolicy::TA_None;
    m_AbortInfo = 0;
    m_AbortEndTime = MAXULONGLONG;
    m_RudeAbortEndTime = MAXULONGLONG;
    m_AbortController = 0;
    m_AbortRequestLock = 0;
    m_fRudeAbortInitiated = FALSE;

    m_pIOCompletionContext = NULL;

#ifdef _DEBUG
    m_fRudeAborted = FALSE;
    m_dwAbortPoint = 0;
#endif

    m_OSContext = new CONTEXT();
    NewHolder<CONTEXT> contextHolder(m_OSContext);

    m_pSavedRedirectContext = NULL;
    NewHolder<CONTEXT> savedRedirectContextHolder(m_pSavedRedirectContext);

#ifdef FEATURE_COMINTEROP
    m_pRCWStack = new RCWStackHeader();
#endif

#ifdef _DEBUG
    m_bGCStressing = FALSE;
    m_bUniqueStacking = FALSE;
#endif

    m_pPendingTypeLoad = NULL;

#ifdef FEATURE_PREJIT
    m_pIBCInfo = NULL;
#endif

    m_dwAVInRuntimeImplOkayCount = 0;

#if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(PLATFORM_UNIX) // GCCOVER
    m_fPreemptiveGCDisabledForGCStress = false;
#endif

#ifdef _DEBUG
    m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1;
#endif

    m_dwHostTaskRefCount = 0;

    m_pExceptionDuringStartup = NULL;

#ifdef HAVE_GCCOVER
    m_pbDestCode = NULL;
    m_pbSrcCode = NULL;
#if defined(GCCOVER_TOLERATE_SPURIOUS_AV)
    m_pLastAVAddress = NULL;
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)
#endif // HAVE_GCCOVER

    m_fCompletionPortDrained = FALSE;

    m_debuggerActivePatchSkipper = NULL;
    m_dwThreadHandleBeingUsed = 0;
    SetProfilerCallbacksAllowed(TRUE);

    m_pCreatingThrowableForException = NULL;
#ifdef _DEBUG
    m_dwDisableAbortCheckCount = 0;
#endif // _DEBUG

#ifdef WIN64EXCEPTIONS
    m_dwIndexClauseForCatch = 0;
    m_sfEstablisherOfActualHandlerFrame.Clear();
#endif // WIN64EXCEPTIONS

    m_threadPoolCompletionCount = 0;

    Thread *pThread = GetThread();
    InitContext();
    if (pThread)
    {
        _ASSERTE(pThread->GetDomain());
        // Start off the new thread in the default context of
        // the creating thread's appDomain. This could be changed by SetDelegate
        SetKickOffDomainId(pThread->GetDomain()->GetId());
    } else
        SetKickOffDomainId((ADID)DefaultADID);

    // Do not expose thread until it is fully constructed
    g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId);

    //
    // DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT.
    // NewId() allows this Thread instance to be accessed via a Thread Id.  Do not
    // add additional construction after this point to prevent the race condition
    // of accessing a partially constructed Thread via Thread Id lookup.
    //

    exposedObjectHolder.SuppressRelease();
    strongHndToExposedObjectHolder.SuppressRelease();
#if defined(_DEBUG) && defined(TRACK_SYNC)
    trackSyncHolder.SuppressRelease();
#endif
    contextHolder.SuppressRelease();
    savedRedirectContextHolder.SuppressRelease();

    managedThreadCurrentCulture = NULL;
    managedThreadCurrentUICulture = NULL;

#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
    m_ullProcessorUsageBaseline = 0;
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING

#ifdef FEATURE_COMINTEROP
    m_uliInitializeSpyCookie.QuadPart = 0ul;
    m_fInitializeSpyRegistered = false;
    m_pLastSTACtxCookie = NULL;
#endif // FEATURE_COMINTEROP

    m_fGCSpecial = FALSE;

    m_wCPUGroup = 0;
    m_pAffinityMask = 0;

    m_pAllLoggedTypes = NULL;

#ifdef FEATURE_PERFTRACING
    m_pEventPipeBufferList = NULL;
    m_eventWriteInProgress = false;
    memset(&m_activityId, 0, sizeof(m_activityId));
#endif // FEATURE_PERFTRACING
    m_HijackReturnKind = RT_Illegal;
}
//--------------------------------------------------------------------
// Failable initialization occurs here.
//--------------------------------------------------------------------
// Failable part of thread initialization: duplicates a real handle for the
// current OS thread (reverting impersonation around the duplication on
// Windows), allocates suspend/wait events for threads we don't own, sets
// the FP control word, and establishes stack limits and the I/O completion
// context. Throws on failure; returns TRUE otherwise.
BOOL Thread::InitThread(BOOL fInternal)
{
    CONTRACTL {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    HANDLE  hDup = INVALID_HANDLE_VALUE;
    BOOL    ret = TRUE;

        // This message actually serves a purpose (which is why it is always run)
        // The Stress log is run during hijacking, when other threads can be suspended
        // at arbitrary locations (including when holding a lock that NT uses to serialize
        // all memory allocations).  By sending a message now, we insure that the stress
        // log will not allocate memory at these critical times and avoid deadlock.
    STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread  managed Thread %p Thread Id = %x\n", this, GetThreadId());

    if ((m_State & TS_WeOwn) == 0)
    {
        COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads++);
    }
    else
    {
        COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical++);
    }

#ifndef FEATURE_PAL
    // workaround: Remove this when we flow impersonation token to host.
    BOOL    reverted = FALSE;
    HANDLE  threadToken = INVALID_HANDLE_VALUE;
#endif // !FEATURE_PAL

    if (m_ThreadHandle == INVALID_HANDLE_VALUE)
    {
        // For WinCE, all clients have the same handle for a thread.  Duplication is
        // not possible.  We make sure we never close this handle unless we created
        // the thread (TS_WeOwn).
        //
        // For Win32, each client has its own handle.  This is achieved by duplicating
        // the pseudo-handle from ::GetCurrentThread().  Unlike WinCE, this service
        // returns a pseudo-handle which is only useful for duplication.  In this case
        // each client is responsible for closing its own (duplicated) handle.
        //
        // We don't bother duplicating if WeOwn, because we created the handle in the
        // first place.
        // Thread is created when or after the physical thread started running
        HANDLE curProcess = ::GetCurrentProcess();

#ifndef FEATURE_PAL

        // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only
        // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include
        // THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be
        // able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then
        // re-impersonate before we leave this routine.
        if (!RevertIfImpersonated(&reverted, &threadToken))
        {
            COMPlusThrowWin32();
        }

        // RAII: re-impersonate and close the token when this scope exits,
        // no matter how we leave it.
        class EnsureResetThreadToken
        {
        private:
            BOOL m_NeedReset;
            HANDLE m_threadToken;
        public:
            EnsureResetThreadToken(HANDLE threadToken, BOOL reverted)
            {
                m_threadToken = threadToken;
                m_NeedReset = reverted;
            }
            ~EnsureResetThreadToken()
            {
                UndoRevert(m_NeedReset, m_threadToken);
                if (m_threadToken != INVALID_HANDLE_VALUE)
                {
                    CloseHandle(m_threadToken);
                }
            }
        };

        EnsureResetThreadToken resetToken(threadToken, reverted);

#endif // !FEATURE_PAL

        if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup,
                              0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS))
        {
            _ASSERTE(hDup != INVALID_HANDLE_VALUE);

            SetThreadHandle(hDup);
            m_WeOwnThreadHandle = TRUE;
        }
        else
        {
            COMPlusThrowWin32();
        }
    }

    if ((m_State & TS_WeOwn) == 0)
    {
        if (!AllocHandles())
        {
            ThrowOutOfMemory();
        }
    }

    _ASSERTE(HasValidThreadHandle());

    m_random.Init();

    // Set floating point mode to round to nearest
#ifndef FEATURE_PAL
    (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR );

    m_pTEB = (struct _NT_TIB*)NtCurrentTeb();

#endif // !FEATURE_PAL

    if (m_CacheStackBase == 0)
    {
        _ASSERTE(m_CacheStackLimit == 0);
        _ASSERTE(m_LastAllowableStackAddress == 0);
        _ASSERTE(m_ProbeLimit == 0);
        ret = SetStackLimits(fAll);
        if (ret == FALSE)
        {
            ThrowOutOfMemory();
        }
    }

    ret = Thread::AllocateIOCompletionContext();
    if (!ret)
    {
        ThrowOutOfMemory();
    }

    _ASSERTE(ret); // every failure case for ret should throw.
    return ret;
}
// Allocate all the handles. When we are kicking off a new thread, we can call
// here before the thread starts running.
// Allocate the per-thread synchronization events (debug-suspend event and
// wait event).  May be called by the creating thread before the new thread
// starts running, or lazily for threads the runtime did not create.
// Returns TRUE on success; FALSE if event creation threw (e.g. OOM).
BOOL Thread::AllocHandles()
{
    WRAPPER_NO_CONTRACT;

    // Events must not already exist; this is a one-shot initializer.
    _ASSERTE(!m_DebugSuspendEvent.IsValid());
    _ASSERTE(!m_EventWait.IsValid());

    BOOL fOK = TRUE;
    EX_TRY {
        // create a manual reset event for getting the thread to a safe point
        m_DebugSuspendEvent.CreateManualEvent(FALSE);
        m_EventWait.CreateManualEvent(TRUE);
    }
    EX_CATCH {
        fOK = FALSE;

        // NOTE(review): these conditions look inverted (closing when *not*
        // valid).  Presumably CloseEvent tolerates / cleans up a partially
        // created event here — confirm against CLREvent's semantics.
        if (!m_DebugSuspendEvent.IsValid()) {
            m_DebugSuspendEvent.CloseEvent();
        }

        if (!m_EventWait.IsValid()) {
            m_EventWait.CloseEvent();
        }
    }
    EX_END_CATCH(RethrowTerminalExceptions);

    return fOK;
}
//--------------------------------------------------------------------
// This is the alternate path to SetupThread/InitThread. If we created
// an unstarted thread, we have SetupUnstartedThread/HasStarted.
//--------------------------------------------------------------------
// Complete runtime initialization for a thread created via
// SetupUnstartedThread, called once the physical thread is actually running:
// set up TLS, apartment/context, register with the thread store, and fire
// ARM/ETW notifications.  On failure the exception is cached in
// m_pExceptionDuringStartup (to be rethrown by Thread.Start) and all
// partially-completed setup is unwound.
//   bRequiresTSL - whether TransferStartedThread must acquire the thread
//                  store lock itself.
// Returns TRUE on success.
BOOL Thread::HasStarted(BOOL bRequiresTSL)
{
    CONTRACTL {
        NOTHROW;
        DISABLED(GC_NOTRIGGER);
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // @todo need a probe that tolerates not having a thread setup at all
    CONTRACT_VIOLATION(SOToleranceViolation);

    _ASSERTE(!m_fPreemptiveGCDisabled);     // can't use PreemptiveGCDisabled() here

    // This is cheating a little. There is a pathway here from SetupThread, but only
    // via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in
    // preemptive mode, ready for a transition. But in the IJW case, it can return a
    // cooperative mode thread. RunDllMain handles this "surprise" correctly.
    m_fPreemptiveGCDisabled = TRUE;

    // Normally, HasStarted is called from the thread's entrypoint to introduce it to
    // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
    // that call into managed code. In that case, the second HasStarted call is
    // redundant and should be ignored.
    if (GetThread() == this)
        return TRUE;

    _ASSERTE(GetThread() == 0);
    _ASSERTE(HasValidThreadHandle());

    BOOL fKeepTLS = FALSE;
    BOOL fCanCleanupCOMState = FALSE;
    BOOL res = TRUE;

    res = SetStackLimits(fAll);
    if (res == FALSE)
    {
        m_pExceptionDuringStartup = Exception::GetOOMException();
        goto FAILURE;
    }

    // If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup
    // which will be thrown in Thread.Start as an internal exception
    EX_TRY
    {
        //
        // Initialization must happen in the following order - hosts like SQL Server depend on this.
        //
        CExecutionEngine::SetupTLSForThread(this);

        // From here on a failure must also undo the COM apartment state.
        fCanCleanupCOMState = TRUE;
        res = PrepareApartmentAndContext();
        if (!res)
        {
            ThrowOutOfMemory();
        }

        InitThread(FALSE);

        if (SetThread(this) == FALSE)
        {
            ThrowOutOfMemory();
        }

        if (SetAppDomain(m_pDomain) == FALSE)
        {
            ThrowOutOfMemory();
        }

        SetupThreadForHost();

        ThreadStore::TransferStartedThread(this, bRequiresTSL);

#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
        if (g_fEnableARM)
        {
            QueryThreadProcessorUsage();
        }
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
#ifdef FEATURE_EVENT_TRACE
        ETW::ThreadLog::FireThreadCreated(this);
#endif // FEATURE_EVENT_TRACE
    }
    EX_CATCH
    {
        // Cache the exception so Thread.Start can rethrow it on the creating
        // thread.  SuppressRelease keeps the EX_CATCH holder from deleting
        // the exception object when the handler exits.
        if (__pException != NULL)
        {
            __pException.SuppressRelease();
            m_pExceptionDuringStartup = __pException;
        }
        res = FALSE;
    }
    EX_END_CATCH(SwallowAllExceptions);

FAILURE:
    if (res == FALSE)
    {
        // Unwind: drop cooperative mode, mark the thread failed-started, and
        // release the references / COM state acquired during setup.
        if (m_fPreemptiveGCDisabled)
        {
            m_fPreemptiveGCDisabled = FALSE;
        }
        _ASSERTE (HasThreadState(TS_Unstarted));

        SetThreadState(TS_FailStarted);

        if (GetThread() != NULL && IsAbortRequested())
            UnmarkThreadForAbort(TAR_ALL);

        if (!fKeepTLS)
        {
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
            //
            // Undo our call to PrepareApartmentAndContext above, so we don't leak a CoInitialize
            // If we're keeping TLS, then the host's call to ExitTask will clean this up instead.
            //
            if (fCanCleanupCOMState)
            {
                // The thread pointer in TLS may not be set yet, if we had a failure before we set it.
                // So we'll set it up here (we'll unset it a few lines down).
                if (SetThread(this) != FALSE)
                {
                    CleanupCOMState();
                }
            }
#endif
            FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
            // One of the components of OtherThreadsComplete() has changed, so check whether
            // we should now exit the EE.
            ThreadStore::CheckForEEShutdown();
            DecExternalCount(/*holdingLock*/ !bRequiresTSL);
            SetThread(NULL);
            SetAppDomain(NULL);
        }
    }
    else
    {
        FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized);

#ifdef DEBUGGING_SUPPORTED
        //
        // If we're debugging, let the debugger know that this
        // thread is up and running now.
        //
        if (CORDebuggerAttached())
        {
            g_pDebugInterface->ThreadCreated(this);
        }
        else
        {
            LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId()));
        }
#endif // DEBUGGING_SUPPORTED

#ifdef PROFILING_SUPPORTED
        // If a profiler is running, let them know about the new thread.
        //
        // The call to IsGCSpecial is crucial to avoid a deadlock. See code:Thread::m_fGCSpecial for more
        // information
        if (!IsGCSpecial())
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
            BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification

            {
                GCX_PREEMP();
                g_profControlBlock.pProfInterface->ThreadCreated((ThreadID) this);
            }

            GC_ON_TRANSITIONS(gcOnTransition);

            DWORD osThreadId = ::GetCurrentThreadId();
            g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
                (ThreadID) this, osThreadId);
            END_PIN_PROFILER();
        }
#endif // PROFILING_SUPPORTED

        // CoreCLR does not support user-requested thread suspension
        _ASSERTE(!(m_State & TS_SuspendUnstarted));
    }
    return res;
}
// Allocate and attach a fresh IOCompletionContext to this thread.
// Returns TRUE on success, FALSE if the allocation failed (in which case
// m_pIOCompletionContext is left untouched).
BOOL Thread::AllocateIOCompletionContext()
{
    WRAPPER_NO_CONTRACT;

    PIOCompletionContext pContext = new (nothrow) IOCompletionContext;
    if (pContext == NULL)
    {
        // Out of memory.
        return FALSE;
    }

    pContext->lpOverlapped = NULL;   // no pending overlapped operation yet
    m_pIOCompletionContext = pContext;
    return TRUE;
}
// Release this thread's IOCompletionContext, if one was allocated.
// Safe to call when no context exists; idempotent.
VOID Thread::FreeIOCompletionContext()
{
    WRAPPER_NO_CONTRACT;

    PIOCompletionContext pContext = (PIOCompletionContext) m_pIOCompletionContext;
    if (pContext != NULL)
    {
        delete pContext;
        m_pIOCompletionContext = NULL;
    }
}
// Raise a managed ThreadStartException on the current thread, wrapping the
// exception cached in m_pExceptionDuringStartup while the thread was
// starting.  Consumes (deletes) the cached exception.  Must run in
// cooperative mode on a managed thread.
void Thread::HandleThreadStartupFailure()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    _ASSERTE(GetThread() != NULL);

    // GC-protected pair: the throwable to raise and its inner "reason".
    struct ProtectArgs
    {
        OBJECTREF pThrowable;
        OBJECTREF pReason;
    } args;
    memset(&args, 0, sizeof(ProtectArgs));

    GCPROTECT_BEGIN(args);

    MethodTable *pMT = MscorlibBinder::GetException(kThreadStartException);
    args.pThrowable = AllocateObject(pMT);

    MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR);

    // Convert the cached native exception into a managed throwable to use
    // as the inner reason, then free the native copy.
    if (m_pExceptionDuringStartup)
    {
        args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup);
        Exception::Delete(m_pExceptionDuringStartup);
        m_pExceptionDuringStartup = NULL;
    }

    // Invoke the ThreadStartException constructor (this, reason).
    ARG_SLOT args1[] = {
        ObjToArgSlot(args.pThrowable),
        ObjToArgSlot(args.pReason),
    };
    exceptionCtor.Call(args1);

    GCPROTECT_END(); //Prot

    RaiseTheExceptionInternalOnly(args.pThrowable, FALSE);
}
#ifndef FEATURE_PAL
// If the current thread runs under an impersonation token, revert to the
// process identity and return the token to the caller through phToken.
// *bReverted reports whether a revert actually happened.  Returns FALSE only
// when the revert itself failed.
BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken)
{
    WRAPPER_NO_CONTRACT;

    // A failed OpenThreadToken is treated as "not impersonating" — there is
    // no win32 api to query impersonation directly; the only alternative
    // is to use NtCurrentTeb->IsImpersonating().
    if (!OpenThreadToken(GetCurrentThread(),
                         TOKEN_IMPERSONATE,
                         TRUE,
                         phToken))
    {
        return TRUE;
    }

    *bReverted = RevertToSelf();
    return *bReverted;
}
// Re-establish the impersonation token that RevertIfImpersonated removed.
// Failure to restore the token is treated as fatal (security-sensitive).
void UndoRevert(BOOL bReverted, HANDLE hToken)
{
    if (!bReverted)
        return;

    if (SetThreadToken(NULL, hToken))
        return;

    _ASSERT("Undo Revert -> SetThreadToken failed");
    STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken);
    EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
}
#endif // !FEATURE_PAL
// We don't want ::CreateThread() calls scattered throughout the source. So gather
// them all here.
// Create the underlying OS thread for this Thread object.  All
// ::CreateThread usage is funneled through here (via CreateNewOSThread).
//   stackSize - requested stack reservation (must fit in a DWORD)
//   start     - thread entry point
//   args      - opaque argument passed to start
//   pName     - optional thread name (non-PAL platforms only)
// Returns FALSE on failure.
BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;
    BOOL bRet;

    //This assert is here to prevent a bug in the future
    // CreateTask currently takes a DWORD and we will downcast
    // if that interface changes to take a SIZE_T this Assert needs to be removed.
    //
    _ASSERTE(stackSize <= 0xFFFFFFFF);

#ifndef FEATURE_PAL
    // Don't let the new thread inherit our impersonation token: revert to
    // the process identity around the create call (restored below).
    HandleHolder token;
    BOOL bReverted = FALSE;
    bRet = RevertIfImpersonated(&bReverted, &token);
    if (bRet != TRUE)
        return bRet;
#endif // !FEATURE_PAL

    // Mark this Thread as CLR-created before the OS thread exists.
    m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread);
    bRet = CreateNewOSThread(stackSize, start, args);
#ifndef FEATURE_PAL
    UndoRevert(bReverted, token);
    if (pName != NULL)
        SetThreadName(m_ThreadHandle, pName);
#endif // !FEATURE_PAL

    return bRet;
}
// This is to avoid the 64KB/1MB aliasing problem present on Pentium 4 processors,
// which can significantly impact performance with HyperThreading enabled
// Trampoline entry point for threads created by CreateNewOSThread.  Offsets
// the new thread's stack before dispatching to the real entry point (see the
// aliasing comment above), then frees the parameter block.
DWORD WINAPI Thread::intermediateThreadProc(PVOID arg)
{
    WRAPPER_NO_CONTRACT;

    // Stagger each thread's stack by a rotating multiple of
    // offset_multiplier.  NOTE(review): m_offset_counter appears to be
    // updated without synchronization — presumably an occasional lost
    // update is harmless here since any offset value works; confirm.
    m_offset_counter++;
    if (m_offset_counter * offset_multiplier > (int) GetOsPageSize())
        m_offset_counter = 0;

    // Consume the chosen amount of stack; the value itself is unused.
    (void)_alloca(m_offset_counter * offset_multiplier);

    // Unpack and free the heap-allocated parameter block before invoking
    // the user routine (it was allocated by CreateNewOSThread).
    intermediateThreadParam* param = (intermediateThreadParam*)arg;

    LPTHREAD_START_ROUTINE ThreadFcnPtr = param->lpThreadFunction;
    PVOID args = param->lpArg;
    delete param;

    return ThreadFcnPtr(args);
}
// Create a runtime utility thread whose stack size is chosen from a small
// set of buckets.  Returns the new thread handle (NULL on failure) and, if
// pThreadId is non-NULL, reports the OS thread id through it.
HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName, DWORD flags, DWORD* pThreadId)
{
    LIMITED_METHOD_CONTRACT;

    // TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative
    // here because this is a last-minute fix.

    SIZE_T stackSize;
    if (stackSizeBucket == StackSize_Small)
    {
        stackSize = 256 * 1024;
    }
    else if (stackSizeBucket == StackSize_Medium)
    {
        stackSize = 512 * 1024;
    }
    else
    {
        // Unknown buckets assert in debug builds, then fall back to the
        // large size (same behavior as the original switch fallthrough).
        if (stackSizeBucket != StackSize_Large)
            _ASSERTE(!"Bad stack size bucket");
        stackSize = 1024 * 1024;
    }

    flags |= STACK_SIZE_PARAM_IS_A_RESERVATION;

    DWORD newThreadId;
    HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &newThreadId);
#ifndef FEATURE_PAL
    SetThreadName(hThread, pName);
#endif // !FEATURE_PAL

    if (pThreadId)
        *pThreadId = newThreadId;

    return hThread;
}
// Retrieve the default stack reserve/commit sizes recorded in the PE header
// of the executable that launched this process.  Either out-parameter may be
// NULL.  Returns TRUE if the PE values were obtained; FALSE otherwise, in
// which case 256KB fallbacks are written instead.
BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    //
    // The EXE's stack sizes never change for the life of the process, so
    // read them once and cache them.
    //
    static SIZE_T s_exeStackReserve = 0;
    static SIZE_T s_exeStackCommit = 0;
    static BOOL s_haveExeStackSizes = FALSE;

#ifndef FEATURE_PAL
    if (!s_haveExeStackSizes)
    {
        HINSTANCE hInst = WszGetModuleHandle(NULL);
        _ASSERTE(hInst);  // WszGetModuleHandle should never fail on the module that started the process.
        EX_TRY
        {
            PEDecoder pe(hInst);
            pe.GetEXEStackSizes(&s_exeStackReserve, &s_exeStackCommit);
            s_haveExeStackSizes = TRUE;
        }
        EX_CATCH
        {
            s_haveExeStackSizes = FALSE;
        }
        EX_END_CATCH(SwallowAllExceptions);
    }
#endif // !FEATURE_PAL

    if (!s_haveExeStackSizes)
    {
        // Couldn't read the PE header (or running on the PAL): hand back
        // somewhat-reasonable fallback numbers and report failure.
        if (reserveSize != NULL)
            *reserveSize = 256 * 1024;
        if (commitSize != NULL)
            *commitSize = 256 * 1024;
        return FALSE;
    }

    if (reserveSize != NULL)
        *reserveSize = s_exeStackReserve;
    if (commitSize != NULL)
        *commitSize = s_exeStackCommit;

    return TRUE;
}
// Create the suspended OS thread backing this Thread object: allocate the
// trampoline parameter block, ensure our synchronization handles exist, and
// record the new OS thread id before anyone can resume the thread.
// Returns FALSE on any failure (no OS thread is left behind).
BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    DWORD ourId = 0;
    HANDLE h = NULL;
    // Created suspended: HasStarted must complete before the thread runs.
    DWORD dwCreationFlags = CREATE_SUSPENDED;

    dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;

#ifndef FEATURE_PAL // the PAL does its own adjustments as necessary
    if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize())
    {
        // On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of
        // a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB).
        sizeToCommitOrReserve = GetOsPageSize() + 1;
    }
#endif // !FEATURE_PAL

    intermediateThreadParam* lpThreadArgs = new (nothrow) intermediateThreadParam;
    if (lpThreadArgs == NULL)
    {
        return FALSE;
    }
    // Holder frees the block on any failure path below.
    NewHolder<intermediateThreadParam> argHolder(lpThreadArgs);

    // Make sure we have all our handles, in case someone tries to suspend us
    // as we are starting up.
    if (!AllocHandles())
    {
        // OS is out of handles/memory?
        return FALSE;
    }

    lpThreadArgs->lpThreadFunction = start;
    lpThreadArgs->lpArg = args;

    h = ::CreateThread(NULL /*=SECURITY_ATTRIBUTES*/,
                       sizeToCommitOrReserve,
                       intermediateThreadProc,
                       lpThreadArgs,
                       dwCreationFlags,
                       &ourId);

    if (h == NULL)
        return FALSE;

    // The new OS thread owns lpThreadArgs now (intermediateThreadProc
    // deletes it); keep the holder from double-freeing.
    argHolder.SuppressRelease();

    _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted.

    SetThreadHandle(h);
    m_WeOwnThreadHandle = TRUE;

    // Before we do the resume, we need to take note of the new ThreadId. This
    // is necessary because -- before the thread starts executing at KickofThread --
    // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could
    // call into managed code. During the consequent SetupThread, we need to
    // perform the Thread::HasStarted call instead of going through the normal
    // 'new thread' pathway.
    _ASSERTE(GetOSThreadId() == 0);
    _ASSERTE(ourId != 0);

    m_OSThreadId = ourId;

    FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);

#ifdef _DEBUG
    m_Creater.SetToCurrentThread();
#endif

    return TRUE;
}
//
// #threadDestruction
//
// General comments on thread destruction.
//
// The C++ Thread object can survive beyond the time when the Win32 thread has died.
// This is important if an exposed object has been created for this thread. The
// exposed object will survive until it is GC'ed.
//
// A client like an exposed object can place an external reference count on that
// object. We also place a reference count on it when we construct it, and we lose
// that count when the thread finishes doing useful work (OnThreadTerminate).
//
// One way OnThreadTerminate() is called is when the thread finishes doing useful
// work. This case always happens on the correct thread.
//
// The other way OnThreadTerminate() is called is during product shutdown. We do
// a "best effort" to eliminate all threads except the Main thread before shutdown
// happens. But there may be some background threads or external threads still
// running.
//
// When the final reference count disappears, we destruct. Until then, the thread
// remains in the ThreadStore, but is marked as "Dead".
//<TODO>
// @TODO cwb: for a typical shutdown, only background threads are still around.
// Should we interrupt them? What about the non-typical shutdown?</TODO>
// Bump the external (unmanaged) reference count that keeps this Thread
// object alive after its OS thread dies.  If an exposed managed Thread
// object exists, ensure it is pinned by a strong handle so it survives even
// with no managed references.  Returns the new count.
int Thread::IncExternalCount()
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    Thread *pCurThread = GetThread();

    _ASSERTE(m_ExternalRefCount > 0);
    int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount);
    // If we have an exposed object and the refcount is greater than one
    // we must make sure to keep a strong handle to the exposed object
    // so that we keep it alive even if nobody has a reference to it.
    if (pCurThread && ((*((void**)m_ExposedObject)) != NULL))
    {
        // The exposed object exists and needs a strong handle so check
        // to see if it has one.
        // Only a managed thread can setup StrongHnd.
        if ((*((void**)m_StrongHndToExposedObject)) == NULL)
        {
            // Handle writes of live objects require cooperative mode.
            GCX_COOP();
            // Store the object in the strong handle.
            StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject));
        }
    }

    return retVal;
}
// Drop the external reference count taken by IncExternalCount (or at
// construction).  When the count reaches zero and the OS thread is gone,
// this deletes the Thread object — possibly the caller's own.  GC mode and
// thread-store-lock handling are delicate here; statement order matters.
//   holdingLock - TRUE if the caller already holds the thread store lock.
// Returns the new count.
int Thread::DecExternalCount(BOOL holdingLock)
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    // Note that it's possible to get here with a NULL current thread (during
    // shutdown of the thread manager).
    Thread *pCurThread = GetThread();

    _ASSERTE (pCurThread == NULL || IsAtProcessExit()
              || (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread))
              || (holdingLock && ThreadStore::HoldingThreadStore(pCurThread)));

    BOOL ToggleGC = FALSE;      // did we leave cooperative mode on entry?
    BOOL SelfDelete = FALSE;    // did we delete our own Thread object?

    int retVal;

    // Must synchronize count and exposed object handle manipulation. We use the
    // thread lock for this, which implies that we must be in pre-emptive mode
    // to begin with and avoid any activity that would invoke a GC (this
    // acquires the thread store lock).
    if (pCurThread)
    {
        // TODO: we would prefer to use a GC Holder here, however it is hard
        // to get the case where we're deleting this thread correct given
        // the current macros. We want to supress the release of the holder
        // here which puts us in Preemptive mode, and also the switch to
        // Cooperative mode below, but since both holders will be named
        // the same thing (due to the generic nature of the macro) we can
        // not use GCX_*_SUPRESS_RELEASE() for 2 holders in the same scope
        // b/c they will both apply simply to the most narrowly scoped
        // holder.
        ToggleGC = pCurThread->PreemptiveGCDisabled();
        if (ToggleGC)
        {
            pCurThread->EnablePreemptiveGC();
        }
    }

    GCX_ASSERT_PREEMP();

    ThreadStoreLockHolder tsLock(!holdingLock);

    _ASSERTE(m_ExternalRefCount >= 1);
    _ASSERTE(!holdingLock ||
             ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
             IsAtProcessExit());

    retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount);

    if (retVal == 0)
    {
        // Last reference: close the thread handle and, if the OS thread is
        // already gone, destroy this Thread object.
        HANDLE h = GetThreadHandle();
        if (h == INVALID_HANDLE_VALUE)
        {
            h = m_ThreadHandleForClose;
            m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
        }
        // Can not assert like this.  We have already removed the Unstarted bit.
        //_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE);
        if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle)
        {
            ::CloseHandle(h);
            SetThreadHandle(INVALID_HANDLE_VALUE);
        }
        // Switch back to cooperative mode to manipulate the thread.
        if (pCurThread)
        {
            // TODO: we would prefer to use GCX_COOP here, see comment above.
            pCurThread->DisablePreemptiveGC();
        }

        GCX_ASSERT_COOP();

        // during process detach the thread might still be in the thread list
        // if it hasn't seen its DLL_THREAD_DETACH yet.  Use the following
        // tweak to decide if the thread has terminated yet.
        if (!HasValidThreadHandle())
        {
            SelfDelete = this == pCurThread;
            m_ExceptionState.FreeAllStackTraces();
            if (SelfDelete) {
                // Clear TLS before deleting our own Thread object.
                SetThread(NULL);
            }
            delete this;
        }

        tsLock.Release();

        // It only makes sense to restore the GC mode if we didn't just destroy
        // our own thread object.
        if (pCurThread && !SelfDelete && !ToggleGC)
        {
            pCurThread->EnablePreemptiveGC();
        }

        // Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP
        // in the same scope above...
        //
        // GCX_ASSERT_PREEMP()

        return retVal;
    }
    else if (pCurThread == NULL)
    {
        // We're in shutdown, too late to be worrying about having a strong
        // handle to the exposed thread object, we've already performed our
        // final GC.
        tsLock.Release();

        return retVal;
    }
    else
    {
        // Check to see if the external ref count reaches exactly one. If this
        // is the case and we have an exposed object then it is that exposed object
        // that is holding a reference to us. To make sure that we are not the
        // ones keeping the exposed object alive we need to remove the strong
        // reference we have to it.
        if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL))
        {
            // Switch back to cooperative mode to manipulate the object.

            // Don't want to switch back to COOP until we let go of the lock
            // however we are allowed to call StoreObjectInHandle here in preemptive
            // mode because we are setting the value to NULL.
            CONTRACT_VIOLATION(ModeViolation);

            // Clear the handle and leave the lock.
            // We do not have to to DisablePreemptiveGC here, because
            // we just want to put NULL into a handle.
            StoreObjectInHandle(m_StrongHndToExposedObject, NULL);

            tsLock.Release();

            // Switch back to the initial GC mode.
            if (ToggleGC)
            {
                pCurThread->DisablePreemptiveGC();
            }

            GCX_ASSERT_COOP();

            return retVal;
        }
    }

    tsLock.Release();

    // Switch back to the initial GC mode.
    if (ToggleGC)
    {
        pCurThread->DisablePreemptiveGC();
    }

    return retVal;
}
//--------------------------------------------------------------------
// Destruction. This occurs after the associated native thread
// has died.
//--------------------------------------------------------------------
// Destructor: runs after the OS thread has died and the last external
// reference has been released (see #threadDestruction above).  Tears down
// wait queues, thread-store membership, handles, contexts and per-thread
// caches.
Thread::~Thread()
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    // TODO: enable this
    //_ASSERTE(GetThread() != this);
    _ASSERTE(m_ThrewControlForThread == 0);

    // AbortRequest is coupled with TrapReturningThread.
    // We should have unmarked the thread for abort.
    // !!! Can not assert here.  If a thread has no managed code on stack
    // !!! we leave the g_TrapReturningThread set so that the thread will be
    // !!! aborted if it enters managed code.
    //_ASSERTE(!IsAbortRequested());

    // We should not have the Thread marked for abort.  But if we have
    // we need to unmark it so that g_TrapReturningThreads is decremented.
    if (IsAbortRequested())
    {
        UnmarkThreadForAbort(TAR_ALL);
    }

#if defined(_DEBUG) && defined(TRACK_SYNC)
    _ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0);
    delete m_pTrackSync;
#endif // TRACK_SYNC

    _ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit());

    // Drain any sync blocks this thread is still queued on.
    if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit())
    {
        WaitEventLink *walk = &m_WaitEventLink;
        while (walk->m_Next) {
            // NOTE(review): 'walk' is never advanced in this loop body;
            // presumably ThreadQueue::RemoveThread unlinks walk->m_Next as a
            // side effect, which is what terminates the loop — confirm.
            ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1));
            StoreEventToEventStore (walk->m_Next->m_EventWait);
        }
        m_WaitEventLink.m_Next = NULL;
    }

    if (m_StateNC & TSNC_ExistInThreadStore) {
        BOOL ret;
        ret = ThreadStore::RemoveThread(this);
        _ASSERTE(ret);
    }

#ifdef _DEBUG
    // Poison the frame chain so stale use is caught in debug builds.
    m_pFrame = (Frame *)POISONC;
#endif

    // Update Perfmon counters.
    COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical--);

    // Current recognized threads are non-runtime threads that are alive and ran under the
    // runtime. Check whether this Thread was one of them.
    if ((m_State & TS_WeOwn) == 0)
    {
        COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads--);
    }
    else
    {
        COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical--);
    }

    // Normally we shouldn't get here with a valid thread handle; however if SetupThread
    // failed (due to an OOM for example) then we need to CloseHandle the thread
    // handle if we own it.
    if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE))
    {
        CloseHandle(GetThreadHandle());
    }

    if (m_DebugSuspendEvent.IsValid())
    {
        m_DebugSuspendEvent.CloseEvent();
    }
    if (m_EventWait.IsValid())
    {
        m_EventWait.CloseEvent();
    }

    FreeIOCompletionContext();

    if (m_OSContext)
        delete m_OSContext;

    if (GetSavedRedirectContext())
    {
        delete GetSavedRedirectContext();
        SetSavedRedirectContext(NULL);
    }

#ifdef FEATURE_COMINTEROP
    if (m_pRCWStack)
        delete m_pRCWStack;
#endif

    if (m_pExceptionDuringStartup)
    {
        Exception::Delete (m_pExceptionDuringStartup);
    }

    ClearContext();

    if (!IsAtProcessExit())
    {
        // Destroy any handles that we're using to hold onto exception objects
        SafeSetThrowables(NULL);

        DestroyShortWeakHandle(m_ExposedObject);
        DestroyStrongHandle(m_StrongHndToExposedObject);
    }

    // Return this thread's thin-lock id to the dispenser for reuse.
    g_pThinLockThreadIdDispenser->DisposeId(GetThreadId());

#ifdef FEATURE_PREJIT
    if (m_pIBCInfo) {
        delete m_pIBCInfo;
    }
#endif

#ifdef FEATURE_EVENT_TRACE
    // Destruct the thread local type cache for allocation sampling
    if(m_pAllLoggedTypes) {
        ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes);
    }
#endif // FEATURE_EVENT_TRACE

    // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
    CrstHolder lock(&g_DeadlockAwareCrst);
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Balance a prior CoInitialize on this thread.  Must be called on the
// thread itself, in preemptive mode.
void Thread::BaseCoUninitialize()
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_SO_INTOLERANT;
    STATIC_CONTRACT_MODE_PREEMPTIVE;

    _ASSERTE(GetThread() == this);

    BEGIN_SO_TOLERANT_CODE(this);
    // BEGIN_SO_TOLERANT_CODE wraps a __try/__except around this call, so if the OS were to allow
    // an exception to leak through to us, we'll catch it.
    ::CoUninitialize();
    END_SO_TOLERANT_CODE;

}// BaseCoUninitialize
#ifdef FEATURE_COMINTEROP
// Balance a prior RoInitialize on this thread.  Only valid when WinRT is
// supported and this thread actually performed the WinRT initialization.
void Thread::BaseWinRTUninitialize()
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_SO_INTOLERANT;
    STATIC_CONTRACT_MODE_PREEMPTIVE;

    _ASSERTE(WinRTSupported());
    _ASSERTE(GetThread() == this);
    _ASSERTE(IsWinRTInitialized());

    BEGIN_SO_TOLERANT_CODE(this);
    RoUninitialize();
    END_SO_TOLERANT_CODE;
}
#endif // FEATURE_COMINTEROP
// Balance any CoInitialize/RoInitialize this thread performed, unless the
// process is exiting (COM may already be torn down at process exit).
// Clears the corresponding state bits after uninitializing.
void Thread::CoUninitialize()
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // Running threads might have performed a CoInitialize which must
    // now be balanced.
    BOOL needsUninitialize = IsCoInitialized()
#ifdef FEATURE_COMINTEROP
        || IsWinRTInitialized()
#endif // FEATURE_COMINTEROP
        ;

    if (!IsAtProcessExit() && needsUninitialize)
    {
        GCX_PREEMP();
        CONTRACT_VIOLATION(ThrowsViolation);

        if (IsCoInitialized())
        {
            BaseCoUninitialize();
            FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized);
        }

#ifdef FEATURE_COMINTEROP
        if (IsWinRTInitialized())
        {
            _ASSERTE(WinRTSupported());
            BaseWinRTUninitialize();
            ResetWinRTInitialized();
        }
#endif // FEATURE_COMINTEROP
    }
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Sweep the thread store for Thread objects whose OS threads have detached
// and for finalized managed Thread objects, releasing their runtime state.
// Must be called WITHOUT the thread store lock held (we take it here, and
// may release/reacquire it mid-loop when a debugger is attached).
void Thread::CleanupDetachedThreads()
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE(!ThreadStore::HoldingThreadStore());

    ThreadStoreLockHolder threadStoreLockHolder;

    Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0);

    STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n");

    while (thread != NULL)
    {
        Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0);

        if (thread->IsDetached() && thread->m_UnmanagedRefCount == 0)
        {
            STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread);

            // Unmark that the thread is detached while we have the
            // thread store lock. This will ensure that no other
            // thread will race in here and try to delete it, too.
            FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached);
            FastInterlockDecrement(&m_DetachCount);
            if (!thread->IsBackground())
                FastInterlockDecrement(&m_ActiveDetachCount);

            // If the debugger is attached, then we need to unlock the
            // thread store before calling OnThreadTerminate. That
            // way, we won't be holding the thread store lock if we
            // need to block sending a detach thread event.
            BOOL debuggerAttached =
#ifdef DEBUGGING_SUPPORTED
                CORDebuggerAttached();
#else // !DEBUGGING_SUPPORTED
                FALSE;
#endif // !DEBUGGING_SUPPORTED

            if (debuggerAttached)
                ThreadStore::UnlockThreadStore();

            thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE);

#ifdef DEBUGGING_SUPPORTED
            if (debuggerAttached)
            {
                ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);

                // We remember the next Thread in the thread store
                // list before deleting the current one. But we can't
                // use that Thread pointer now that we release the
                // thread store lock in the middle of the loop. We
                // have to start from the beginning of the list every
                // time. If two threads T1 and T2 race into
                // CleanupDetachedThreads, then T1 will grab the first
                // Thread on the list marked for deletion and release
                // the lock. T2 will grab the second one on the
                // list. T2 may complete destruction of its Thread,
                // then T1 might re-acquire the thread store lock and
                // try to use the next Thread in the thread store. But
                // T2 just deleted that next Thread.
                thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
            }
            else
#endif // DEBUGGING_SUPPORTED
            {
                thread = next;
            }
        }
        else if (thread->HasThreadState(TS_Finalized))
        {
            STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread);

            thread->ResetThreadState(TS_Finalized);
            // We have finalized the managed Thread object.  Now it is time to clean up the unmanaged part
            thread->DecExternalCount(TRUE);
            thread = next;
        }
        else
        {
            thread = next;
        }
    }

    s_fCleanFinalizedThread = FALSE;
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Release this thread's COM state: drop RCWs cached for an STA thread and
// balance any CoInitialize/RoInitialize the thread performed.  Must run on
// the thread whose state is being cleaned (only the initializing thread may
// call CoUninitialize).
void Thread::CleanupCOMState()
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    if (GetFinalApartment() == Thread::AS_InSTA)
        ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
#endif // FEATURE_COMINTEROP

    // Running threads might have performed a CoInitialize which must
    // now be balanced. However only the thread that called COInitialize can
    // call CoUninitialize.

    BOOL needsUninitialize = IsCoInitialized()
#ifdef FEATURE_COMINTEROP
        || IsWinRTInitialized()
#endif // FEATURE_COMINTEROP
        ;

    if (needsUninitialize)
    {
        GCX_PREEMP();
        CONTRACT_VIOLATION(ThrowsViolation);

        if (IsCoInitialized())
        {
            BaseCoUninitialize();
            ResetCoInitialized();
        }

#ifdef FEATURE_COMINTEROP
        if (IsWinRTInitialized())
        {
            _ASSERTE(WinRTSupported());
            BaseWinRTUninitialize();
            ResetWinRTInitialized();
        }
#endif // FEATURE_COMINTEROP
    }
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
// See general comments on thread destruction (code:#threadDestruction) above.
void Thread::OnThreadTerminate(BOOL holdingLock)
{
CONTRACTL {
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
// #ReportDeadOnThreadTerminate
// Caller should have put the TS_ReportDead bit on by now.
// We don't want any windows after the exit event but before the thread is marked dead.
// If a debugger attached during such a window (or even took a dump at the exit event),
// then it may not realize the thread is dead.
// So ensure we mark the thread as dead before we send the tool notifications.
// The TS_ReportDead bit will cause the debugger to view this as TS_Dead.
_ASSERTE(HasThreadState(TS_ReportDead));
// Should not use OSThreadId:
// OSThreadId may change for the current thread if the thread is blocked and rescheduled
// by host.
Thread *pCurrentThread = GetThread();
DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0;
DWORD ThisThreadID = GetThreadId();
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// If the currently running thread is the thread that died and it is an STA thread, then we
// need to release all the RCW's in the current context. However, we cannot do this if we
// are in the middle of process detach.
if (!IsAtProcessExit() && this == GetThread())
{
CleanupCOMState();
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
if (g_fEEShutDown != 0)
{
// We have started shutdown. Not safe to touch CLR state.
return;
}
// We took a count during construction, and we rely on the count being
// non-zero as we terminate the thread here.
_ASSERTE(m_ExternalRefCount > 0);
// The thread is no longer running. It's important that we zero any general OBJECTHANDLE's
// on this Thread object. That's because we need the managed Thread object to be subject to
// GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. If e.g. the
// Thread's AbortReason (which is an arbitrary object) contains transitively a reference back
// to the Thread, then we have an uncollectible cycle. When the thread is executing, nothing
// can be collected anyway. But now that we stop running the cycle concerns us.
//
// It's important that we only use OBJECTHANDLE's that are retrievable while the thread is
// still running. That's what allows us to zero them here with impunity:
{
// No handles to clean up in the m_ExceptionState
_ASSERTE(!m_ExceptionState.IsExceptionInProgress());
GCX_COOP();
// Destroy the LastThrown handle (and anything that violates the above assert).
SafeSetThrowables(NULL);
// Cleaning up the AbortReason is tricky, since the handle is only valid if the ADID is valid
// ...and we can only perform this operation if other threads aren't racing to update these
// values on our thread asynchronously.
ClearAbortReason();
// Free all structures related to thread statics for this thread
DeleteThreadStaticData();
}
if (GCHeapUtilities::IsGCHeapInitialized())
{
// Guaranteed to NOT be a shutdown case, because we tear down the heap before
// we tear down any threads during shutdown.
if (ThisThreadID == CurrentThreadID)
{
GCX_COOP();
GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL);
m_alloc_context.init();
}
}
// We switch a thread to dead when it has finished doing useful work. But it
// remains in the thread store so long as someone keeps it alive. An exposed
// object will do this (it releases the refcount in its finalizer). If the
// thread is never released, we have another look during product shutdown and
// account for the unreleased refcount of the uncollected exposed object:
if (IsDead())
{
GCX_COOP();
_ASSERTE(IsAtProcessExit());
ClearContext();
if (m_ExposedObject != NULL)
DecExternalCount(holdingLock); // may destruct now
}
else
{
#ifdef DEBUGGING_SUPPORTED
//
// If we're debugging, let the debugger know that this thread is
// gone.
//
// There is a race here where the debugger could have attached after
// we checked (and thus didn't release the lock). In this case,
// we can't call out to the debugger or we risk a deadlock.
//
if (!holdingLock && CORDebuggerAttached())
{
g_pDebugInterface->DetachThread(this);
}
#endif // DEBUGGING_SUPPORTED
#ifdef PROFILING_SUPPORTED
// If a profiler is present, then notify the profiler of thread destroy
{
BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
GCX_PREEMP();
g_profControlBlock.pProfInterface->ThreadDestroyed((ThreadID) this);
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
if (!holdingLock)
{
LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n"));
ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
}
if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
{
// We must be holding the ThreadStore lock in order to clean up alloc context.
// We should never call FixAllocContext during GC.
GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL);
m_alloc_context.init();
}
FastInterlockOr((ULONG *) &m_State, TS_Dead);
ThreadStore::s_pThreadStore->m_DeadThreadCount++;
ThreadStore::s_pThreadStore->IncrementDeadThreadCountForGCTrigger();
if (IsUnstarted())
ThreadStore::s_pThreadStore->m_UnstartedThreadCount--;
else
{
if (IsBackground())
ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
}
FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background));
//
// If this thread was told to trip for debugging between the
// sending of the detach event above and the locking of the
// thread store lock, then remove the flag and decrement the
// global trap returning threads count.
//
if (!IsAtProcessExit())
{
// A thread can't die during a GCPending, because the thread store's
// lock is held by the GC thread.
if (m_State & TS_DebugSuspendPending)
UnmarkForSuspension(~TS_DebugSuspendPending);
// CoreCLR does not support user-requested thread suspension
_ASSERTE(!(m_State & TS_UserSuspendPending));
if (CurrentThreadID == ThisThreadID && IsAbortRequested())
{
UnmarkThreadForAbort(Thread::TAR_ALL);
}
}
if (GetThreadHandle() != INVALID_HANDLE_VALUE)
{
if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
{
m_ThreadHandleForClose = GetThreadHandle();
}
SetThreadHandle (INVALID_HANDLE_VALUE);
}
m_OSThreadId = 0;
// If nobody else is holding onto the thread, we may destruct it here:
ULONG oldCount = DecExternalCount(TRUE);
// If we are shutting down the process, we only have one thread active in the
// system. So we can disregard all the reasons that hold this thread alive --
// TLS is about to be reclaimed anyway.
if (IsAtProcessExit())
while (oldCount > 0)
{
oldCount = DecExternalCount(TRUE);
}
// ASSUME THAT THE THREAD IS DELETED, FROM HERE ON
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0);
_ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
ThreadStore::s_pThreadStore->m_BackgroundThreadCount);
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
ThreadStore::s_pThreadStore->m_UnstartedThreadCount);
_ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
ThreadStore::s_pThreadStore->m_DeadThreadCount);
// One of the components of OtherThreadsComplete() has changed, so check whether
// we should now exit the EE.
ThreadStore::CheckForEEShutdown();
if (ThisThreadID == CurrentThreadID)
{
// NULL out the thread block in the tls. We can't do this if we aren't on the
// right thread. But this will only happen during a shutdown. And we've made
// a "best effort" to reduce to a single thread before we begin the shutdown.
SetThread(NULL);
SetAppDomain(NULL);
}
if (!holdingLock)
{
LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n"));
ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID);
}
}
}
// Helper functions to check for duplicate handles. We only do this check if
// a wait-for-multiple fails.
// qsort comparator imposing a total order on HANDLE values so that any
// duplicates become adjacent after sorting (see CheckForDuplicateHandles).
int __cdecl compareHandles( const void *arg1, const void *arg2 )
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    HANDLE lhs = *(HANDLE*)arg1;
    HANDLE rhs = *(HANDLE*)arg2;

    if (lhs == rhs)
        return 0;
    return (lhs < rhs) ? -1 : 1;
}
// Returns TRUE if the array contains the same HANDLE value more than once.
// Sorts the caller's array in place (side effect!) and then scans adjacent
// entries for equality.
BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    qsort(handles,countHandles,sizeof(HANDLE),compareHandles);

    BOOL fFoundDuplicate = FALSE;
    for (int idx = 1; idx < countHandles; idx++)
    {
        if (handles[idx] == handles[idx-1])
        {
            fFoundDuplicate = TRUE;
            break;
        }
    }
    return fFoundDuplicate;
}
//--------------------------------------------------------------------
// Based on whether this thread has a message pump, do the appropriate
// style of Wait.
//--------------------------------------------------------------------
// Alertable-aware wait over an array of handles. Wraps DoAppropriateWaitWorker
// in EE_TRY_FOR_FINALLY so that, when this wait backs a monitor Object.Wait
// (syncState != NULL), the monitor state is restored no matter how the worker
// exits (normal return or exception unwind).
//
// syncState->Restore(FALSE) when the wait was satisfied by a signal (the
// signalling thread already removed us from the syncblock's wait list);
// Restore(TRUE) otherwise (timeout / exception), so we unlink ourselves.
DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
                                DWORD millis, WaitMode mode, PendingSync *syncState)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;

    INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
    // A pending sync restore only makes sense for alertable waits.
    _ASSERTE(alertable || syncState == 0);

    // EE_TRY_FOR_FINALLY runs its body through a callback, so marshal the
    // arguments (and the result) through a stack-allocated Param struct.
    struct Param
    {
        Thread *pThis;
        int countHandles;
        HANDLE *handles;
        BOOL waitAll;
        DWORD millis;
        WaitMode mode;
        DWORD dwRet;
    } param;
    param.pThis = this;
    param.countHandles = countHandles;
    param.handles = handles;
    param.waitAll = waitAll;
    param.millis = millis;
    param.mode = mode;
    param.dwRet = (DWORD) -1;

    // BUGFIX: this argument had been corrupted to the HTML-entity-mangled
    // token "¶m"; it must be the address of the local Param struct.
    EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
        pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode);
    }
    EE_FINALLY {
        if (syncState) {
            if (!GOT_EXCEPTION() &&
                param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) {
                // This thread has been removed from the syncblk waiting list by the signalling thread
                syncState->Restore(FALSE);
            }
            else
                syncState->Restore(TRUE);
        }
        // WAIT_IO_COMPLETION must be absorbed by the worker's retry loop and
        // never escape to callers.
        _ASSERTE (param.dwRet != WAIT_IO_COMPLETION);
    }
    EE_END_FINALLY;

    return(param.dwRet);
}
// Callback-function flavor of DoAppropriateWait: same restore-on-exit
// semantics as the handle-array overload, but the actual wait is performed
// by the supplied AppropriateWaitFunc. Success is exactly WAIT_OBJECT_0
// (single logical waitable), hence the simpler Restore condition below.
DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args,
                                DWORD millis, WaitMode mode,
                                PendingSync *syncState)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;

    INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
    // A pending sync restore only makes sense for alertable waits.
    _ASSERTE(alertable || syncState == 0);

    // EE_TRY_FOR_FINALLY runs its body through a callback, so marshal the
    // arguments (and the result) through a stack-allocated Param struct.
    struct Param
    {
        Thread *pThis;
        AppropriateWaitFunc func;
        void *args;
        DWORD millis;
        WaitMode mode;
        DWORD dwRet;
    } param;
    param.pThis = this;
    param.func = func;
    param.args = args;
    param.millis = millis;
    param.mode = mode;
    param.dwRet = (DWORD) -1;

    // BUGFIX: this argument had been corrupted to the HTML-entity-mangled
    // token "¶m"; it must be the address of the local Param struct.
    EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
        pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode);
    }
    EE_FINALLY {
        if (syncState) {
            if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
                // This thread has been removed from the syncblk waiting list by the signalling thread
                syncState->Restore(FALSE);
            }
            else
                syncState->Restore(TRUE);
        }
        // WAIT_IO_COMPLETION must be absorbed by the worker's retry loop and
        // never escape to callers.
        _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
    }
    EE_END_FINALLY;

    return(param.dwRet);
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
//--------------------------------------------------------------------
// helper to do message wait
//--------------------------------------------------------------------
// Performs an STA-friendly wait via CoWaitForMultipleHandles so COM messages
// keep pumping while we block. Translates the HRESULT-based result back into
// Win32 WAIT_* codes for callers.
//
// In:  numWaiters/phEvent - handle array; bWaitAll - wait-for-all semantics;
//      millis - timeout; bAlertable - allow APC delivery (COWAIT_ALERTABLE).
// Out: WAIT_OBJECT_0-biased index, WAIT_TIMEOUT, or WAIT_FAILED.
DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable)
{
    STATIC_CONTRACT_THROWS;
    // The true contract for GC trigger should be the following. But this puts a very strong restriction
    // on contract for functions that call EnablePreemptiveGC.
    //if (GetThread() && !ThreadStore::HoldingThreadStore(GetThread())) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
    STATIC_CONTRACT_SO_INTOLERANT;
    STATIC_CONTRACT_GC_TRIGGERS;

    DWORD flags = 0;
    DWORD dwReturn=WAIT_ABANDONED;

    Thread* pThread = GetThread();
    // If pThread is NULL, we'd better shut down.
    if (pThread == NULL)
        _ASSERTE (g_fEEShutDown);

    DWORD lastError = 0;
    BEGIN_SO_TOLERANT_CODE(pThread);

    // If we're going to pump, we cannot use WAIT_ALL. That's because the wait would
    // only be satisfied if a message arrives while the handles are signalled. If we
    // want true WAIT_ALL, we need to fire up a different thread in the MTA and wait
    // on his result. This isn't implemented yet.
    //
    // A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll
    // in an STA with more than one handle.
    if (bWaitAll)
    {
        if (numWaiters == 1)
            bWaitAll = FALSE;

        // The check that's supposed to prevent this condition from occurring, in WaitHandleNative::CorWaitMultipleNative,
        // is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT.
        // So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point.
        // We can't fix this, because it's a breaking change, so we just won't assert here.
        // The result is that WaitAll on an STA thread in CoreCLR will behave strangely, as described above.
    }

    if (bWaitAll)
        flags |= COWAIT_WAITALL;

    if (bAlertable)
        flags |= COWAIT_ALERTABLE;

    HRESULT hr = S_OK;
    hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn);

    if (hr == RPC_S_CALLPENDING)
    {
        // CoWait reports timeout as RPC_S_CALLPENDING; map it back to Win32.
        dwReturn = WAIT_TIMEOUT;
    }
    else if (FAILED(hr))
    {
        // The service behaves differently on an STA vs. MTA in how much
        // error information it propagates back, and in which form. We currently
        // only get here in the STA case, so bias this logic that way.
        dwReturn = WAIT_FAILED;
    }
    else
    {
        dwReturn += WAIT_OBJECT_0;  // success -- bias back
    }

    lastError = ::GetLastError();
    END_SO_TOLERANT_CODE;
    // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
    ::SetLastError(lastError);

    return dwReturn;
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Thin SO-tolerant shim over ::WaitForMultipleObjectsEx. Runs the OS wait
// inside a BEGIN/END_SO_TOLERANT_CODE bracket and preserves the wait's
// last-error value across the macro's cleanup.
DWORD WaitForMultipleObjectsEx_SO_TOLERANT (DWORD nCount, HANDLE *lpHandles, BOOL bWaitAll,DWORD dwMilliseconds, BOOL bAlertable)
{
    STATIC_CONTRACT_SO_INTOLERANT;

    DWORD waitResult = WAIT_FAILED;
    DWORD savedError = 0;

    BEGIN_SO_TOLERANT_CODE (GetThread ());
    waitResult = ::WaitForMultipleObjectsEx (nCount, lpHandles, bWaitAll, dwMilliseconds, bAlertable);
    savedError = ::GetLastError();
    END_SO_TOLERANT_CODE;

    // END_SO_TOLERANT_CODE clobbers the thread's last error; restore the one
    // the wait itself produced so callers can interpret WAIT_FAILED.
    ::SetLastError(savedError);

    return waitResult;
}
//--------------------------------------------------------------------
// Do appropriate wait based on apartment state (STA or MTA)
// Route a wait according to the thread's apartment state: alertable waits on
// a non-MTA (STA) thread must pump COM messages via MsgWaitHelper; everything
// else goes straight to the OS wait.
DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll,
                                        DWORD timeout, WaitMode mode)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        SO_INTOLERANT;
    }
    CONTRACTL_END;

    const BOOL fAlertable = (mode & WaitMode_Alertable) != 0;

#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
    if (fAlertable && !GetDomain()->MustForceTrivialWaitOperations())
    {
        // STA threads must keep the message pump alive while blocked.
        if (GetFinalApartment() != AS_InMTA)
        {
            return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, fAlertable);
        }
    }
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT

    return WaitForMultipleObjectsEx_SO_TOLERANT(numWaiters, pHandles, bWaitAll, timeout, fAlertable);
}
// A helper called by our two flavors of DoAppropriateWaitWorker
//
// Prepares this thread for an alertable wait: marks it TS_Interruptible and
// delivers any already-pending Thread.Interrupt before we go to sleep, so the
// interrupt is not silently lost. No-op when aborts are prevented.
void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // If thread abort is prevented, we do not want this thread to see thread abort and thread interrupt exception.
    if (IsAbortPrevented())
    {
        return;
    }

    // A word about ordering for Interrupt. If someone tries to interrupt a thread
    // that's in the interruptible state, we queue an APC. But if they try to interrupt
    // a thread that's not in the interruptible state, we just record that fact. So
    // we have to set TS_Interruptible before we test to see whether someone wants to
    // interrupt us or else we have a race condition that causes us to skip the APC.
    FastInterlockOr((ULONG *) &m_State, TS_Interruptible);

    if (HasThreadStateNC(TSNC_InRestoringSyncBlock))
    {
        // The thread is restoring SyncBlock for Object.Wait; interrupt checks
        // are suppressed for this window (set by PendingSync::Restore).
        ResetThreadStateNC(TSNC_InRestoringSyncBlock);
    }
    else
    {
        // Deliver any pending interrupt now, before we block.
        HandleThreadInterrupt((mode & WaitMode_ADUnload) != 0);

        // Safe to clear the interrupted state, no APC could have fired since we
        // reset m_UserInterrupt (which inhibits our APC callback from doing
        // anything).
        FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
    }
}
void MarkOSAlertableWait()
{
LIMITED_METHOD_CONTRACT;
GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait);
}
void UnMarkOSAlertableWait()
{
LIMITED_METHOD_CONTRACT;
GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait);
}
//--------------------------------------------------------------------
// Based on whether this thread has a message pump, do the appropriate
// style of Wait.
//--------------------------------------------------------------------
// Core handle-array wait. Optionally defers to a managed SynchronizationContext,
// otherwise switches to preemptive GC mode and waits via DoAppropriateAptStateWait,
// retrying across APC wakeups (WAIT_IO_COMPLETION) with the timeout decremented,
// and papering over one specific WAIT_FAILED case (invalid handle of a dying
// thread). Never returns WAIT_IO_COMPLETION.
DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
                                      DWORD millis, WaitMode mode)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    DWORD ret = 0;

    BOOL alertable = (mode & WaitMode_Alertable) != 0;
    // Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx.
    // So if we defer to a sync ctx, we will lose any extra bits. We must therefore not
    // defer to a sync ctx if doing any non-default wait.
    // If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx
    // which will make mode != WaitMode_Alertable.
    BOOL ignoreSyncCtx = (mode != WaitMode_Alertable);

    if (GetDomain()->MustForceTrivialWaitOperations())
        ignoreSyncCtx = TRUE;

    // Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization
    // context on the current thread and if there is, dispatch to it to do the wait.
    // If the wait is non alertable we cannot forward the call to the sync context
    // since fundamental parts of the system (such as the GC) rely on non alertable
    // waits not running any managed code. Also if we are past the point in shutdown where we
    // are allowed to run managed code then we can't forward the call to the sync context.
    if (!ignoreSyncCtx && alertable && CanRunManagedCode(LoaderLockCheck::None)
        && !HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
    {
        GCX_COOP();

        BOOL fSyncCtxPresent = FALSE;
        OBJECTREF SyncCtxObj = NULL;
        GCPROTECT_BEGIN(SyncCtxObj)
        {
            GetSynchronizationContext(&SyncCtxObj);
            if (SyncCtxObj != NULL)
            {
                SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj;
                if (syncRef->IsWaitNotificationRequired())
                {
                    fSyncCtxPresent = TRUE;
                    ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis);
                }
            }
        }
        GCPROTECT_END();

        if (fSyncCtxPresent)
            return ret;
    }

    // Before going to pre-emptive mode the thread needs to be flagged as waiting for
    // the debugger. This used to be accomplished by the TS_Interruptible flag but that
    // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
    // COOP mode so we set the bit before the transition. For the calls that are already
    // in pre-emptive mode those are still buggy. This is only a partial fix.
    BOOL isCoop = PreemptiveGCDisabled();
    ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);

    GCX_PREEMP();

    if (alertable)
    {
        DoAppropriateWaitWorkerAlertableHelper(mode);
    }

    StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);

    ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);

    // dwStart/dwEnd track elapsed time so the remaining timeout can be
    // recomputed on each retry.
    ULONGLONG dwStart = 0, dwEnd;
retry:
    if (millis != INFINITE)
    {
        dwStart = CLRGetTickCount64();
    }

    ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode);

    if (ret == WAIT_IO_COMPLETION)
    {
        _ASSERTE (alertable);

        if (m_State & TS_Interrupted)
        {
            HandleThreadInterrupt(mode & WaitMode_ADUnload);
        }
        // We could be woken by some spurious APC or an EE APC queued to
        // interrupt us. In the latter case the TS_Interrupted bit will be set
        // in the thread state bits. Otherwise we just go back to sleep again.
        if (millis != INFINITE)
        {
            dwEnd = CLRGetTickCount64();
            if (dwEnd >= dwStart + millis)
            {
                ret = WAIT_TIMEOUT;
                goto WaitCompleted;
            }
            else
            {
                millis -= (DWORD)(dwEnd - dwStart);
            }
        }
        goto retry;
    }
    _ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) ||
             (ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) ||
             (ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED));
    // countHandles is used as an unsigned -- it should never be negative.
    _ASSERTE(countHandles >= 0);

    // We support precisely one WAIT_FAILED case, where we attempt to wait on a
    // thread handle and the thread is in the process of dying we might get a
    // invalid handle substatus. Turn this into a successful wait.
    // There are three cases to consider:
    //  1)  Only waiting on one handle: return success right away.
    //  2)  Waiting for all handles to be signalled: retry the wait without the
    //      affected handle.
    //  3)  Waiting for one of multiple handles to be signalled: return with the
    //      first handle that is either signalled or has become invalid.
    if (ret == WAIT_FAILED)
    {
        DWORD errorCode = ::GetLastError();
        if (errorCode == ERROR_INVALID_PARAMETER)
        {
            if (CheckForDuplicateHandles(countHandles, handles))
                COMPlusThrow(kDuplicateWaitObjectException);
            else
                COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode));
        }
        else if (errorCode == ERROR_ACCESS_DENIED)
        {
            // A Win32 ACL could prevent us from waiting on the handle.
            COMPlusThrow(kUnauthorizedAccessException);
        }
        else if (errorCode == ERROR_NOT_ENOUGH_MEMORY)
        {
            ThrowOutOfMemory();
        }
#ifdef FEATURE_PAL
        else if (errorCode == ERROR_NOT_SUPPORTED)
        {
            // "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync
            // object is included in the array
            COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_NamedSyncObjectWaitAnyWaitAll"));
        }
#endif
        else if (errorCode != ERROR_INVALID_HANDLE)
        {
            ThrowWin32(errorCode);
        }

        if (countHandles == 1)
            ret = WAIT_OBJECT_0;
        else if (waitAll)
        {
            // Probe all handles with a timeout of zero. When we find one that's
            // invalid, move it out of the list and retry the wait.
            for (int i = 0; i < countHandles; i++)
            {
                // WaitForSingleObject won't pump messages; we already probe enough space
                // before calling this function and we don't want to fail here, so we don't
                // do a transition to tolerant code here
                DWORD subRet = WaitForSingleObject (handles[i], 0);
                if (subRet != WAIT_FAILED)
                    continue;
                _ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE);
                // Shift the remaining handles down over the invalid one.
                if ((countHandles - i - 1) > 0)
                    memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE));
                countHandles--;
                break;
            }

            // Compute the new timeout value by assume that the timeout
            // is not large enough for more than one wrap
            dwEnd = CLRGetTickCount64();
            if (millis != INFINITE)
            {
                if (dwEnd >= dwStart + millis)
                {
                    ret = WAIT_TIMEOUT;
                    goto WaitCompleted;
                }
                else
                {
                    millis -= (DWORD)(dwEnd - dwStart);
                }
            }
            goto retry;
        }
        else
        {
            // Probe all handles with a timeout as zero, succeed with the first
            // handle that doesn't timeout.
            ret = WAIT_OBJECT_0;
            int i;
            for (i = 0; i < countHandles; i++)
            {
            TryAgain:
                // WaitForSingleObject won't pump messages; we already probe enough space
                // before calling this function and we don't want to fail here, so we don't
                // do a transition to tolerant code here
                DWORD subRet = WaitForSingleObject (handles[i], 0);
                if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED))
                    break;
                if (subRet == WAIT_ABANDONED)
                {
                    // Rebias the index into the WAIT_ABANDONED range.
                    ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED;
                    break;
                }
                // If we get alerted it just masks the real state of the current
                // handle, so retry the wait.
                if (subRet == WAIT_IO_COMPLETION)
                    goto TryAgain;
                _ASSERTE(subRet == WAIT_TIMEOUT);
                ret++;
            }
        }
    }

WaitCompleted:
    _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));

    return ret;
}
// Callback-function flavor of the wait worker: switches to preemptive GC mode,
// computes the WAIT_ALERTABLE / WAIT_MSGPUMP options for the callback, and
// retries across APC wakeups (WAIT_IO_COMPLETION) with the timeout decremented.
// Never returns WAIT_IO_COMPLETION.
DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
                                      DWORD millis, WaitMode mode)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    BOOL alertable = (mode & WaitMode_Alertable)!=0;

    // Before going to pre-emptive mode the thread needs to be flagged as waiting for
    // the debugger. This used to be accomplished by the TS_Interruptible flag but that
    // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
    // COOP mode so we set the bit before the transition. For the calls that are already
    // in pre-emptive mode those are still buggy. This is only a partial fix.
    BOOL isCoop = PreemptiveGCDisabled();
    ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
    GCX_PREEMP();

    // <TODO>
    // @TODO cwb: we don't know whether a thread has a message pump or
    // how to pump its messages, currently.
    // @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until
    // we get alertable waits on that platform.</TODO>
    DWORD ret;
    if(alertable)
    {
        DoAppropriateWaitWorkerAlertableHelper(mode);
    }

    // Translate alertability/apartment into the option bits understood by the
    // AppropriateWaitFunc callback.
    DWORD option;
    if (alertable)
    {
        option = WAIT_ALERTABLE;
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
        ApartmentState as = GetFinalApartment();
        if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations())
        {
            // STA threads must pump messages while waiting.
            option |= WAIT_MSGPUMP;
        }
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
    }
    else
    {
        option = 0;
    }

    ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);

    // dwStart/dwEnd track elapsed time so the remaining timeout can be
    // recomputed on each retry.
    ULONGLONG dwStart = 0;
    ULONGLONG dwEnd;
retry:
    if (millis != INFINITE)
    {
        dwStart = CLRGetTickCount64();
    }
    ret = func(args, millis, option);
    if (ret == WAIT_IO_COMPLETION)
    {
        _ASSERTE (alertable);

        if ((m_State & TS_Interrupted))
        {
            HandleThreadInterrupt(mode & WaitMode_ADUnload);
        }
        if (millis != INFINITE)
        {
            dwEnd = CLRGetTickCount64();
            if (dwEnd >= dwStart + millis)
            {
                ret = WAIT_TIMEOUT;
                goto WaitCompleted;
            }
            else
            {
                millis -= (DWORD)(dwEnd - dwStart);
            }
        }
        goto retry;
    }

WaitCompleted:
    _ASSERTE(ret == WAIT_OBJECT_0 ||
             ret == WAIT_ABANDONED ||
             ret == WAIT_TIMEOUT ||
             ret == WAIT_FAILED);
    _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));

    return ret;
}
//--------------------------------------------------------------------
// Only one style of wait for DoSignalAndWait since we don't support this on STA Threads
//--------------------------------------------------------------------
//--------------------------------------------------------------------
// Only one style of wait for DoSignalAndWait since we don't support this on STA Threads
//--------------------------------------------------------------------
// Signals handles[0] and waits on handles[1] atomically (see the worker),
// wrapped in EE_TRY_FOR_FINALLY so a pending monitor wait (syncState) is
// restored whether the worker returns or throws.
DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;

    // A pending sync restore only makes sense for alertable waits.
    _ASSERTE(alertable || syncState == 0);

    // EE_TRY_FOR_FINALLY runs its body through a callback, so marshal the
    // arguments (and the result) through a stack-allocated Param struct.
    struct Param
    {
        Thread *pThis;
        HANDLE *handles;
        DWORD millis;
        BOOL alertable;
        DWORD dwRet;
    } param;
    param.pThis = this;
    param.handles = handles;
    param.millis = millis;
    param.alertable = alertable;
    param.dwRet = (DWORD) -1;

    // BUGFIX: this argument had been corrupted to the HTML-entity-mangled
    // token "¶m"; it must be the address of the local Param struct.
    EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
        pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable);
    }
    EE_FINALLY {
        if (syncState) {
            if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
                // This thread has been removed from the syncblk waiting list by the signalling thread
                syncState->Restore(FALSE);
            }
            else
                syncState->Restore(TRUE);
        }
        // WAIT_IO_COMPLETION must be absorbed by the worker's retry loop and
        // never escape to callers.
        _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
    }
    EE_END_FINALLY;

    return(param.dwRet);
}
// Atomically signal pHandles[0] and wait on pHandles[1] via
// SignalObjectAndWait. APC wakeups (WAIT_IO_COMPLETION) retry with
// WaitForSingleObjectEx only -- the signal must not be repeated.
// Translates selected Win32 failures into managed exceptions.
DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    DWORD ret = 0;

    GCX_PREEMP();

    if(alertable)
    {
        DoAppropriateWaitWorkerAlertableHelper(WaitMode_None);
    }

    StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);

    ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);

    // dwStart/dwEnd track elapsed time so the remaining timeout can be
    // recomputed on each retry.
    ULONGLONG dwStart = 0, dwEnd;

    if (INFINITE != millis)
    {
        dwStart = CLRGetTickCount64();
    }

    // The signal happens exactly once, before the retry label.
    ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable);

retry:

    if (WAIT_IO_COMPLETION == ret)
    {
        _ASSERTE (alertable);
        // We could be woken by some spurious APC or an EE APC queued to
        // interrupt us. In the latter case the TS_Interrupted bit will be set
        // in the thread state bits. Otherwise we just go back to sleep again.
        if ((m_State & TS_Interrupted))
        {
            HandleThreadInterrupt(FALSE);
        }
        if (INFINITE != millis)
        {
            dwEnd = CLRGetTickCount64();
            if (dwStart + millis <= dwEnd)
            {
                ret = WAIT_TIMEOUT;
                goto WaitCompleted;
            }
            else
            {
                millis -= (DWORD)(dwEnd - dwStart);
            }
            // Restart the interval measurement for the next retry.
            dwStart = CLRGetTickCount64();
        }
        //Retry case we don't want to signal again so only do the wait...
        ret = WaitForSingleObjectEx(pHandles[1],millis,TRUE);
        goto retry;
    }

    if (WAIT_FAILED == ret)
    {
        DWORD errorCode = ::GetLastError();
        //If the handle to signal is a mutex and
        //   the calling thread is not the owner, errorCode is ERROR_NOT_OWNER
        switch(errorCode)
        {
            case ERROR_INVALID_HANDLE:
            case ERROR_NOT_OWNER:
            case ERROR_ACCESS_DENIED:
                COMPlusThrowWin32();
                break;

            case ERROR_TOO_MANY_POSTS:
                // Semaphore already at maximum: surface the error code to the
                // caller rather than throwing.
                ret = ERROR_TOO_MANY_POSTS;
                break;

            default:
                CONSISTENCY_CHECK_MSGF(0, ("This errorCode is not understood '(%d)''\n", errorCode));
                COMPlusThrowWin32();
                break;
        }
    }

WaitCompleted:

    //Check that the return state is valid
    _ASSERTE(WAIT_OBJECT_0 == ret ||
             WAIT_ABANDONED == ret ||
             WAIT_TIMEOUT == ret ||
             WAIT_FAILED == ret ||
             ERROR_TOO_MANY_POSTS == ret);

    //Wrong to time out if the wait was infinite
    _ASSERTE((WAIT_TIMEOUT != ret) || (INFINITE != millis));

    return ret;
}
// Dispatch a wait to the managed SynchronizationContext installed on this
// thread by invoking SynchronizationContext.InvokeWaitMethodHelper with the
// handle array marshalled into a managed IntPtr[] array.
DWORD Thread::DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(handles));
        PRECONDITION(IsProtectedByGCFrame (pSyncCtxObj));
    }
    CONTRACTL_END;
    MethodDescCallSite invokeWaitMethodHelper(METHOD__SYNCHRONIZATION_CONTEXT__INVOKE_WAIT_METHOD_HELPER);

    // Copy the native handles into a managed primitive (IntPtr) array.
    BASEARRAYREF handleArrayObj = (BASEARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I, countHandles);
    memcpyNoGCRefs(handleArrayObj->GetDataPtr(), handles, countHandles * sizeof(HANDLE));

    // Only the first four slots are populated; the rest of the aggregate is
    // zero-initialized per C++ aggregate-initialization rules.
    ARG_SLOT args[6] =
    {
        ObjToArgSlot(*pSyncCtxObj),
        ObjToArgSlot(handleArrayObj),
        BoolToArgSlot(waitAll),
        (ARG_SLOT)millis,
    };

    // Needed by TriggerGCForMDAInternal to avoid infinite recursion
    ThreadStateNCStackHolder holder(TRUE, TSNC_InsideSyncContextWait);

    return invokeWaitMethodHelper.Call_RetI4(args);
}
// Called out of SyncBlock::Wait() to block this thread until the Notify occurs.
// Returns TRUE when the wait timed out, FALSE when we were notified.
BOOL Thread::Block(INT32 timeOut, PendingSync *syncState)
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE(this == GetThread());

    // Before calling Block, the SyncBlock queued us onto its list of waiting
    // threads. However, before calling Block the SyncBlock temporarily left the
    // synchronized region, which allowed other threads to enter and call
    // Notify -- so we may already have been signalled (and removed from the
    // m_WaitSB list) before we even start the Wait. That is harmless: the Wait
    // below returns immediately. But it means we cannot assert
    // m_WaitSB != NULL here.
    CLREvent *pWaitEvent = syncState->m_WaitEventLink->m_Next->m_EventWait;
    return (Wait(pWaitEvent, timeOut, syncState) != WAIT_OBJECT_0);
}
// Return whether or not a timeout occurred. TRUE=>we waited successfully
// Alertable wait-any over a handle array, used by monitor code. Maps the
// managed INFINITE_TIMEOUT convention onto Win32 INFINITE. The result is
// either a signalled-handle index or WAIT_TIMEOUT.
DWORD Thread::Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo)
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);

    // Translate the managed timeout into a Win32 timeout.
    DWORD dwTimeOut32;
    if (timeOut == INFINITE_TIMEOUT)
        dwTimeOut32 = INFINITE;
    else
        dwTimeOut32 = (DWORD) timeOut;

    DWORD dwResult = DoAppropriateWait(cntObjs, objs, FALSE /*=waitAll*/, dwTimeOut32,
                                       WaitMode_Alertable /*alertable*/,
                                       syncInfo);

    // Either we succeeded in the wait, or we timed out
    _ASSERTE((dwResult >= WAIT_OBJECT_0 && dwResult < (DWORD)(WAIT_OBJECT_0 + cntObjs)) ||
             (dwResult == WAIT_TIMEOUT));

    return dwResult;
}
// Return whether or not a timeout occurred. TRUE=>we waited successfully
// Alertable wait on a single CLREvent, used by monitor code. Maps the managed
// INFINITE_TIMEOUT convention onto Win32 INFINITE. The result is either
// WAIT_OBJECT_0 or WAIT_TIMEOUT.
DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo)
{
    WRAPPER_NO_CONTRACT;

    _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);

    // Translate the managed timeout into a Win32 timeout.
    DWORD dwTimeOut32;
    if (timeOut == INFINITE_TIMEOUT)
        dwTimeOut32 = INFINITE;
    else
        dwTimeOut32 = (DWORD) timeOut;

    DWORD dwResult = pEvent->Wait(dwTimeOut32, TRUE /*alertable*/, syncInfo);

    // Either we succeeded in the wait, or we timed out
    _ASSERTE((dwResult == WAIT_OBJECT_0) ||
             (dwResult == WAIT_TIMEOUT));

    return dwResult;
}
// Wake this thread from an Object.Wait on the given SyncBlock: find the
// wait-event link registered for psb, tag its m_WaitSB pointer (set the low
// bit) so the same entry cannot be woken twice, and signal its event.
void Thread::Wake(SyncBlock *psb)
{
    WRAPPER_NO_CONTRACT;

    CLREvent* hEvent = NULL;
    WaitEventLink *walk = &m_WaitEventLink;
    while (walk->m_Next) {
        if (walk->m_Next->m_WaitSB == psb) {
            hEvent = walk->m_Next->m_EventWait;
            // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB
            // since the thread is holding the syncblock.
            walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1);
            break;
        }
#ifdef _DEBUG
        // BUGFIX: compare the (tag-stripped) m_WaitSB of the entry, not the
        // link pointer itself -- a match here means this entry was already
        // woken for psb once (its tagged pointer no longer equals psb above).
        else if ((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1) == psb) {
            _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once");
        }
#endif
        // BUGFIX: advance to the next link; without this the loop spins
        // forever whenever the first entry is not the one being woken.
        walk = walk->m_Next;
    }
    PREFIX_ASSUME (hEvent != NULL);
    hEvent->Set();
}
#define WAIT_INTERRUPT_THREADABORT 0x1
#define WAIT_INTERRUPT_INTERRUPT 0x2
#define WAIT_INTERRUPT_OTHEREXCEPTION 0x4
// Helper used while restoring an interrupted monitor wait (see PendingSync::Restore):
// Attempt to enter the monitor on pSB once, swallowing any exception raised
// while doing so. Returns 0 on success, or one WAIT_INTERRUPT_* flag
// describing why the acquisition was interrupted (thread abort,
// Thread.Interrupt, or some other exception). Used by PendingSync::Restore,
// which must re-enter the monitor even while the waiting thread is being
// aborted or interrupted.
DWORD EnterMonitorForRestore(SyncBlock *pSB)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    DWORD state = 0;
    EX_TRY
    {
        pSB->EnterMonitor();
    }
    EX_CATCH
    {
        // Assume it is a normal exception unless proven.
        state = WAIT_INTERRUPT_OTHEREXCEPTION;
        Thread *pThread = GetThread();
        if (pThread->IsAbortInitiated())
        {
            state = WAIT_INTERRUPT_THREADABORT;
        }
        else if (__pException != NULL)
        {
            // Classify a Thread.Interrupt by its HRESULT.
            if (__pException->GetHR() == COR_E_THREADINTERRUPTED)
            {
                state = WAIT_INTERRUPT_INTERRUPT;
            }
        }
    }
    EX_END_CATCH(SwallowAllExceptions);

    return state;
}
// This is the service that backs us out of a wait that we interrupted. We must
// re-enter the monitor to the same extent the SyncBlock would, if we returned
// through it (instead of throwing through it). And we need to cancel the wait,
// if it didn't get notified away while we are processing the interrupt.
// This is the service that backs us out of a wait that we interrupted. We must
// re-enter the monitor to the same extent the SyncBlock would, if we returned
// through it (instead of throwing through it). And we need to cancel the wait,
// if it didn't get notified away while we are processing the interrupt.
void PendingSync::Restore(BOOL bRemoveFromSB)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE(m_EnterCount);

    Thread *pCurThread = GetThread();

    _ASSERTE (pCurThread == m_OwnerThread);

    WaitEventLink *pRealWaitEventLink = m_WaitEventLink->m_Next;

    // Release this wait's reference on the event link; the last reference
    // tears down the per-wait bookkeeping.
    pRealWaitEventLink->m_RefCount --;
    if (pRealWaitEventLink->m_RefCount == 0)
    {
        if (bRemoveFromSB) {
            // We were NOT signalled away, so unlink ourselves from the
            // syncblock's wait queue.
            ThreadQueue::RemoveThread(pCurThread, pRealWaitEventLink->m_WaitSB);
        }
        if (pRealWaitEventLink->m_EventWait != &pCurThread->m_EventWait) {
            // Put the event back to the pool.
            StoreEventToEventStore(pRealWaitEventLink->m_EventWait);
        }
        // Remove from the link.
        m_WaitEventLink->m_Next = m_WaitEventLink->m_Next->m_Next;
    }

    // Someone up the stack is responsible for keeping the syncblock alive by protecting
    // the object that owns it. But this relies on assertions that EnterMonitor is only
    // called in cooperative mode. Even though we are safe in preemptive, do the
    // switch.
    GCX_COOP_THREAD_EXISTS(pCurThread);
    // We need to make sure that EnterMonitor succeeds. We may have code like
    // lock (a)
    // {
    // a.Wait
    // }
    // We need to make sure that the finally from lock is executed with the lock owned.
    DWORD state = 0;
    // Strip the "woken" tag bit (set by Thread::Wake) to recover the real
    // SyncBlock pointer.
    SyncBlock *psb = (SyncBlock*)((DWORD_PTR)pRealWaitEventLink->m_WaitSB & ~1);
    // Re-acquire the monitor once per recursive entry the wait gave up.
    for (LONG i=0; i < m_EnterCount;)
    {
        if ((state & (WAIT_INTERRUPT_THREADABORT | WAIT_INTERRUPT_INTERRUPT)) != 0)
        {
            // If the thread has been interrupted by Thread.Interrupt or Thread.Abort,
            // disable the check at the beginning of DoAppropriateWait
            pCurThread->SetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
        }
        DWORD result = EnterMonitorForRestore(psb);
        if (result == 0)
        {
            i++;
        }
        else
        {
            // We block the thread until the thread acquires the lock.
            // This is to make sure that when catch/finally is executed, the thread has the lock.
            // We do not want thread to run its catch/finally if the lock is not taken.
            state |= result;

            // If the thread is being rudely aborted, and the thread has
            // no Cer on stack, we will not run managed code to release the
            // lock, so we can terminate the loop.
            if (pCurThread->IsRudeAbortInitiated() &&
                !pCurThread->IsExecutingWithinCer())
            {
                break;
            }
        }
    }
    pCurThread->ResetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);

    // Now that the monitor is re-held, surface whichever interruption we
    // deferred while re-acquiring it.
    if ((state & WAIT_INTERRUPT_THREADABORT) != 0)
    {
        pCurThread->HandleThreadAbort();
    }
    else if ((state & WAIT_INTERRUPT_INTERRUPT) != 0)
    {
        COMPlusThrow(kThreadInterruptedException);
    }
}
// APC callback queued by UserInterrupt() to wake a waiting thread. It runs on
// the thread being interrupted (this is a STATIC method).
void WINAPI Thread::UserInterruptAPC(ULONG_PTR data)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    _ASSERTE(data == APC_Code);

    Thread *pThread = GetThread();
    if (pThread == NULL)
        return;

    // Only act when an interrupt is actually pending; our synchronization
    // allows the APC to fire spuriously. Checking m_UserInterrupt and then
    // setting TS_Interrupted non-atomically is safe because m_UserInterrupt
    // is only cleared in this thread's context (though it may be set from
    // any context).
    if (pThread->IsUserInterrupted())
    {
        // Record that this routine ran (as opposed to other generic APCs).
        FastInterlockOr((ULONG *) &pThread->m_State, TS_Interrupted);
    }
}
// Workhorse for Thread.Interrupt(): record the interrupt request and, if the
// target thread is currently marked interruptible, queue the wake-up APC.
void Thread::UserInterrupt(ThreadInterruptMode mode)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Publish the request first; UserInterruptAPC re-checks it on the target.
    FastInterlockOr((DWORD*)&m_UserInterrupt, mode);

    // Only alert a thread that still has a valid handle and has declared
    // itself interruptible.
    if (HasValidThreadHandle())
    {
        if (HasThreadState (TS_Interruptible))
        {
            Alert();
        }
    }
}
// Implementation of Thread.Sleep(): sleep alertably, servicing EE interrupts
// and re-sleeping for the remaining time after spurious APC wake-ups.
void Thread::UserSleep(INT32 time)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    INCONTRACT(_ASSERTE(!GetThread()->GCNoTrigger()));

    DWORD res;

    // Flag the thread as waiting for the debugger before dropping to
    // preemptive mode (TS_Interruptible alone is not reliable for this;
    // see DevDiv Bugs 699245).
    ThreadStateNCStackHolder tsNC(TRUE, TSNC_DebuggerSleepWaitJoin);

    GCX_PREEMP();

    // Ordering matters for Interrupt: TS_Interruptible must be visible before
    // we test for a pending interrupt, or a racing interrupter could record
    // the request without queueing the APC and we would miss the wake-up.
    FastInterlockOr((ULONG *) &m_State, TS_Interruptible);

    // If someone has already interrupted us, do not enter the wait at all.
    if (IsUserInterrupted())
    {
        HandleThreadInterrupt(FALSE);
    }

    ThreadStateHolder tsh(TRUE, TS_Interruptible | TS_Interrupted);

    FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);

    DWORD dwTime = (DWORD)time;
    for (;;)
    {
        ULONGLONG start = CLRGetTickCount64();

        res = ClrSleepEx (dwTime, TRUE);
        if (res != WAIT_IO_COMPLETION)
        {
            break;
        }

        // Woken by some APC. If it was the EE interrupt APC, TS_Interrupted
        // is set and HandleThreadInterrupt throws; for any other APC we just
        // go back to sleep.
        if ((m_State & TS_Interrupted))
        {
            HandleThreadInterrupt(FALSE);
        }

        if (dwTime == INFINITE)
        {
            continue;
        }

        // Sleep again only for whatever part of the timeout remains.
        ULONGLONG actDuration = CLRGetTickCount64() - start;
        if (dwTime > actDuration)
        {
            dwTime -= (DWORD)actDuration;
        }
        else
        {
            res = WAIT_TIMEOUT;
            break;
        }
    }

    _ASSERTE(res == WAIT_TIMEOUT || res == WAIT_OBJECT_0);
}
// Correspondence between an EE Thread and an exposed System.Thread:
// returns the managed System.Thread for this Thread, lazily allocating it
// (and publishing it under the thread store lock) on first request.
OBJECTREF Thread::GetExposedObject()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    TRIGGERSGC();

    Thread *pCurThread = GetThread();
    _ASSERTE (!(pCurThread == NULL || IsAtProcessExit()));
    _ASSERTE(pCurThread->PreemptiveGCDisabled());

    if (ObjectFromHandle(m_ExposedObject) == NULL)
    {
        // Allocate the exposed thread object.
        THREADBASEREF attempt = (THREADBASEREF) AllocateObject(g_pThreadClass);
        GCPROTECT_BEGIN(attempt);

        // The exposed object keeps us alive until it is GC'ed. This
        // doesn't mean the physical thread continues to run, of course.
        // We have to set this outside of the ThreadStore lock, because this might trigger a GC.
        attempt->SetInternal(this);

        BOOL fNeedThreadStore = (! ThreadStore::HoldingThreadStore(pCurThread));
        // Take a lock to make sure that only one thread creates the object.
        ThreadStoreLockHolder tsHolder(fNeedThreadStore);

        // Check to see if another thread has not already created the exposed object.
        if (ObjectFromHandle(m_ExposedObject) == NULL)
        {
            // Keep a weak reference to the exposed object.
            StoreObjectInHandle(m_ExposedObject, (OBJECTREF) attempt);

            ObjectInHandleHolder exposedHolder(m_ExposedObject);

            // Increase the external ref count. We can't call IncExternalCount because we
            // already hold the thread lock and IncExternalCount won't be able to take it.
            ULONG retVal = FastInterlockIncrement ((LONG*)&m_ExternalRefCount);

            // Check to see if we need to store a strong pointer to the object.
            if (retVal > 1)
                StoreObjectInHandle(m_StrongHndToExposedObject, (OBJECTREF) attempt);

            ObjectInHandleHolder strongHolder(m_StrongHndToExposedObject);

            attempt->SetManagedThreadId(GetThreadId());

            // Note that we are NOT calling the constructor on the Thread. That's
            // because this is an internal create where we don't want a Start
            // address. And we don't want to expose such a constructor for our
            // customers to accidentally call. The following is in lieu of a true
            // constructor:
            attempt->InitExisting();

            // Success: keep both handles alive past this scope.
            exposedHolder.SuppressRelease();
            strongHolder.SuppressRelease();
        }
        else
        {
            // Lost the race: another thread published first; detach our attempt.
            attempt->ClearInternal();
        }

        GCPROTECT_END();
    }
    return ObjectFromHandle(m_ExposedObject);
}
// Attach (or clear) the exposed System.Thread object for this Thread. We only
// set a non-NULL exposed object for unstarted threads that have not exited
// their constructor yet, so there are no race conditions.
void Thread::SetExposedObject(OBJECTREF exposed)
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    if (exposed == NULL)
    {
        // Clearing: null both handles. The GC of the old exposed thread
        // object will take care of decrementing the external ref count.
        StoreObjectInHandle(m_ExposedObject, NULL);
        StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
        return;
    }

    _ASSERTE (GetThread() != this);
    _ASSERTE(IsUnstarted());
    _ASSERTE(ObjectFromHandle(m_ExposedObject) == NULL);

    // The exposed object keeps us alive until it is GC'ed. This doesn't mean
    // the physical thread continues to run, of course.
    StoreObjectInHandle(m_ExposedObject, exposed);

    // Historical note: IncExternalCount used to be called here, but it may
    // trigger a GC and stores 'exposed' in m_StrongHndToExposedObject (default
    // domain); if the creating thread died before the target thread was
    // started in Thread.Start, the Thread object would be kept alive forever.
    // It is therefore called only after the target thread has been started.
    // IncExternalCount();
}
// Record 'throwable' as this thread's last thrown object, replacing (and,
// where legal, destroying) any previous handle. 'isUnhandled' marks the new
// object as an unhandled exception.
void Thread::SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled)
{
    CONTRACTL
    {
        if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
        GC_NOTRIGGER;
        if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    STRESS_LOG_COND1(LF_EH, LL_INFO100, OBJECTREFToObject(throwable) != NULL, "in Thread::SetLastThrownObject: obj = %p\n", OBJECTREFToObject(throwable));

    // you can't have a NULL unhandled exception
    _ASSERTE(!(throwable == NULL && isUnhandled));

    if (m_LastThrownObjectHandle != NULL)
    {
        // We'll sometimes use a handle for a preallocated exception object. We should never, ever destroy one of
        // these handles... they'll be destroyed when the Runtime shuts down.
        if (!CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
        {
            DestroyHandle(m_LastThrownObjectHandle);
        }

        m_LastThrownObjectHandle = NULL; // Make sure to set this to NULL here just in case we throw trying to make
                                         // a new handle below.
    }

    if (throwable != NULL)
    {
        _ASSERTE(this == GetThread());

        // Non-compliant exceptions are always wrapped.
        // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper)
        // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT.
        _ASSERTE(IsException(throwable->GetMethodTable()));

        // If we're tracking one of the preallocated exception objects, then just use the global handle that
        // matches it rather than creating a new one.
        if (CLRException::IsPreallocatedExceptionObject(throwable))
        {
            m_LastThrownObjectHandle = CLRException::GetPreallocatedHandleForObject(throwable);
        }
        else
        {
            BEGIN_SO_INTOLERANT_CODE(GetThread());
            {
                m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
            }
            END_SO_INTOLERANT_CODE;
        }

        _ASSERTE(m_LastThrownObjectHandle != NULL);
        m_ltoIsUnhandled = isUnhandled;
    }
    else
    {
        // Clearing the last thrown object also clears the unhandled flag.
        m_ltoIsUnhandled = FALSE;
    }
}
// Point the last-thrown-object handle at the preallocated stack overflow
// exception. Used while reporting SO, when creating a new handle (or taking
// any lock) is not possible.
void Thread::SetSOForLastThrownObject()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        SO_TOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    // If we are saving stack overflow exception, we can just null out the current handle.
    // The current domain is going to be unloaded or the process is going to be killed, so
    // we will not leak a handle.
    m_LastThrownObjectHandle = CLRException::GetPreallocatedStackOverflowExceptionHandle();
}
//
// This is a nice wrapper for SetLastThrownObject which catches any exceptions caused by not being able to create
// the handle for the throwable, and setting the last thrown object to the preallocated out of memory exception
// instead.
//
// Returns: the throwable actually recorded -- the original on success, or the
// preallocated OOM exception if handle creation failed.
OBJECTREF Thread::SafeSetLastThrownObject(OBJECTREF throwable)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // We return the original throwable if nothing goes wrong.
    OBJECTREF ret = throwable;

    EX_TRY
    {
        // Try to set the throwable.
        SetLastThrownObject(throwable);
    }
    EX_CATCH
    {
        // If it didn't work, then set the last thrown object to the preallocated OOM exception, and return that
        // object instead of the original throwable.
        ret = CLRException::GetPreallocatedOutOfMemoryException();

        // This cannot throw: setting a preallocated exception never creates a handle.
        SetLastThrownObject(ret);
    }
    EX_END_CATCH(SwallowAllExceptions);

    return ret;
}
//
// This is a nice wrapper for SetThrowable and SetLastThrownObject, which catches any exceptions caused by not
// being able to create the handle for the throwable, and sets the throwable to the preallocated out of memory
// exception instead. It also updates the last thrown object, which is always updated when the throwable is
// updated.
//
// Returns: the throwable actually recorded -- the original on success, or the
// preallocated OOM exception if either set failed.
OBJECTREF Thread::SafeSetThrowables(OBJECTREF throwable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags),
                                    BOOL isUnhandled)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // We return the original throwable if nothing goes wrong.
    OBJECTREF ret = throwable;

    EX_TRY
    {
        // Try to set the throwable.
        SetThrowable(throwable DEBUG_ARG(stecFlags));

        // Now, if the last thrown object is different, go ahead and update it. This makes sure that we re-throw
        // the right object when we rethrow.
        if (LastThrownObject() != throwable)
        {
            SetLastThrownObject(throwable);
        }

        if (isUnhandled)
        {
            MarkLastThrownObjectUnhandled();
        }
    }
    EX_CATCH
    {
        // If either set didn't work, then set both throwables to the preallocated OOM exception, and return that
        // object instead of the original throwable.
        ret = CLRException::GetPreallocatedOutOfMemoryException();

        // Neither of these will throw because we're setting with a preallocated exception.
        SetThrowable(ret DEBUG_ARG(stecFlags));
        SetLastThrownObject(ret, isUnhandled);
    }
    EX_END_CATCH(SwallowAllExceptions);

    return ret;
}
// This method will sync the managed exception state to be in sync with the topmost active exception
// for a given thread.
// fIsDebuggerThread: true when called on behalf of the debugger, in which case
// the corruption-severity reset is skipped.
void Thread::SyncManagedExceptionState(bool fIsDebuggerThread)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    {
        // Must be in cooperative mode to touch managed object references.
        GCX_COOP();

        // Syncup the LastThrownObject on the managed thread
        SafeUpdateLastThrownObject();
    }

#ifdef FEATURE_CORRUPTING_EXCEPTIONS
    // Since the catch clause has successfully executed and we are exiting it, reset the corruption severity
    // in the ThreadExceptionState for the last active exception. This will ensure that when the next exception
    // gets thrown/raised, EH tracker won't pick up an invalid value.
    if (!fIsDebuggerThread)
    {
        CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler(this);
    }
#endif // FEATURE_CORRUPTING_EXCEPTIONS
}
// Replace the last-thrown-object handle, destroying the previous handle
// unless it is one of the process-lifetime preallocated exception handles.
void Thread::SetLastThrownObjectHandle(OBJECTHANDLE h)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    OBJECTHANDLE hOld = m_LastThrownObjectHandle;
    if (hOld != NULL)
    {
        // Preallocated handles live for the whole process; never destroy them.
        if (!CLRException::IsPreallocatedExceptionHandle(hOld))
        {
            DestroyHandle(hOld);
        }
    }

    m_LastThrownObjectHandle = h;
}
//
// Create a duplicate handle of the current throwable and set the last thrown object to that. This ensures that the
// last thrown object and the current throwable have handles that are in the same app domain.
//
void Thread::SafeUpdateLastThrownObject(void)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        SO_INTOLERANT;
    }
    CONTRACTL_END;

    OBJECTHANDLE hThrowable = GetThrowableAsHandle();

    // Nothing to do when no throwable is active.
    if (hThrowable != NULL)
    {
        EX_TRY
        {
            IGCHandleManager *pHandleTable = GCHandleUtilities::GetGCHandleManager();

            // Creating a duplicate handle here ensures that the AD of the last thrown object
            // matches the domain of the current throwable.
            OBJECTHANDLE duplicateHandle = pHandleTable->CreateDuplicateHandle(hThrowable);
            SetLastThrownObjectHandle(duplicateHandle);
        }
        EX_CATCH
        {
            // If we can't create a duplicate handle, we set both throwables to the preallocated OOM exception.
            SafeSetThrowables(CLRException::GetPreallocatedOutOfMemoryException());
        }
        EX_END_CATCH(SwallowAllExceptions);
    }
}
// Background threads must be counted, because the EE should shut down when the
// last non-background thread terminates. But we only count running ones.
// isBack:       desired background-ness of this thread.
// bRequiresTSL: whether this call must take the thread store lock itself.
void Thread::SetBackground(BOOL isBack, BOOL bRequiresTSL)
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // booleanize IsBackground() which just returns bits
    if (isBack == !!IsBackground())
        return;

    LOG((LF_SYNC, INFO3, "SetBackground obtain lock\n"));
    ThreadStoreLockHolder TSLockHolder(FALSE);
    if (bRequiresTSL)
    {
        TSLockHolder.Acquire();
    }

    if (IsDead())
    {
        // This can only happen in a race condition, where the correct thing to do
        // is ignore it. If it happens without the race condition, we throw an
        // exception.
    }
    else
    if (isBack)
    {
        if (!IsBackground())
        {
            FastInterlockOr((ULONG *) &m_State, TS_Background);

            // unstarted threads don't contribute to the background count
            if (!IsUnstarted())
                ThreadStore::s_pThreadStore->m_BackgroundThreadCount++;

            // If we put the main thread into a wait, until only background threads exist,
            // then we make that
            // main thread a background thread. This cleanly handles the case where it
            // may or may not be one as it enters the wait.

            // One of the components of OtherThreadsComplete() has changed, so check whether
            // we should now exit the EE.
            ThreadStore::CheckForEEShutdown();
        }
    }
    else
    {
        if (IsBackground())
        {
            FastInterlockAnd((ULONG *) &m_State, ~TS_Background);

            // unstarted threads don't contribute to the background count
            if (!IsUnstarted())
                ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;

            _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
            _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount <=
                     ThreadStore::s_pThreadStore->m_ThreadCount);
        }
    }

    if (bRequiresTSL)
    {
        TSLockHolder.Release();
    }
}
#ifdef FEATURE_COMINTEROP
// COM IInitializeSpy implementation registered per-thread (see
// PrepareApartmentAndContext). Its only real work is in PreUninitialize:
// when the last CoUninitialize on an STA thread is about to run, it releases
// RCWs while the apartment is still alive.
class ApartmentSpyImpl : public IUnknownCommon<IInitializeSpy>
{

public:
    // Called before CoInitialize(Ex); no action needed.
    HRESULT STDMETHODCALLTYPE PreInitialize(DWORD dwCoInit, DWORD dwCurThreadAptRefs)
    {
        LIMITED_METHOD_CONTRACT;
        return S_OK;
    }

    // Called after CoInitialize(Ex); pass the HRESULT straight through.
    HRESULT STDMETHODCALLTYPE PostInitialize(HRESULT hrCoInit, DWORD dwCoInit, DWORD dwNewThreadAptRefs)
    {
        LIMITED_METHOD_CONTRACT;
        return hrCoInit; // this HRESULT will be returned from CoInitialize(Ex)
    }

    // Called before CoUninitialize; performs RCW cleanup on the last
    // uninitialize of an STA thread while the apartment still exists.
    HRESULT STDMETHODCALLTYPE PreUninitialize(DWORD dwCurThreadAptRefs)
    {
        // Don't assume that Thread exists and do not create it.
        STATIC_CONTRACT_NOTHROW;
        STATIC_CONTRACT_GC_TRIGGERS;
        STATIC_CONTRACT_MODE_PREEMPTIVE;

        HRESULT hr = S_OK;

        if (dwCurThreadAptRefs == 1 && !g_fEEShutDown)
        {
            // This is the last CoUninitialize on this thread and the CLR is still running. If it's an STA
            // we take the opportunity to perform COM/WinRT cleanup now, when the apartment is still alive.
            Thread *pThread = GetThreadNULLOk();
            if (pThread != NULL)
            {
                BEGIN_EXTERNAL_ENTRYPOINT(&hr)
                {
                    if (pThread->GetFinalApartment() == Thread::AS_InSTA)
                    {
                        // This will release RCWs and purge the WinRT factory cache on all AppDomains. It
                        // will also synchronize with the finalizer thread which ensures that the RCWs
                        // that were already in the global RCW cleanup list will be cleaned up as well.
                        //
                        ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
                    }
                }
                END_EXTERNAL_ENTRYPOINT;
            }
        }
        return hr;
    }

    // Called after CoUninitialize; no action needed.
    HRESULT STDMETHODCALLTYPE PostUninitialize(DWORD dwNewThreadAptRefs)
    {
        LIMITED_METHOD_CONTRACT;
        return S_OK;
    }
};
#endif // FEATURE_COMINTEROP
// When the thread starts running, make sure it is running in the correct apartment
// and context. Records the OS thread id, applies any requested STA/MTA state,
// and (COM interop) registers the per-thread IInitializeSpy.
// Returns TRUE (throws on failure).
BOOL Thread::PrepareApartmentAndContext()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    m_OSThreadId = ::GetCurrentThreadId();

#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
    // Be very careful in here because we haven't set up e.g. TLS yet.

    if (m_State & (TS_InSTA | TS_InMTA))
    {
        // Make sure TS_InSTA and TS_InMTA aren't both set.
        _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));

        // Determine the apartment state to set based on the requested state.
        ApartmentState aState = m_State & TS_InSTA ? AS_InSTA : AS_InMTA;

        // Clear the requested apartment state from the thread. This is requested since
        // the thread might actually be a fiber that has already been initialized to
        // a different apartment state than the requested one. If we didn't clear
        // the requested apartment state, then we could end up with both TS_InSTA and
        // TS_InMTA set at the same time.
        FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);

        // Attempt to set the requested apartment state.
        SetApartment(aState, FALSE);
    }

    // In the case where we own the thread and we have switched it to a different
    // starting context, it is the responsibility of the caller (KickOffThread())
    // to notice that the context changed, and to adjust the delegate that it will
    // dispatch on, as appropriate.
#endif //FEATURE_COMINTEROP_APARTMENT_SUPPORT

#ifdef FEATURE_COMINTEROP
    // Our IInitializeSpy will be registered in AppX always, in classic processes
    // only if the internal config switch is on.
    if (AppX::IsAppXProcess() || g_pConfig->EnableRCWCleanupOnSTAShutdown())
    {
        NewHolder<ApartmentSpyImpl> pSpyImpl = new ApartmentSpyImpl();

        IfFailThrow(CoRegisterInitializeSpy(pSpyImpl, &m_uliInitializeSpyCookie));
        pSpyImpl.SuppressRelease();

        m_fInitializeSpyRegistered = true;
    }
#endif // FEATURE_COMINTEROP

    return TRUE;
}
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT

// Map a TS_InSTA/TS_InMTA thread-state bit to the corresponding ApartmentState:
// TS_InSTA (0x00004000) -> AS_InSTA (0)
// TS_InMTA (0x00008000) -> AS_InMTA (1)
#define TS_TO_AS(ts) \
    (Thread::ApartmentState)((((DWORD)ts) >> 14) - 1) \

// Retrieve the apartment state of the current thread. There are three possible
// states: thread hosts an STA, thread is part of the MTA or thread state is
// undecided. The last state may indicate that the apartment has not been set at
// all (nobody has called CoInitializeEx) or that the EE does not know the
// current state (EE has not called CoInitializeEx).
Thread::ApartmentState Thread::GetApartment()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    // Fast path: answer from the cached TS_InSTA/TS_InMTA bits when possible.
    ApartmentState as = AS_Unknown;
    ThreadState maskedTs = (ThreadState)(((DWORD)m_State) & (TS_InSTA|TS_InMTA));
    if (maskedTs)
    {
        _ASSERTE((maskedTs == TS_InSTA) || (maskedTs == TS_InMTA));
        static_assert_no_msg(TS_TO_AS(TS_InSTA) == AS_InSTA);
        static_assert_no_msg(TS_TO_AS(TS_InMTA) == AS_InMTA);

        as = TS_TO_AS(maskedTs);
    }

    // If the MDA probe is armed we always take the rare path so it can verify
    // the cached state against the OS.
    if (
#ifdef MDA_SUPPORTED
        (NULL == MDA_GET_ASSISTANT(InvalidApartmentStateChange)) &&
#endif
        (as != AS_Unknown))
    {
        return as;
    }

    return GetApartmentRare(as);
}
// Slow path of GetApartment(): consults the OS (and optionally fires the MDA
// probe) when the cached TS_InSTA/TS_InMTA bits do not answer the question.
// In:  as - state derived from the cached bits (may be AS_Unknown).
// Out: the (possibly refined) apartment state; additionally caches TS_InSTA
//      in m_State when the OS reports this thread hosts an STA.
Thread::ApartmentState Thread::GetApartmentRare(Thread::ApartmentState as)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    // The OS query below is only meaningful on the current thread.
    if (this == GetThread())
    {
        THDTYPE type;
        HRESULT hr = S_OK;

#ifdef MDA_SUPPORTED
        MdaInvalidApartmentStateChange* pProbe = MDA_GET_ASSISTANT(InvalidApartmentStateChange);
        if (pProbe)
        {
            // Without notifications from OLE32, we cannot know when the apartment state of a
            // thread changes. But we have cached this fact and depend on it for all our
            // blocking and COM Interop behavior to work correctly. Using the CDH, log that it
            // is not changing underneath us, on those platforms where it is relatively cheap for
            // us to do so.
            if (as != AS_Unknown)
            {
                hr = GetCurrentThreadTypeNT5(&type);
                if (hr == S_OK)
                {
                    if (type == THDTYPE_PROCESSMESSAGES && as == AS_InMTA)
                    {
                        pProbe->ReportViolation(this, as, FALSE);
                    }
                    else if (type == THDTYPE_BLOCKMESSAGES && as == AS_InSTA)
                    {
                        pProbe->ReportViolation(this, as, FALSE);
                    }
                }
            }
        }
#endif
        if (as == AS_Unknown)
        {
            hr = GetCurrentThreadTypeNT5(&type);
            if (hr == S_OK)
            {
                as = (type == THDTYPE_PROCESSMESSAGES) ? AS_InSTA : AS_InMTA;

                // If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to
                // be an STA thread. If not, we are an MTA thread, however
                // we can't know if the thread has been explicitly set to MTA
                // (via a call to CoInitializeEx) or if it has been implicitly
                // made MTA (if it hasn't been CoInitializeEx'd but CoInitialize
                // has already been called on some other thread in the process.
                if (as == AS_InSTA)
                    // BUGFIX: cache the thread-state bit TS_InSTA, not the
                    // ApartmentState value AS_InSTA. AS_InSTA is 0 (see the
                    // TS_TO_AS mapping above), so OR-ing it in was a no-op and
                    // the STA fact was never actually cached.
                    FastInterlockOr((ULONG *) &m_State, TS_InSTA);
            }
        }
    }
    return as;
}
// Retrieve the explicit apartment state of the current thread. There are
// three possible states: the thread hosts an STA, the thread is part of the
// MTA, or the state is undecided. The last state may indicate that the
// apartment has not been set at all (nobody has called CoInitializeEx), that
// the EE does not know the current state (EE has not called CoInitializeEx),
// or that the thread is only implicitly in the MTA.
Thread::ApartmentState Thread::GetExplicitApartment()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));

    // Let GetApartment refresh the cached TS_InSTA/TS_InMTA bits first.
    GetApartment();

    ApartmentState as = AS_Unknown;
    if (m_State & TS_InSTA)
    {
        as = AS_InSTA;
    }
    else if (m_State & TS_InMTA)
    {
        as = AS_InMTA;
    }
    return as;
}
// Like GetApartment, but resolves AS_Unknown to AS_InMTA and ignores the
// cached bits during shutdown. Must be called on the current thread.
Thread::ApartmentState Thread::GetFinalApartment()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    _ASSERTE(this == GetThread());

    ApartmentState as = AS_Unknown;
    if (g_fEEShutDown)
    {
        // On shutdown, do not use cached value. Someone might have called
        // CoUninitialize.
        FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
    }

    as = GetApartment();
    if (as == AS_Unknown)
    {
        // On Win2k and above, GetApartment will only return AS_Unknown if CoInitialize
        // hasn't been called in the process. In that case we can simply assume MTA. However we
        // cannot cache this value in the Thread because if a CoInitialize does occur, then the
        // thread state might change.
        as = AS_InMTA;
    }

    return as;
}
// Called on apartment tear-down notification: forget the apartment state we
// cache on the thread so a later query re-detects it.
VOID Thread::ResetApartment()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Clear both the TS_InSTA and the TS_InMTA bit.
    FastInterlockAnd((ULONG *) &m_State, (ThreadState)(~(TS_InSTA | TS_InMTA)));
}
// Attempt to set current thread's apartment state. The actual apartment state
// achieved is returned and may differ from the input state if someone managed
// to call CoInitializeEx on this thread first (note that calls to SetApartment
// made before the thread has started are guaranteed to succeed).
// The fFireMDAOnMismatch indicates if we should fire the apartment state probe
// on an apartment state mismatch.
// Passing AS_Unknown requests CoUninitialize/RoUninitialize instead.
Thread::ApartmentState Thread::SetApartment(ApartmentState state, BOOL fFireMDAOnMismatch)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    // Reset any bits that request for CoInitialize
    ResetRequiresCoInitialize();

    // Setting the state to AS_Unknown indicates we should CoUninitialize
    // the thread.
    if (state == AS_Unknown)
    {
        BOOL needUninitialize = (m_State & TS_CoInitialized)
#ifdef FEATURE_COMINTEROP
            || IsWinRTInitialized()
#endif // FEATURE_COMINTEROP
            ;

        if (needUninitialize)
        {
            GCX_PREEMP();

            // If we haven't CoInitialized the thread, then we don't have anything to do.
            if (m_State & TS_CoInitialized)
            {
                // We should never be attempting to CoUninitialize another thread than
                // the currently running thread.
                _ASSERTE(m_OSThreadId == ::GetCurrentThreadId());

                // CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits.
                ::CoUninitialize();

                ThreadState uninitialized = static_cast<ThreadState>(TS_InSTA | TS_InMTA | TS_CoInitialized);
                FastInterlockAnd((ULONG *) &m_State, ~uninitialized);
            }

#ifdef FEATURE_COMINTEROP
            if (IsWinRTInitialized())
            {
                _ASSERTE(WinRTSupported());
                BaseWinRTUninitialize();
                ResetWinRTInitialized();
            }
#endif // FEATURE_COMINTEROP
        }
        return GetApartment();
    }

    // Call GetApartment to initialize the current apartment state.
    //
    // Important note: For Win2k and above this can return AS_InMTA even if the current
    // thread has never been CoInitialized. Because of this we MUST NOT look at the
    // return value of GetApartment here. We can however look at the m_State flags
    // since these will only be set to TS_InMTA if we know for a fact the the
    // current thread has explicitly been made MTA (via a call to CoInitializeEx).
    GetApartment();

    // If the current thread is STA, then it is impossible to change it to
    // MTA.
    if (m_State & TS_InSTA)
    {
#ifdef MDA_SUPPORTED
        if (state == AS_InMTA && fFireMDAOnMismatch)
        {
            MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
        }
#endif
        return AS_InSTA;
    }

    // If the current thread is EXPLICITLY MTA, then it is impossible to change it to
    // STA.
    if (m_State & TS_InMTA)
    {
#ifdef MDA_SUPPORTED
        if (state == AS_InSTA && fFireMDAOnMismatch)
        {
            MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
        }
#endif
        return AS_InMTA;
    }

    // If the thread isn't even started yet, we mark the state bits without
    // calling CoInitializeEx (since we're obviously not in the correct thread
    // context yet). We'll retry this call when the thread is started.
    // Don't use the TS_Unstarted state bit to check for this, it's cleared far
    // too late in the day for us. Instead check whether we're in the correct
    // thread context.
    if (m_OSThreadId != ::GetCurrentThreadId())
    {
        FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA);
        return state;
    }

    HRESULT hr;
    {
        GCX_PREEMP();

        // Attempt to set apartment by calling CoInitializeEx. This may fail if
        // another caller (outside EE) beat us to it.
        //
        // Important note: When calling CoInitializeEx(COINIT_MULTITHREADED) on a
        // thread that has never been CoInitialized, the return value will always
        // be S_OK, even if another thread in the process has already been
        // CoInitialized to MTA. However if the current thread has already been
        // CoInitialized to MTA, then S_FALSE will be returned.
        hr = ::CoInitializeEx(NULL, (state == AS_InSTA) ?
                              COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED);
    }

    if (SUCCEEDED(hr))
    {
        ThreadState t_State = (state == AS_InSTA) ? TS_InSTA : TS_InMTA;

        if (hr == S_OK)
        {
            // The thread has never been CoInitialized.
            t_State = (ThreadState)(t_State | TS_CoInitialized);
        }
        else
        {
            _ASSERTE(hr == S_FALSE);

            // If the thread has already been CoInitialized to the proper mode, then
            // we don't want to leave an outstanding CoInit so we CoUninit.
            {
                GCX_PREEMP();
                ::CoUninitialize();
            }
        }

        // We succeeded in setting the apartment state to the requested state.
        FastInterlockOr((ULONG *) &m_State, t_State);
    }
    else if (hr == RPC_E_CHANGED_MODE)
    {
        // We didn't manage to enforce the requested apartment state, but at least
        // we can work out what the state is now. No need to actually do the CoInit --
        // obviously someone else already took care of that.
        FastInterlockOr((ULONG *) &m_State, ((state == AS_InSTA) ? TS_InMTA : TS_InSTA));

#ifdef MDA_SUPPORTED
        if (fFireMDAOnMismatch)
        {
            // Report via the customer debug helper that we failed to set the apartment type
            // to the specified type.
            MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
        }
#endif
    }
    else if (hr == E_OUTOFMEMORY)
    {
        COMPlusThrowOM();
    }
    else
    {
        _ASSERTE(!"Unexpected HRESULT returned from CoInitializeEx!");
    }

#ifdef FEATURE_COMINTEROP

    // If WinRT is supported on this OS, also initialize it at the same time. Since WinRT sits on top of COM
    // we need to make sure that it is initialized in the same threading mode as we just started COM itself
    // with (or that we detected COM had already been started with).
    if (WinRTSupported() && !IsWinRTInitialized())
    {
        GCX_PREEMP();

        BOOL isSTA = m_State & TS_InSTA;
        _ASSERTE(isSTA || (m_State & TS_InMTA));

        HRESULT hrWinRT = RoInitialize(isSTA ? RO_INIT_SINGLETHREADED : RO_INIT_MULTITHREADED);

        if (SUCCEEDED(hrWinRT))
        {
            if (hrWinRT == S_OK)
            {
                SetThreadStateNC(TSNC_WinRTInitialized);
            }
            else
            {
                _ASSERTE(hrWinRT == S_FALSE);

                // If the thread has already been initialized, back it out. We may not
                // always be able to call RoUninitialize on shutdown so if there's
                // a way to avoid having to, we should take advantage of that.
                RoUninitialize();
            }
        }
        else if (hrWinRT == E_OUTOFMEMORY)
        {
            COMPlusThrowOM();
        }
        else
        {
            // We don't check for RPC_E_CHANGEDMODE, since we're using the mode that was read in by
            // initializing COM above. COM and WinRT need to always be in the same mode, so we should never
            // see that return code at this point.
            _ASSERTE(!"Unexpected HRESULT From RoInitialize");
        }
    }

    // Since we've just called CoInitialize, COM has effectively been started up.
    // To ensure the CLR is aware of this, we need to call EnsureComStarted.
    EnsureComStarted(FALSE);
#endif // FEATURE_COMINTEROP

    return GetApartment();
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
//----------------------------------------------------------------------------
//
// ThreadStore Implementation
//
//----------------------------------------------------------------------------
// Construct the (singleton) thread store: zero all counters, create the
// crst protecting it, and the manual-reset event signalled at termination.
ThreadStore::ThreadStore()
           : m_Crst(CrstThreadStore, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
             m_ThreadCount(0),
             m_MaxThreadCount(0),
             m_UnstartedThreadCount(0),
             m_BackgroundThreadCount(0),
             m_PendingThreadCount(0),
             m_DeadThreadCount(0),
             m_DeadThreadCountForGCTrigger(0),
             m_TriggerGCForDeadThreads(false),
             m_GuidCreated(FALSE),
             m_HoldingThread(0)
{
    CONTRACTL {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    m_TerminationEvent.CreateManualEvent(FALSE);
    _ASSERTE(m_TerminationEvent.IsValid());
}
// One-time startup initialization: create the singleton ThreadStore, the
// thin-lock id dispenser, the GC-suspend and stack-crawl events, and read the
// dead-thread GC-trigger configuration.
void ThreadStore::InitThreadStore()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    s_pThreadStore = new ThreadStore;

    g_pThinLockThreadIdDispenser = new IdDispenser();

    ThreadSuspend::g_pGCSuspendEvent = new CLREvent();
    ThreadSuspend::g_pGCSuspendEvent->CreateManualEvent(FALSE);

    s_pWaitForStackCrawlEvent = new CLREvent();
    s_pWaitForStackCrawlEvent->CreateManualEvent(FALSE);

    s_DeadThreadCountThresholdForGCTrigger =
        static_cast<LONG>(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadCountThresholdForGCTrigger));
    // A negative threshold is clamped to 0 (feature disabled).
    if (s_DeadThreadCountThresholdForGCTrigger < 0)
    {
        s_DeadThreadCountThresholdForGCTrigger = 0;
    }
    s_DeadThreadGCTriggerPeriodMilliseconds =
        CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadGCTriggerPeriodMilliseconds);
    s_DeadThreadGenerationCounts = nullptr;
}
// Enter and leave the critical section around the thread store. Clients should
// use LockThreadStore and UnlockThreadStore because ThreadStore lock has
// additional semantics well beyond a normal lock.
// Acquire the ThreadStore Crst.  Do not call directly; use LockThreadStore,
// which layers thread-suspension semantics on top of this raw acquire.
DEBUG_NOINLINE void ThreadStore::Enter()
{
    WRAPPER_NO_CONTRACT;
    ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
    CHECK_ONE_STORE();
    m_Crst.Enter();

    // Threadstore needs special shutdown handling: if the process is suspending
    // for shutdown, ordinary threads give the lock back and block here instead
    // of proceeding.
    if (g_fSuspendOnShutdown)
    {
        m_Crst.ReleaseAndBlockForShutdownIfNotSpecialThread();
    }
}
// Release the ThreadStore Crst.  Counterpart of Enter(); use UnlockThreadStore
// rather than calling this directly.
DEBUG_NOINLINE void ThreadStore::Leave()
{
    WRAPPER_NO_CONTRACT;
    ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
    CHECK_ONE_STORE();
    m_Crst.Leave();
}
// Public entry point for taking the ThreadStore lock.
void ThreadStore::LockThreadStore()
{
    WRAPPER_NO_CONTRACT;

    // The actual implementation is in ThreadSuspend class since it is coupled
    // with thread suspension logic
    ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
}
// Public entry point for releasing the ThreadStore lock.
void ThreadStore::UnlockThreadStore()
{
    WRAPPER_NO_CONTRACT;

    // The actual implementation is in ThreadSuspend class since it is coupled
    // with thread suspension logic
    ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_OTHER);
}
// AddThread adds 'newThread' to m_ThreadList and updates the thread counters.
// If the caller already holds the ThreadStore lock, pass bRequiresTSL == FALSE;
// otherwise the lock is acquired and released here.
void ThreadStore::AddThread(Thread *newThread, BOOL bRequiresTSL)
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    LOG((LF_SYNC, INFO3, "AddThread obtain lock\n"));

    // Holder is constructed inactive so acquisition is conditional on bRequiresTSL.
    ThreadStoreLockHolder TSLockHolder(FALSE);
    if (bRequiresTSL)
    {
        TSLockHolder.Acquire();
    }

    s_pThreadStore->m_ThreadList.InsertTail(newThread);
    s_pThreadStore->m_ThreadCount++;
    // Track the high-water mark of simultaneously live threads.
    if (s_pThreadStore->m_MaxThreadCount < s_pThreadStore->m_ThreadCount)
        s_pThreadStore->m_MaxThreadCount = s_pThreadStore->m_ThreadCount;

    if (newThread->IsUnstarted())
        s_pThreadStore->m_UnstartedThreadCount++;

    newThread->SetThreadStateNC(Thread::TSNC_ExistInThreadStore);

    // A newly-added thread must be neither background nor dead; those counters
    // are adjusted later (TransferStartedThread / thread death).
    _ASSERTE(!newThread->IsBackground());
    _ASSERTE(!newThread->IsDead());

    if (bRequiresTSL)
    {
        TSLockHolder.Release();
    }
}
// This function is just designed to avoid deadlocks during abnormal process
// termination, and should not be used for any other purpose.
// It peeks directly at the underlying Win32 CRITICAL_SECTION: LockCount == -1
// is the unowned state, otherwise we check whether the current thread is the
// owner.  Best-effort only; by design there is no synchronization here.
BOOL ThreadStore::CanAcquireLock()
{
    WRAPPER_NO_CONTRACT;
    {
        return (s_pThreadStore->m_Crst.m_criticalsection.LockCount == -1 || (size_t)s_pThreadStore->m_Crst.m_criticalsection.OwningThread == (size_t)GetCurrentThreadId());
    }
}
// Whenever one of the components of OtherThreadsComplete() has changed in the
// correct direction, see whether we can now shutdown the EE because only background
// threads are running.
void ThreadStore::CheckForEEShutdown()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
if (g_fWeControlLifetime &&
s_pThreadStore->OtherThreadsComplete())
{
BOOL bRet;
bRet = s_pThreadStore->m_TerminationEvent.Set();
_ASSERTE(bRet);
}
}
// Remove 'target' from the store's thread list and update every per-category
// counter it participates in.  Returns TRUE if the thread was found and
// removed.  Caller must hold the ThreadStore lock (except at process exit).
BOOL ThreadStore::RemoveThread(Thread *target)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    BOOL    found;
    Thread *ret;

#if 0 // This assert is not valid when failing to create background GC thread.
    // Main GC thread holds the TS lock.
    _ASSERTE (ThreadStore::HoldingThreadStore());
#endif

    _ASSERTE(s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
             IsAtProcessExit());
    _ASSERTE(s_pThreadStore->DbgFindThread(target));
    ret = s_pThreadStore->m_ThreadList.FindAndRemove(target);
    _ASSERTE(ret && ret == target);
    found = (ret != NULL);

    if (found)
    {
        target->ResetThreadStateNC(Thread::TSNC_ExistInThreadStore);

        s_pThreadStore->m_ThreadCount--;

        if (target->IsDead())
        {
            s_pThreadStore->m_DeadThreadCount--;
            s_pThreadStore->DecrementDeadThreadCountForGCTrigger();
        }

        // Unstarted threads are not in the Background count:
        if (target->IsUnstarted())
            s_pThreadStore->m_UnstartedThreadCount--;
        else
        if (target->IsBackground())
            s_pThreadStore->m_BackgroundThreadCount--;

        // Fold this thread's thread-pool completion count into the process-wide
        // overflow accumulator before the Thread object goes away.
        FastInterlockExchangeAdd(
            &Thread::s_threadPoolCompletionCountOverflow,
            target->m_threadPoolCompletionCount);

        // Counter invariants: the total must dominate each category count.
        _ASSERTE(s_pThreadStore->m_ThreadCount >= 0);
        _ASSERTE(s_pThreadStore->m_BackgroundThreadCount >= 0);
        _ASSERTE(s_pThreadStore->m_ThreadCount >=
                 s_pThreadStore->m_BackgroundThreadCount);
        _ASSERTE(s_pThreadStore->m_ThreadCount >=
                 s_pThreadStore->m_UnstartedThreadCount);
        _ASSERTE(s_pThreadStore->m_ThreadCount >=
                 s_pThreadStore->m_DeadThreadCount);

        // One of the components of OtherThreadsComplete() has changed, so check whether
        // we should now exit the EE.
        CheckForEEShutdown();
    }
    return found;
}
// When a thread is created as unstarted. Later it may get started, in which case
// someone calls Thread::HasStarted() on that physical thread. This completes
// the Setup and calls here: move the thread from the "unstarted" category to
// the started (and possibly background) category, and clear TS_Unstarted so it
// becomes eligible for suspension/interruption.
void ThreadStore::TransferStartedThread(Thread *thread, BOOL bRequiresTSL)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    // Must run on the thread being transferred.
    _ASSERTE(GetThread() == thread);

    LOG((LF_SYNC, INFO3, "TransferUnstartedThread obtain lock\n"));
    ThreadStoreLockHolder TSLockHolder(FALSE);
    if (bRequiresTSL)
    {
        TSLockHolder.Acquire();
    }

    _ASSERTE(s_pThreadStore->DbgFindThread(thread));
    _ASSERTE(thread->HasValidThreadHandle());
    _ASSERTE(thread->m_State & Thread::TS_WeOwn);
    _ASSERTE(thread->IsUnstarted());
    _ASSERTE(!thread->IsDead());

    // An abort requested before the thread ever started surfaces here.
    if (thread->m_State & Thread::TS_AbortRequested)
    {
        PAL_CPP_THROW(EEException *, new EEException(COR_E_THREADABORTED));
    }

    // Of course, m_ThreadCount is already correct since it includes started and
    // unstarted threads.
    s_pThreadStore->m_UnstartedThreadCount--;

    // We only count background threads that have been started
    if (thread->IsBackground())
        s_pThreadStore->m_BackgroundThreadCount++;

    _ASSERTE(s_pThreadStore->m_PendingThreadCount > 0);
    FastInterlockDecrement(&s_pThreadStore->m_PendingThreadCount);

    // As soon as we erase this bit, the thread becomes eligible for suspension,
    // stopping, interruption, etc.
    FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Unstarted);
    FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_LegalToJoin);

    // release ThreadStore Crst to avoid Crst Violation when calling HandleThreadAbort later
    if (bRequiresTSL)
    {
        TSLockHolder.Release();
    }

    // One of the components of OtherThreadsComplete() has changed, so check whether
    // we should now exit the EE.
    CheckForEEShutdown();
}
// Dead-thread GC-trigger configuration/state; initialized in InitThreadStore().
// Threshold of dead threads that wakes the finalizer to consider a GC (0 = off).
LONG ThreadStore::s_DeadThreadCountThresholdForGCTrigger = 0;
// Minimum milliseconds between GCs triggered by this heuristic.
DWORD ThreadStore::s_DeadThreadGCTriggerPeriodMilliseconds = 0;
// Per-generation scratch counts, lazily allocated in TriggerGCForDeadThreadsIfNecessary().
SIZE_T *ThreadStore::s_DeadThreadGenerationCounts = nullptr;
// Called when a thread becomes dead.  Bumps the interlocked dead-thread count
// and, once the configured threshold is crossed and enough time has passed
// since the last max-generation GC, wakes the finalizer thread to run
// TriggerGCForDeadThreadsIfNecessary().
void ThreadStore::IncrementDeadThreadCountForGCTrigger()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a
    // background GC thread resetting this value, hence the interlocked operation. Ignore overflow; overflow would likely never
    // occur, the count is treated as unsigned, and nothing bad would happen if it were to overflow.
    SIZE_T count = static_cast<SIZE_T>(FastInterlockIncrement(&m_DeadThreadCountForGCTrigger));

    // countThreshold == 0 means the feature is disabled.
    SIZE_T countThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger);
    if (count < countThreshold || countThreshold == 0)
    {
        return;
    }

    IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap();
    if (gcHeap == nullptr)
    {
        return;
    }

    // Rate-limit: don't trigger if a max-generation GC started recently.
    SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcHeap->GetMaxGeneration());
    SIZE_T gcNowMilliseconds = gcHeap->GetNow();
    if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds)
    {
        return;
    }

    if (!g_fEEStarted) // required for FinalizerThread::EnableFinalization() below
    {
        return;
    }

    // The GC is triggered on the finalizer thread since it's not safe to trigger it on DLL_THREAD_DETACH.
    // TriggerGCForDeadThreadsIfNecessary() will determine which generation of GC to trigger, and may not actually trigger a GC.
    // If a GC is triggered, since there would be a delay before the dead thread count is updated, clear the count and wait for
    // it to reach the threshold again. If a GC would not be triggered, the count is still cleared here to prevent waking up the
    // finalizer thread to do the work in TriggerGCForDeadThreadsIfNecessary() for every dead thread.
    m_DeadThreadCountForGCTrigger = 0;
    m_TriggerGCForDeadThreads = true;
    FinalizerThread::EnableFinalization();
}
// Called when a dead thread is removed from the store.  Decrements the
// interlocked counter, clamping at zero (the counter may have been reset to 0
// concurrently by a GC, so a decrement can drive it negative).
void ThreadStore::DecrementDeadThreadCountForGCTrigger()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a
    // background GC thread resetting this value, hence the interlocked operation.
    if (FastInterlockDecrement(&m_DeadThreadCountForGCTrigger) < 0)
    {
        m_DeadThreadCountForGCTrigger = 0;
    }
}
// Called when a max-generation GC begins: reset the dead-thread trigger count.
void ThreadStore::OnMaxGenerationGCStarted()
{
    LIMITED_METHOD_CONTRACT;

    // A dead thread may contribute to triggering a GC at most once. After a max-generation GC occurs, if some dead thread
    // objects are still reachable due to references to the thread objects, they will not contribute to triggering a GC again.
    // Synchronize the store with increment/decrement operations occurring on different threads, and make the change visible to
    // other threads in order to prevent unnecessary GC triggers.
    FastInterlockExchange(&m_DeadThreadCountForGCTrigger, 0);
}
// Reports whether IncrementDeadThreadCountForGCTrigger() has requested that the
// finalizer thread run TriggerGCForDeadThreadsIfNecessary().
bool ThreadStore::ShouldTriggerGCForDeadThreads()
{
    LIMITED_METHOD_CONTRACT;

    return m_TriggerGCForDeadThreads;
}
// Runs on the finalizer thread (woken by IncrementDeadThreadCountForGCTrigger).
// Decides which generation, if any, to collect based on where the exposed
// objects of not-yet-considered dead threads live, then triggers a
// non-blocking GC of that generation.
void ThreadStore::TriggerGCForDeadThreadsIfNecessary()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    if (!m_TriggerGCForDeadThreads)
    {
        return;
    }
    m_TriggerGCForDeadThreads = false;

    if (g_fEEShutDown)
    {
        // Not safe to touch CLR state
        return;
    }

    unsigned gcGenerationToTrigger = 0;
    IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap();
    _ASSERTE(gcHeap != nullptr);
    SIZE_T generationCountThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger) / 2;
    unsigned maxGeneration = gcHeap->GetMaxGeneration();
    if (!s_DeadThreadGenerationCounts)
    {
        // initialize this field on first use with an entry for every generation
        s_DeadThreadGenerationCounts = new (nothrow) SIZE_T[maxGeneration + 1];
        if (!s_DeadThreadGenerationCounts)
        {
            // Allocation failed; silently skip this trigger attempt.
            return;
        }
    }
    memset(s_DeadThreadGenerationCounts, 0, sizeof(SIZE_T) * (maxGeneration + 1));

    {
        ThreadStoreLockHolder threadStoreLockHolder;
        GCX_COOP();

        // Determine the generation for which to trigger a GC. Iterate over all dead threads that have not yet been considered
        // for triggering a GC and see how many are in which generations.
        for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead);
            thread != nullptr;
            thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead))
        {
            if (thread->HasDeadThreadBeenConsideredForGCTrigger())
            {
                continue;
            }

            Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw());
            if (exposedObject == nullptr)
            {
                continue;
            }

            unsigned exposedObjectGeneration = gcHeap->WhichGeneration(exposedObject);
            SIZE_T newDeadThreadGenerationCount = ++s_DeadThreadGenerationCounts[exposedObjectGeneration];
            if (exposedObjectGeneration > gcGenerationToTrigger && newDeadThreadGenerationCount >= generationCountThreshold)
            {
                gcGenerationToTrigger = exposedObjectGeneration;
                // Can't go higher than maxGeneration; stop scanning early.
                if (gcGenerationToTrigger >= maxGeneration)
                {
                    break;
                }
            }
        }

        // Make sure that enough time has elapsed since the last GC of the desired generation. We don't want to trigger GCs
        // based on this heuristic too often. Give it some time to let the memory pressure trigger GCs automatically, and only
        // if it doesn't in the given time, this heuristic may kick in to trigger a GC.
        SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcGenerationToTrigger);
        SIZE_T gcNowMilliseconds = gcHeap->GetNow();
        if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds)
        {
            return;
        }

        // For threads whose exposed objects are in the generation of GC that will be triggered or in a lower GC generation,
        // mark them as having contributed to a GC trigger to prevent redundant GC triggers
        for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead);
            thread != nullptr;
            thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead))
        {
            if (thread->HasDeadThreadBeenConsideredForGCTrigger())
            {
                continue;
            }

            Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw());
            if (exposedObject == nullptr)
            {
                continue;
            }

            if (gcGenerationToTrigger < maxGeneration &&
                gcHeap->WhichGeneration(exposedObject) > gcGenerationToTrigger)
            {
                continue;
            }

            thread->SetHasDeadThreadBeenConsideredForGCTrigger();
        }
    } // ThreadStoreLockHolder, GCX_COOP()

    GCHeapUtilities::GetGCHeap()->GarbageCollect(gcGenerationToTrigger, FALSE, collection_non_blocking);
}
#endif // #ifndef DACCESS_COMPILE
// Access the list of threads.  You must be inside a critical section, otherwise
// the "cursor" thread might disappear underneath you.  Pass in NULL for the
// cursor to begin at the start of the list.  Returns the next thread whose
// m_State, masked by 'mask', equals 'bits', or NULL when the list is exhausted.
Thread *ThreadStore::GetAllThreadList(Thread *cursor, ULONG mask, ULONG bits)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;
    SUPPORTS_DAC;

#ifndef DACCESS_COMPILE
    _ASSERTE((s_pThreadStore->m_Crst.GetEnterCount() > 0) || IsAtProcessExit());
#endif

    // Advance from the cursor (or start at the head) and return the first
    // thread matching the state filter.
    for (cursor = (cursor != NULL)
                      ? s_pThreadStore->m_ThreadList.GetNext(cursor)
                      : s_pThreadStore->m_ThreadList.GetHead();
         cursor != NULL;
         cursor = s_pThreadStore->m_ThreadList.GetNext(cursor))
    {
        if ((cursor->m_State & mask) == bits)
            return cursor;
    }

    return NULL;
}
// Iterate over the threads that have been started: filters GetAllThreadList to
// threads that are neither unstarted nor dead.
Thread *ThreadStore::GetThreadList(Thread *cursor)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;
    SUPPORTS_DAC;

    return GetAllThreadList(cursor, (Thread::TS_Unstarted | Thread::TS_Dead), 0);
}
//---------------------------------------------------------------------------------------
//
// Grab a consistent snapshot of the thread's state, for reporting purposes only.
//
// Return Value:
//    the current state of the thread, with TS_Dead folded in whenever
//    TS_ReportDead is set.
//
Thread::ThreadState Thread::GetSnapshotState()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    ThreadState snapshot = m_State;

    // A thread marked TS_ReportDead must be reported as dead regardless of the
    // rest of its state bits.
    if ((snapshot & TS_ReportDead) != 0)
    {
        snapshot = static_cast<ThreadState>(snapshot | TS_Dead);
    }

    return snapshot;
}
#ifndef DACCESS_COMPILE
// Wait on 'pEvent' inside an EX_TRY so that any exception thrown by the wait is
// swallowed.  Returns TRUE when the wait threw (caller should loop and retry),
// FALSE when the wait completed and *pStatus holds its result.
BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus)
{
    CONTRACTL
    {
        NOTHROW;
        WRAPPER(GC_TRIGGERS);
    }
    CONTRACTL_END;

    BOOL fLoop = TRUE;
    EX_TRY
    {
        *pStatus = pEvent->Wait(timeout, fAlertable);
        fLoop = FALSE;
    }
    EX_CATCH
    {
    }
    EX_END_CATCH(SwallowAllExceptions);

    return fLoop;
}
// We shut down the EE only when all the non-background threads have terminated
// (unless this is an exceptional termination). So the main thread calls here to
// wait before tearing down the EE.  Blocks on m_TerminationEvent, which
// CheckForEEShutdown() sets once only background threads remain.
void ThreadStore::WaitForOtherThreads()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    CHECK_ONE_STORE();

    Thread      *pCurThread = GetThread();

    // Regardless of whether the main thread is a background thread or not, force
    // it to be one. This simplifies our rules for counting non-background threads.
    pCurThread->SetBackground(TRUE);

    LOG((LF_SYNC, INFO3, "WaitForOtherThreads obtain lock\n"));
    ThreadStoreLockHolder TSLockHolder(TRUE);
    if (!OtherThreadsComplete())
    {
        // Drop the lock before blocking so other threads can come and go.
        TSLockHolder.Release();

        FastInterlockOr((ULONG *) &pCurThread->m_State, Thread::TS_ReportDead);

        DWORD ret = WAIT_OBJECT_0;
        // Retry while the wait itself throws (see CLREventWaitWithTry).
        while (CLREventWaitWithTry(&m_TerminationEvent, INFINITE, TRUE, &ret))
        {
        }
        _ASSERTE(ret == WAIT_OBJECT_0);
    }
}
// Every EE process can lazily create a GUID that uniquely identifies it (for
// purposes of remoting).  Uses double-checked creation under the ThreadStore
// lock; returns IID_NULL if GUID creation fails.
const GUID &ThreadStore::GetUniqueEEId()
{
    CONTRACTL {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    if (!m_GuidCreated)
    {
        ThreadStoreLockHolder TSLockHolder(TRUE);
        // Re-check under the lock: another thread may have created it already.
        if (!m_GuidCreated)
        {
            HRESULT hr = ::CoCreateGuid(&m_EEGuid);

            _ASSERTE(SUCCEEDED(hr));
            if (SUCCEEDED(hr))
                m_GuidCreated = TRUE;
        }

        if (!m_GuidCreated)
            return IID_NULL;
    }
    return m_EEGuid;
}
#ifdef _DEBUG
// Debug-only: walk the entire thread list looking for 'target', and while doing
// so re-derive every counter (total/background/unstarted/dead) and assert that
// each matches the store's bookkeeping.  Also sanity-checks the global
// trap-returning-threads count.  Returns TRUE if 'target' is in the list.
BOOL ThreadStore::DbgFindThread(Thread *target)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CHECK_ONE_STORE();

    // Cache the current change stamp for g_TrapReturningThreads
    LONG chgStamp = g_trtChgStamp;
    STRESS_LOG3(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chgStamp=%d\n", GetThread(), g_TrapReturningThreads.Load(), chgStamp);

#if 0 // g_TrapReturningThreads debug code.
        int             iRetry = 0;
Retry:
#endif // g_TrapReturningThreads debug code.
    BOOL    found = FALSE;
    Thread *cur = NULL;
    LONG    cnt = 0;
    LONG    cntBack = 0;
    LONG    cntUnstart = 0;
    LONG    cntDead = 0;
    LONG    cntReturn = 0;

    while ((cur = GetAllThreadList(cur, 0, 0)) != NULL)
    {
        cnt++;

        if (cur->IsDead())
            cntDead++;

        // Unstarted threads do not contribute to the count of background threads
        if (cur->IsUnstarted())
            cntUnstart++;
        else
        if (cur->IsBackground())
            cntBack++;

        if (cur == target)
            found = TRUE;

        // Note that (DebugSuspendPending | SuspendPending) implies a count of 2.
        // We don't count GCPending because a single trap is held for the entire
        // GC, instead of counting each interesting thread.
        if (cur->m_State & Thread::TS_DebugSuspendPending)
            cntReturn++;

        // CoreCLR does not support user-requested thread suspension
        _ASSERTE(!(cur->m_State & Thread::TS_UserSuspendPending));

        if (cur->m_TraceCallCount > 0)
            cntReturn++;

        if (cur->IsAbortRequested())
            cntReturn++;
    }

    _ASSERTE(cnt == m_ThreadCount);
    _ASSERTE(cntUnstart == m_UnstartedThreadCount);
    _ASSERTE(cntBack == m_BackgroundThreadCount);
    _ASSERTE(cntDead == m_DeadThreadCount);
    _ASSERTE(0 <= m_PendingThreadCount);

#if 0 // g_TrapReturningThreads debug code.
    if (cntReturn != g_TrapReturningThreads /*&& !g_fEEShutDown*/)
    {       // If count is off, try again, to account for multiple threads.
        if (iRetry < 4)
        {
            //              printf("Retry %d.  cntReturn:%d, gReturn:%d\n", iRetry, cntReturn, g_TrapReturningThreads);
            ++iRetry;
            goto Retry;
        }
        printf("cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
               cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit());
        LOG((LF_CORDB, LL_INFO1000,
             "SUSPEND: cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
             cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()) );

        //_ASSERTE(cntReturn + 2 >= g_TrapReturningThreads);
    }
    if (iRetry > 0 && iRetry < 4)
    {
        printf("%d retries to re-sync counted TrapReturn with global TrapReturn.\n", iRetry);
    }
#endif // g_TrapReturningThreads debug code.

    STRESS_LOG4(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chg=%d. cnt=%d\n", GetThread(), g_TrapReturningThreads.Load(), g_trtChgStamp.Load(), cntReturn);

    // Because of race conditions and the fact that the GC places its
    // own count, I can't assert this precisely.  But I do want to be
    // sure that this count isn't wandering ever higher -- with a
    // nasty impact on the performance of GC mode changes and method
    // call chaining!
    //
    // We don't bother asserting this during process exit, because
    // during a shutdown we will quietly terminate threads that are
    // being waited on.  (If we aren't shutting down, we carefully
    // decrement our counts and alert anyone waiting for us to
    // return).
    //
    // Note: we don't actually assert this if
    // ThreadStore::TrapReturningThreads() updated g_TrapReturningThreads
    // between the beginning of this function and the moment of the assert.
    // *** The order of evaluation in the if condition is important ***
    _ASSERTE(
        (g_trtChgInFlight != 0 || (cntReturn + 2 >= g_TrapReturningThreads) || chgStamp != g_trtChgStamp) ||
        g_fEEShutDown);

    return found;
}
#endif // _DEBUG
// Handle a pending user interrupt or abort on the current thread: process an
// abort request if one is pending, then, if a Thread.Interrupt was requested,
// clear the interruptible state and throw ThreadInterruptedException.
void Thread::HandleThreadInterrupt (BOOL fWaitForADUnload)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_SO_TOLERANT;

    // If we're waiting for shutdown, we don't want to abort/interrupt this thread
    if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
        return;

    BEGIN_SO_INTOLERANT_CODE(this);

    if ((m_UserInterrupt & TI_Abort) != 0)
    {
        // If the thread is waiting for AD unload to finish, and the thread is interrupted,
        // we can start aborting.
        HandleThreadAbort(fWaitForADUnload);
    }
    if ((m_UserInterrupt & TI_Interrupt) != 0)
    {
        // Consume the interrupt request before throwing so it fires only once.
        ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible));
        FastInterlockAnd ((DWORD*)&m_UserInterrupt, ~TI_Interrupt);

        COMPlusThrow(kThreadInterruptedException);
    }
    END_SO_INTOLERANT_CODE;
}
#ifdef _DEBUG
#define MAXSTACKBYTES (2 * GetOsPageSize())
// Debug-only helper for FastGCStress: zero the unused stack region below the
// current frame (down to the stack limit, capped at MAXSTACKBYTES) so stale
// values left on the stack cannot masquerade as GC references, then record the
// cleaned base on the current Thread.
void CleanStackForFastGCStress ()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    PVOID StackLimit = ClrTeb::GetStackLimit();
    size_t nBytes = (size_t)&nBytes - (size_t)StackLimit;
    // Round the byte count down to a multiple of sizeof(size_t).  The previous
    // mask, ~sizeof(size_t), only cleared bit 3 and did not actually align the
    // value; the correct alignment mask is ~(sizeof(size_t) - 1).
    nBytes &= ~(sizeof (size_t) - 1);
    if (nBytes > MAXSTACKBYTES) {
        nBytes = MAXSTACKBYTES;
    }
    size_t* buffer = (size_t*) _alloca (nBytes);
    memset(buffer, 0, nBytes);
    GetThread()->m_pCleanedStackBase = &nBytes;
}
// Debug-only: clear the per-thread table of "dangerous" object refs used by
// OBJECTREF validation, then zero the cleaned-stack watermark via
// CLEANSTACKFORFASTGCSTRESS.  Preserves the Win32 last-error around the work.
void Thread::ObjectRefFlush(Thread* thread)
{
    BEGIN_PRESERVE_LAST_ERROR;

    // The constructor and destructor of AutoCleanupSONotMainlineHolder (allocated by SO_NOT_MAINLINE_FUNCTION below)
    // may trash the last error, so we need to save and restore last error here.  Also, we need to add a scope here
    // because we can't let the destructor run after we call SetLastError().
    {
        // this is debug only code, so no need to validate
        STATIC_CONTRACT_NOTHROW;
        STATIC_CONTRACT_GC_NOTRIGGER;
        STATIC_CONTRACT_ENTRY_POINT;

        _ASSERTE(thread->PreemptiveGCDisabled());  // Should have been in managed code

        memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs));
        thread->m_allObjRefEntriesBad = FALSE;
        CLEANSTACKFORFASTGCSTRESS ();
    }

    END_PRESERVE_LAST_ERROR;
}
#endif
#if defined(STRESS_HEAP)

// Hash map of previously-seen stack traces; Thread::UniqueStack uses it to run
// per-stack GC-stress work at most once per distinct call stack.
PtrHashMap *g_pUniqueStackMap = NULL;
// Crst guarding lazy initialization of, and insertion into, g_pUniqueStackMap.
Crst *g_pUniqueStackCrst = NULL;

// Maximum number of return addresses recorded per stack trace.
#define UniqueStackDepth 8
// Equality callback for g_pUniqueStackMap.  Each stored value is an array of
// size_t where element 0 is the recorded frame count and elements 1..count are
// return addresses.  Returns TRUE when the two traces are identical.
BOOL StackCompare (UPTR val1, UPTR val2)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // The hash map stores keys shifted right by one bit; restore the stored
    // pointer by shifting left.  The probe key (val2) arrives unshifted.
    size_t *stored = (size_t *)(val1 << 1);
    size_t *probe  = (size_t *)val2;

    // Traces with different recorded depths cannot match.
    if (stored[0] != probe[0]) {
        return FALSE;
    }

    // Only the first UniqueStackDepth frames are stored/compared.
    size_t frames = stored[0];
    if (frames >= UniqueStackDepth) {
        frames = UniqueStackDepth;
    }

    // Compare the return addresses that follow the count word.
    for (size_t i = 1; i <= frames; i ++) {
        if (stored[i] != probe[i]) {
            return FALSE;
        }
    }

    return TRUE;
}
// Lazily create g_pUniqueStackCrst (via an interlocked race where the loser
// deletes its attempt) and then, under that Crst, create g_pUniqueStackMap.
void UniqueStackSetupMap()
{
    WRAPPER_NO_CONTRACT;

    if (g_pUniqueStackCrst == NULL)
    {
        Crst *Attempt = new Crst (
                                     CrstUniqueStack,
                                     CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE));

        if (FastInterlockCompareExchangePointer(&g_pUniqueStackCrst,
                                                Attempt,
                                                NULL) != NULL)
        {
            // We lost the race
            delete Attempt;
        }
    }

    // Now we have a Crst we can use to synchronize the remainder of the init.
    if (g_pUniqueStackMap == NULL)
    {
        CrstHolder ch(g_pUniqueStackCrst);

        if (g_pUniqueStackMap == NULL)
        {
            PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap ();
            LockOwner lock = {g_pUniqueStackCrst, IsOwnerOfCrst};
            map->Init (256, StackCompare, TRUE, &lock);
            g_pUniqueStackMap = map;
        }
    }
}
// Initialize the unique-stack map, swallowing any exception (e.g. OOM).
// Returns FALSE only when initialization threw.
BOOL StartUniqueStackMapHelper()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    BOOL fOK = TRUE;
    EX_TRY
    {
        if (g_pUniqueStackMap == NULL)
        {
            UniqueStackSetupMap();
        }
    }
    EX_CATCH
    {
        fOK = FALSE;
    }
    EX_END_CATCH(SwallowAllExceptions);

    return fOK;
}
// Public wrapper: ensure the unique-stack map exists.  Returns FALSE on failure.
BOOL StartUniqueStackMap ()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return StartUniqueStackMapHelper();
}
#ifndef FEATURE_PAL
// Fold one return address into the running stack-trace hash.
// hash * 9 is exactly the original (hash << 3) + hash; unsigned arithmetic on
// size_t makes the two forms bit-identical, including on overflow.
size_t UpdateStackHash(size_t hash, size_t retAddr)
{
    return (hash * 9) ^ retAddr;
}
/***********************************************************************/
// Hash every return address found between 'stackTop' (the lowest address) and
// 'stackStop' (the highest address), recording up to UniqueStackDepth of them
// into stackTrace[1..]; stackTrace[0] receives the total count found.
// x86 scans raw stack words for plausible return addresses; other targets
// unwind properly via RtlLookupFunctionEntry/VirtualUnwindCallFrame.
// stackBase/stackLimit are unused on the non-x86 path.
size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, size_t stackBase, size_t stackLimit)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // return a hash of every return address found between 'stackTop' (the
    // lowest address) and 'stackStop' (the highest address)

    size_t hash = 0;
    int    idx  = 0;

#ifdef _TARGET_X86_

    static size_t moduleBase = (size_t) -1;
    static size_t moduleTop = (size_t) -1;
    if (moduleTop == (size_t) -1)
    {
        MEMORY_BASIC_INFORMATION mbi;

        if (ClrVirtualQuery(getStackHash, &mbi, sizeof(mbi)))
        {
            moduleBase = (size_t)mbi.AllocationBase;
            moduleTop = (size_t)mbi.BaseAddress + mbi.RegionSize;
        }
        else
        {
            // way bad error, probably just assert and exit
            _ASSERTE (!"ClrVirtualQuery failed");
            moduleBase = 0;
            moduleTop = 0;
        }
    }

    while (stackTop < stackStop)
    {
        // Clean out things that point to stack, as those can't be return addresses
        if (*stackTop > moduleBase && *stackTop < moduleTop)
        {
            TADDR dummy;

            if (isRetAddr((TADDR)*stackTop, &dummy))
            {
                hash = UpdateStackHash(hash, *stackTop);

                // If there is no jitted code on the stack, then just use the
                // top 16 frames as the context.
                idx++;
                if (idx <= UniqueStackDepth)
                {
                    stackTrace [idx] = *stackTop;
                }
            }
        }
        stackTop++;
    }

#else // _TARGET_X86_

    CONTEXT ctx;
    ClrCaptureContext(&ctx);

    UINT_PTR            uControlPc = (UINT_PTR)GetIP(&ctx);
    UINT_PTR            uImageBase;

    UINT_PTR uPrevControlPc = uControlPc;

    for (;;)
    {
        RtlLookupFunctionEntry(uControlPc,
                               ARM_ONLY((DWORD*))(&uImageBase),
                               NULL
                               );

        // Stop once we unwind out of the runtime's own image.
        if (((UINT_PTR)g_pMSCorEE) != uImageBase)
        {
            break;
        }

        uControlPc = Thread::VirtualUnwindCallFrame(&ctx);

        UINT_PTR uRetAddrForHash = uControlPc;

        if (uPrevControlPc == uControlPc)
        {
            // This is a special case when we fail to acquire the loader lock
            // in RtlLookupFunctionEntry(), which then returns false.  The end
            // result is that we cannot go any further on the stack and
            // we will loop infinitely (because the owner of the loader lock
            // is blocked on us).
            hash = 0;
            break;
        }
        else
        {
            uPrevControlPc = uControlPc;
        }

        hash = UpdateStackHash(hash, uRetAddrForHash);

        // If there is no jitted code on the stack, then just use the
        // top 16 frames as the context.
        idx++;
        if (idx <= UniqueStackDepth)
        {
            stackTrace [idx] = uRetAddrForHash;
        }
    }
#endif // _TARGET_X86_

    stackTrace [0] = idx;

    return(hash);
}
// Copy 'stackTrace' (count word + up to UniqueStackDepth return addresses)
// into the low-frequency loader heap and insert it into g_pUniqueStackMap
// under 'stackTraceHash'.  Any exception (e.g. OOM) is swallowed; on failure
// the trace is simply not remembered.
void UniqueStackHelper(size_t stackTraceHash, size_t *stackTrace)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    EX_TRY {
        size_t nElem = stackTrace[0];
        if (nElem >= UniqueStackDepth) {
            nElem = UniqueStackDepth;
        }
        // The buffer holds size_t values, so size it with sizeof(size_t);
        // the previous sizeof(size_t *) happened to be the same size on all
        // supported targets but was semantically wrong.
        AllocMemHolder<size_t> stackTraceInMap = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(size_t)) * (S_SIZE_T(nElem) + S_SIZE_T(1)));
        memcpy (stackTraceInMap, stackTrace, sizeof(size_t) * (nElem + 1));
        g_pUniqueStackMap->InsertValue(stackTraceHash, stackTraceInMap);
        stackTraceInMap.SuppressRelease();
    }
    EX_CATCH
    {
    }
    EX_END_CATCH(SwallowAllExceptions);
}
/***********************************************************************/
/* returns true if this stack has not been seen before, useful for
   running tests only once per stack trace.                          */
BOOL Thread::UniqueStack(void* stackStart)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_NOT_MAINLINE;
    }
    CONTRACTL_END;

    // If we where not told where to start, start at the caller of UniqueStack
    if (stackStart == 0)
    {
        stackStart = &stackStart;
    }

    if (g_pUniqueStackMap == NULL)
    {
        if (!StartUniqueStackMap ())
        {
            // We fail to initialize unique stack map due to OOM.
            // Let's say the stack is unique.
            return TRUE;
        }
    }

    size_t stackTrace[UniqueStackDepth+1] = {0};

        // stackTraceHash represents a hash of entire stack at the time we make the call,
        // We insure at least GC per unique stackTrace.  What information is contained in
        // 'stackTrace' is somewhat arbitrary.  We choose it to mean all functions live
        // on the stack up to the first jitted function.

    size_t stackTraceHash;
    Thread* pThread = GetThread();

    void* stopPoint = pThread->m_CacheStackBase;

#ifdef _TARGET_X86_
    // Find the stop point (most jitted function)
    Frame* pFrame = pThread->GetFrame();
    for(;;)
    {
        // skip GC frames
        if (pFrame == 0 || pFrame == (Frame*) -1)
            break;

        pFrame->GetFunction();      // This insures that helper frames are inited

        if (pFrame->GetReturnAddress() != 0)
        {
            stopPoint = pFrame;
            break;
        }
        pFrame = pFrame->Next();
    }
#endif // _TARGET_X86_

    // Get hash of all return addresses between here an the top most jitted function
    stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint,
        size_t(pThread->m_CacheStackBase), size_t(pThread->m_CacheStackLimit));

    // A hash of 0 means the unwind failed (see getStackHash); treat it, and any
    // already-recorded trace, as "seen before".
    if (stackTraceHash == 0 ||
        g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
    {
        return FALSE;
    }
    BOOL fUnique = FALSE;

    {
        CrstHolder ch(g_pUniqueStackCrst);
#ifdef _DEBUG
        if (GetThread ())
            GetThread ()->m_bUniqueStacking = TRUE;
#endif
        // Re-check under the lock: another thread may have inserted this trace
        // between the unlocked lookup above and here.
        if (g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
        {
            fUnique = FALSE;
        }
        else
        {
            fUnique = TRUE;
            FAULT_NOT_FATAL();
            UniqueStackHelper(stackTraceHash, stackTrace);
        }
#ifdef _DEBUG
        if (GetThread ())
            GetThread ()->m_bUniqueStacking = FALSE;
#endif
    }

#ifdef _DEBUG
    static int fCheckStack = -1;
    if (fCheckStack == -1)
    {
        fCheckStack = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_FastGCCheckStack);
    }
    if (fCheckStack && pThread->m_pCleanedStackBase > stackTrace
        && pThread->m_pCleanedStackBase - stackTrace > (int) MAXSTACKBYTES)
    {
        _ASSERTE (!"Garbage on stack");
    }
#endif
    return fUnique;
}
#else // !FEATURE_PAL
// PAL platforms: unique-stack detection is not implemented, so report every
// stack as already seen (per-stack GC-stress work is skipped).
BOOL Thread::UniqueStack(void* stackStart)
{
    return FALSE;
}
#endif // !FEATURE_PAL
#endif // STRESS_HEAP
/*
* GetStackLowerBound
*
* Returns the lower bound of the stack space. Note -- the practical bound is some number of pages greater than
* this value -- those pages are reserved for a stack overflow exception processing.
*
* Parameters:
* None
*
* Returns:
* address of the lower bound of the threads's stack.
*/
// Return the lower bound of this thread's stack (see block comment above).
// On Windows this is derived from VirtualQuery on a local; on PAL platforms it
// comes directly from PAL_GetStackLimit().  Returns NULL if the query fails.
void * Thread::GetStackLowerBound()
{
    // Called during fiber switch.  Can not have non-static contract.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_SO_TOLERANT;

#ifndef FEATURE_PAL
    MEMORY_BASIC_INFORMATION lowerBoundMemInfo;
    SIZE_T dwRes;

    // Query the region containing a local variable; its AllocationBase is the
    // bottom of the stack reservation.
    dwRes = ClrVirtualQuery((const void *)&lowerBoundMemInfo, &lowerBoundMemInfo, sizeof(MEMORY_BASIC_INFORMATION));

    if (sizeof(MEMORY_BASIC_INFORMATION) == dwRes)
    {
        return (void *)(lowerBoundMemInfo.AllocationBase);
    }
    else
    {
        return NULL;
    }
#else // !FEATURE_PAL
    return PAL_GetStackLimit();
#endif // !FEATURE_PAL
}
/*
* GetStackUpperBound
*
* Return the upper bound of the thread's stack space.
*
* Parameters:
* None
*
* Returns:
* address of the base of the threads's stack.
*/
// Return the upper bound (base) of this thread's stack, from the TEB.
void *Thread::GetStackUpperBound()
{
    // Called during fiber switch.  Can not have non-static contract.
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_SO_TOLERANT;

    return ClrTeb::GetStackBase();
}
// Cache this thread's stack bounds and derived limits.  With scope == fAll the
// base/limit and the EnsureSufficientExecutionStack threshold are recomputed;
// in all cases the stack guarantee is (re)established and the last-allowable
// address / probe limit are refreshed.  Returns FALSE on failure.
BOOL Thread::SetStackLimits(SetStackLimitScope scope)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    if (scope == fAll)
    {
        m_CacheStackBase  = GetStackUpperBound();
        m_CacheStackLimit = GetStackLowerBound();
        if (m_CacheStackLimit == NULL)
        {
            _ASSERTE(!"Failed to set stack limits");
            return FALSE;
        }

        // Compute the limit used by EnsureSufficientExecutionStack and cache it on the thread. This minimum stack size should
        // be sufficient to allow a typical non-recursive call chain to execute, including potential exception handling and
        // garbage collection. Used for probing for available stack space through RuntimeImports.EnsureSufficientExecutionStack,
        // among other things.
#ifdef BIT64
        const UINT_PTR MinExecutionStackSize = 128 * 1024;
#else // !BIT64
        const UINT_PTR MinExecutionStackSize = 64 * 1024;
#endif // BIT64
        _ASSERTE(m_CacheStackBase >= m_CacheStackLimit);
        if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) >
            MinExecutionStackSize)
        {
            m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + MinExecutionStackSize;
        }
        else
        {
            // Stack is too small for the margin; any depth beyond the base fails the check.
            m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
        }
    }

    // Ensure that we've setup the stack guarantee properly before we cache the stack limits
    // as they depend upon the stack guarantee.
    if (FAILED(CLRSetThreadStackGuarantee()))
        return FALSE;

    // Cache the last stack addresses that we are allowed to touch.  We throw a stack overflow
    // if we cross that line.  Note that we ignore any subsequent calls to STSG for Whidbey until
    // we see an exception and recache the values.  We use the LastAllowableAddresses to
    // determine if we've taken a hard SO and the ProbeLimits on the probes themselves.

    m_LastAllowableStackAddress = GetLastNormalStackAddress();

    if (g_pConfig->ProbeForStackOverflow())
    {
        m_ProbeLimit = m_LastAllowableStackAddress;
    }
    else
    {
        // If we have stack probing disabled, set the probeLimit to 0 so that all probes will pass.  This
        // way we don't have to do an extra check in the probe code.
        m_ProbeLimit = 0;
    }

    return TRUE;
}
//---------------------------------------------------------------------------------------------
// Routines we use to manage a thread's stack, for fiber switching or stack overflow purposes.
//---------------------------------------------------------------------------------------------
//---------------------------------------------------------------------------------------------
// Thread::CLRSetThreadStackGuarantee
//
// Establishes the OS stack guarantee (the stack the OS keeps usable after a stack overflow)
// for this thread, sized to cover the CLR's own SO dispatch needs.
//
// Returns:
//   S_OK if the guarantee is not in use (or on FEATURE_PAL builds, where this is a no-op),
//   or if ::SetThreadStackGuarantee succeeded; otherwise the HRESULT of the OS failure.
//---------------------------------------------------------------------------------------------
HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope)
{
    CONTRACTL
    {
        WRAPPER(NOTHROW);
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

#ifndef FEATURE_PAL
    // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks
    if (Thread::IsSetThreadStackGuaranteeInUse(fScope))
    {
        // <TODO> Tune this as needed </TODO>
        ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE;
        int EXTRA_PAGES = 0;
#if defined(_WIN64)
        // Free Build EH Stack Stats:
        // --------------------------------
        // currently the maximum stack usage we'll face while handling a SO includes:
        //     4.3k for the OS (kernel32!RaiseException, Rtl EH dispatch code, RtlUnwindEx [second pass])
        //     1.2k for the CLR EH setup (NakedThrowHelper*)
        //     4.5k for other heavy CLR stack creations (2x CONTEXT, 1x REGDISPLAY)
        //    ~1.0k for other misc CLR stack allocations
        //   -----
        //    11.0k --> ~2.75 pages for CLR SO EH dispatch
        //
        // -plus we might need some more for debugger EH dispatch, Watson, etc...
        // -also need to take into account that we can lose up to 1 page of the guard region
        // -additionally, we need to provide some region to hosts to allow for lock acquisition in a hosted scenario
        //
        EXTRA_PAGES = 3;
        INDEBUG(EXTRA_PAGES += 1);   // debug builds burn extra stack on checked-build infrastructure

        // An explicit ThreadGuardPages config value overrides the computed EXTRA_PAGES.
        int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages);
        if (ThreadGuardPages == 0)
        {
            uGuardSize += (EXTRA_PAGES * GetOsPageSize());
        }
        else
        {
            uGuardSize += (ThreadGuardPages * GetOsPageSize());
        }
#else // _WIN64
#ifdef _DEBUG
        uGuardSize += (1 * GetOsPageSize());    // one extra page for debug infrastructure
#endif // _DEBUG
#endif // _WIN64

        LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize));

        if (!::SetThreadStackGuarantee(&uGuardSize))
        {
            return HRESULT_FROM_GetLastErrorNA();
        }
    }
#endif // !FEATURE_PAL

    return S_OK;
}
/*
* GetLastNormalStackAddress
*
* GetLastNormalStackAddress returns the last stack address before the guard
* region of a thread. This is the last address that one could write to before
* a stack overflow occurs.
*
* Parameters:
* StackLimit - the base of the stack allocation
*
* Returns:
* Address of the first page of the guard region.
*/
// Computes the last writable stack address above the guard machinery, given the base
// (lowest address) of a stack allocation. See the block comment above for the contract.
UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // Current guard region size for this thread (queried from the OS when in use).
    UINT_PTR cbStackGuarantee = GetStackGuarantee();

    // Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them
    // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS
    // will guarantee us a certain amount of stack remaining after a stack overflow. This is called the
    // "stack guarantee". But to do this, it has to fault on the page before that region as the app is
    // allowed to fault at the very end of that page. So, as a result, the last normal stack address is
    // one page sooner.
    return StackLimit + (cbStackGuarantee
#ifndef FEATURE_PAL
        + GetOsPageSize()
#endif // !FEATURE_PAL
        + HARD_GUARD_REGION_SIZE);
}
#ifdef _DEBUG
// Logs the symbolic names of the set bits in a MEMORY_BASIC_INFORMATION's State and
// Protect fields. Debug-only logging helper; no-op under FEATURE_PAL.
static void DebugLogMBIFlags(UINT uState, UINT uProtect)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

#ifndef FEATURE_PAL
    // NOTE: no line-continuation backslash after the final '}' — a trailing '\' there
    // would splice the next statement into the macro body.
#define LOG_FLAG(flags, name) \
    if (flags & name)         \
    {                         \
        LOG((LF_EH, LL_INFO1000, "" #name " ")); \
    }

    if (uState)
    {
        LOG((LF_EH, LL_INFO1000, "State: "));

        LOG_FLAG(uState, MEM_COMMIT);
        LOG_FLAG(uState, MEM_RESERVE);
        LOG_FLAG(uState, MEM_DECOMMIT);
        LOG_FLAG(uState, MEM_RELEASE);
        LOG_FLAG(uState, MEM_FREE);
        LOG_FLAG(uState, MEM_PRIVATE);
        LOG_FLAG(uState, MEM_MAPPED);
        LOG_FLAG(uState, MEM_RESET);
        LOG_FLAG(uState, MEM_TOP_DOWN);
        LOG_FLAG(uState, MEM_WRITE_WATCH);
        LOG_FLAG(uState, MEM_PHYSICAL);
        LOG_FLAG(uState, MEM_LARGE_PAGES);
        LOG_FLAG(uState, MEM_4MB_PAGES);
    }

    if (uProtect)
    {
        LOG((LF_EH, LL_INFO1000, "Protect: "));

        LOG_FLAG(uProtect, PAGE_NOACCESS);
        LOG_FLAG(uProtect, PAGE_READONLY);
        LOG_FLAG(uProtect, PAGE_READWRITE);
        LOG_FLAG(uProtect, PAGE_WRITECOPY);
        LOG_FLAG(uProtect, PAGE_EXECUTE);
        LOG_FLAG(uProtect, PAGE_EXECUTE_READ);
        LOG_FLAG(uProtect, PAGE_EXECUTE_READWRITE);
        LOG_FLAG(uProtect, PAGE_EXECUTE_WRITECOPY);
        LOG_FLAG(uProtect, PAGE_GUARD);
        LOG_FLAG(uProtect, PAGE_NOCACHE);
        LOG_FLAG(uProtect, PAGE_WRITECOMBINE);
    }

#undef LOG_FLAG
#endif // !FEATURE_PAL
}
// Walks [uLowAddress, uHighAddress) region-by-region via ClrVirtualQuery and logs each
// region's extent, size in pages, and State/Protect flags. Debug logging helper only.
static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_INTOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    MEMORY_BASIC_INFORMATION meminfo;
    UINT_PTR uStartOfThisRegion = uLowAddress;

    LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));

    while (uStartOfThisRegion < uHighAddress)
    {
        SIZE_T res = ClrVirtualQuery((const void *)uStartOfThisRegion, &meminfo, sizeof(meminfo));

        if (sizeof(meminfo) != res)
        {
            // Query failed; log and bail rather than looping forever on a bad address.
            LOG((LF_EH, LL_INFO1000, "VirtualQuery failed on %p\n", uStartOfThisRegion));
            break;
        }

        UINT_PTR uStartOfNextRegion = uStartOfThisRegion + meminfo.RegionSize;

        // Clamp the final region to the requested upper bound.
        if (uStartOfNextRegion > uHighAddress)
        {
            uStartOfNextRegion = uHighAddress;
        }

        UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion;

        LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg) ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize()));
        DebugLogMBIFlags(meminfo.State, meminfo.Protect);
        LOG((LF_EH, LL_INFO1000, "\n"));

        uStartOfThisRegion = uStartOfNextRegion;
    }

    LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
}
// static
// static
// Logs a snapshot of the current thread's stack layout: overall bounds, the last normal
// stack address, and a region-by-region dump (via DebugLogStackRegionMBIs).
// Debug builds only.
void Thread::DebugLogStackMBIs()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_INTOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    Thread* pThread = GetThread();  // N.B. this can be NULL!

    // Prefer the cached limits when we have a Thread object; otherwise query the OS.
    // (The original code queried the OS unconditionally and then overwrote the results
    // in both branches — redundant dead stores removed.)
    UINT_PTR uStackLimit;
    UINT_PTR uStackBase;
    if (pThread)
    {
        uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit();
        uStackBase  = (UINT_PTR)pThread->GetCachedStackBase();
    }
    else
    {
        uStackLimit = (UINT_PTR)GetStackLowerBound();
        uStackBase  = (UINT_PTR)GetStackUpperBound();
    }
    UINT_PTR uStackSize = uStackBase - uStackLimit;

    LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
    LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize()));
    if (pThread)
    {
        LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress()));
    }

    DebugLogStackRegionMBIs(uStackLimit, uStackBase);
}
#endif // _DEBUG
//
// IsSPBeyondLimit
//
// Determines if the stack pointer is beyond the stack limit, in which case
// we can assume we've taken a hard SO.
//
// Parameters: none
//
// Returns: bool indicating if SP is beyond the limit or not
//
//
// IsSPBeyondLimit
//
// Reports whether the current stack pointer has dropped below the last allowable
// stack address — if so, we can assume a hard stack overflow has been taken.
//
BOOL Thread::IsSPBeyondLimit()
{
    WRAPPER_NO_CONTRACT;

    // Refresh the cached stack limits first; they may be stale.
    // @todo: add a vectored handler for X86 so the limits are reset there instead —
    // anything supporting SetThreadStackGuarantee also supports vectored handlers.
    // EH processing could then always assume the limits are good.
    ResetStackLimits();

    char *currentSP = (char *)GetCurrentSP();
    return (currentSP < (char *)GetLastAllowableStackAddress()) ? TRUE : FALSE;
}
// Temporarily consumes a small, platform-tuned amount of stack (0x200 bytes on x86,
// 0x400 elsewhere) by alloca'ing and touching one byte; the space is released when the
// function returns. Marked noinline so the alloca lives in its own frame.
// NOTE(review): presumably used to force the stack pointer/commit past a region of
// interest — confirm against callers.
__declspec(noinline) void AllocateSomeStack(){
    LIMITED_METHOD_CONTRACT;
#ifdef _TARGET_X86_
    const size_t size = 0x200;
#else //_TARGET_X86_
    const size_t size = 0x400;
#endif //_TARGET_X86_

    INT8* mem = (INT8*)_alloca(size);
    // Actually touch the memory we just allocated so the compiler can't
    // optimize it away completely.
    // NOTE: this assumes the stack grows down (towards 0).
    VolatileStore<INT8>(mem, 0);
}
#ifndef FEATURE_PAL
// static // private
// static // private
// Returns TRUE if any region within [uLowAddress, uHighAddress) carries PAGE_GUARD
// protection; FALSE otherwise, or if VirtualQuery fails (assume no guard page).
BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    SIZE_T dwRes;
    MEMORY_BASIC_INFORMATION meminfo;
    UINT_PTR uStartOfCurrentRegion = uLowAddress;

    while (uStartOfCurrentRegion < uHighAddress)
    {
        // Deliberately use the raw OS VirtualQuery (not the Clr wrapper) here:
#undef VirtualQuery
        // This code can run below YieldTask, which means that it must not call back into the host.
        // The reason is that YieldTask is invoked by the host, and the host needs not be reentrant.
        dwRes = VirtualQuery((const void *)uStartOfCurrentRegion, &meminfo, sizeof(meminfo));
#define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)

        // If the query fails then assume we have no guard page.
        if (sizeof(meminfo) != dwRes)
        {
            return FALSE;
        }

        if (meminfo.Protect & PAGE_GUARD)
        {
            return TRUE;
        }

        uStartOfCurrentRegion += meminfo.RegionSize;
    }

    return FALSE;
}
#endif // !FEATURE_PAL
/*
* DetermineIfGuardPagePresent
*
* DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function makes
* a physical check of the stack, rather than relying on whether or not the CLR is currently processing a stack
* overflow exception.
*
* It seems reasonable to want to check just the 3rd page for !MEM_COMMIT or PAGE_GUARD, but that's no good in a
* world where a) one can extend the guard region arbitrarily with SetThreadStackGuarantee(), b) a thread's stack
* could be pre-committed, and c) another lib might reset the guard page very high up on the stack, much as we
* do. In that world, we have to do VirtualQuery from the lower bound up until we find a region with PAGE_GUARD on
* it. If we've never SO'd, then that's two calls to VirtualQuery.
*
* Parameters:
* None
*
* Returns:
* TRUE if the thread has a guard page, FALSE otherwise.
*/
// Physically checks the thread's stack for a PAGE_GUARD region (see the block comment
// above for why the whole range must be scanned). Always TRUE on FEATURE_PAL builds.
BOOL Thread::DetermineIfGuardPagePresent()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

#ifndef FEATURE_PAL
    BOOL bStackGuarded = FALSE;
    UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase();
    UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit();

    // Note: we start our queries after the hard guard page (one page up from the base of the stack.) We know the
    // very last region of the stack is never the guard page (it's always the uncommitted "hard" guard page) so there's
    // no need to waste a query on it.
    bStackGuarded = DoesRegionContainGuardPage(uStackLimit + HARD_GUARD_REGION_SIZE,
                                               uStackBase);

    LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING"));

    return bStackGuarded;
#else // !FEATURE_PAL
    return TRUE;
#endif // !FEATURE_PAL
}
/*
* GetLastNormalStackAddress
*
* GetLastNormalStackAddress returns the last stack address before the guard
* region of this thread. This is the last address that one could write to
* before a stack overflow occurs.
*
* Parameters:
* None
*
* Returns:
* Address of the first page of the guard region.
*/
// Last writable stack address for *this* thread, before its guard region.
// Thin wrapper forwarding the cached stack limit to the (UINT_PTR) overload.
UINT_PTR Thread::GetLastNormalStackAddress()
{
    WRAPPER_NO_CONTRACT;

    UINT_PTR cachedLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit);
    return GetLastNormalStackAddress(cachedLimit);
}
#ifdef FEATURE_STACK_PROBE
/*
* CanResetStackTo
*
* Given a target stack pointer, this function will tell us whether or not we could restore the guard page if we
* unwound the stack that far.
*
* Parameters:
* stackPointer -- stack pointer that we want to try to reset the thread's stack up to.
*
* Returns:
* TRUE if there's enough room to reset the stack, false otherwise.
*/
// Reports whether unwinding the stack to stackPointer would leave enough room to
// restore the guard page (see the block comment above for the full contract).
BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // Signed distance from the proposed stack pointer down to the first guard page.
    // Signed because stackPointer may already be inside the guard region, which lies
    // below the address GetLastNormalStackAddress returns.
    INT_PTR remainingStack = (INT_PTR)stackPointer - GetLastNormalStackAddress();

    // We need enough space to call back into the EE from the handler, so require twice
    // the entry-point probe amount: enough to do the work, and enough that partway
    // through it we won't probe and COMPlusThrowSO.
    const INT_PTR minimumStack = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * GetOsPageSize());

    return (remainingStack > minimumStack) ? TRUE : FALSE;
}
/*
* IsStackSpaceAvailable
*
* Given a number of stack pages, this function will tell us whether or not we have that much space
* before the top of the stack. If we are in the guard region we must be already handling an SO,
* so we report how much space is left in the guard region
*
* Parameters:
* numPages -- the number of pages that we need. This can be a fractional amount.
*
* Returns:
* TRUE if there's that many pages of stack available
*/
// Returns TRUE if at least numPages pages of stack remain above the guard machinery
// (crediting the stack guarantee when the guard page has already been tripped).
BOOL Thread::IsStackSpaceAvailable(float numPages)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // How much space between the current stack pointer and the first guard page?
    //
    // This must be signed since the stack pointer might be in the guard region,
    // which is at a lower address than GetLastNormalStackAddress will return.
    float iStackSpaceLeft = static_cast<float>((INT_PTR)GetCurrentSP() - (INT_PTR)GetLastNormalStackAddress());

    // If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
    // use that.
    if ((iStackSpaceLeft/GetOsPageSize()) < numPages && !DetermineIfGuardPagePresent())
    {
        UINT_PTR stackGuarantee = GetStackGuarantee();
        // GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
        // amount of stack, in addition to any sort of stack guarantee we might have.
        //
        // All these values are OS supplied, and will never overflow. (If they did, that would mean the stack
        // is on the order of a GB or more, which isn't possible.)
        iStackSpaceLeft += stackGuarantee + GetOsPageSize();
    }
    if ((iStackSpaceLeft/GetOsPageSize()) < numPages)
    {
        return FALSE;
    }

    return TRUE;
}
#endif // FEATURE_STACK_PROBE
/*
* GetStackGuarantee
*
* Returns the amount of stack guaranteed after an SO but before the OS rips the process.
*
* Parameters:
* none
*
* Returns:
* The stack guarantee in OS pages.
*/
// Returns this thread's stack guarantee in bytes: the OS-reported per-thread value
// when SetThreadStackGuarantee is in use, otherwise the default.
UINT_PTR Thread::GetStackGuarantee()
{
    WRAPPER_NO_CONTRACT;

#ifndef FEATURE_PAL
    // There is a new API available on new OS's called SetThreadStackGuarantee. It allows you to change the size of
    // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query
    // it to see if someone has changed the size of the guard region for this thread.
    if (!IsSetThreadStackGuaranteeInUse())
    {
        return SIZEOF_DEFAULT_STACK_GUARANTEE;
    }

    ULONG cbNewStackGuarantee = 0;
    // Passing in a value of 0 means that we're querying, and the value is changed with the new guard region
    // size.
    if (::SetThreadStackGuarantee(&cbNewStackGuarantee) &&
        (cbNewStackGuarantee != 0))
    {
        return cbNewStackGuarantee;
    }
#endif // !FEATURE_PAL

    return SIZEOF_DEFAULT_STACK_GUARANTEE;
}
#ifndef FEATURE_PAL
//
// MarkPageAsGuard
//
// Given a page base address, try to turn it into a guard page and then requery to determine success.
//
// static // private
//
// MarkPageAsGuard
//
// Given a page base address, try to turn it into a guard page, then re-query to
// determine whether the protection actually stuck. Returns TRUE only when the page
// is verified to carry PAGE_GUARD.
//
// static // private
BOOL Thread::MarkPageAsGuard(UINT_PTR uGuardPageBase)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    DWORD flOldProtect;

    ClrVirtualProtect((LPVOID)uGuardPageBase, 1,
                      (PAGE_READWRITE | PAGE_GUARD), &flOldProtect);

    // Intentionally ignore return value -- if it failed, we'll find out below
    // and keep moving up the stack until we either succeed or we hit the guard
    // region. If we don't succeed before we hit the guard region, we'll end up
    // with a fatal error.

    // Now, make sure the guard page is really there. If its not, then VirtualProtect most likely failed
    // because our stack had grown onto the page we were trying to protect by the time we made it into
    // VirtualProtect. So try the next page down.
    MEMORY_BASIC_INFORMATION meminfo;
    SIZE_T dwRes;

    dwRes = ClrVirtualQuery((const void *)uGuardPageBase, &meminfo, sizeof(meminfo));

    return ((sizeof(meminfo) == dwRes) && (meminfo.Protect & PAGE_GUARD));
}
/*
* RestoreGuardPage
*
* RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed by
* the OS due to a stack overflow exception. This function requires that you know that you have enough stack space
* to restore the guard page, so make sure you know what you're doing when you decide to call this.
*
* Parameters:
* None
*
* Returns:
* Nothing
*/
// Re-establishes the guard page on this thread's stack after the OS removed it during a
// stack overflow. Walks downward from just below the current SP until a page accepts
// PAGE_GUARD; rips the process with a fatal error if it reaches the guard region first.
// Caller must know there is enough stack left to do this (see the comment block above).
VOID Thread::RestoreGuardPage()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
        CANNOT_TAKE_LOCK;
    }
    CONTRACTL_END;

    // Need a hard SO probe here.
    CONTRACT_VIOLATION(SOToleranceViolation);

    BOOL bStackGuarded = DetermineIfGuardPagePresent();

    // If the guard page is still there, then just return.
    if (bStackGuarded)
    {
        LOG((LF_EH, LL_INFO100, "Thread::RestoreGuardPage: no need to restore... guard page is already there.\n"));
        return;
    }

    // Declared before the if-block so the 'goto lFatalError' below does not jump over
    // any initialization.
    UINT_PTR approxStackPointer;
    UINT_PTR guardPageBase;
    UINT_PTR guardRegionThreshold;
    BOOL     pageMissing;

    // Note: bStackGuarded is necessarily FALSE here (we returned above otherwise), so
    // this check always passes.
    if (!bStackGuarded)
    {
        // The normal guard page is the 3rd page from the base. The first page is the "hard" guard, the second one is
        // reserve, and the 3rd one is marked as a guard page. However, since there is now an API (on some platforms)
        // to change the size of the guard region, we'll just go ahead and protect the next page down from where we are
        // now. The guard page will get pushed forward again, just like normal, until the next stack overflow.
        approxStackPointer   = (UINT_PTR)GetCurrentSP();
        guardPageBase        = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize();

        // OS uses soft guard page to update the stack info in TEB. If our guard page is not beyond the current stack, the TEB
        // will not be updated, and then OS's check of stack during exception will fail.
        if (approxStackPointer >= guardPageBase)
        {
            guardPageBase -= GetOsPageSize();
        }

        // If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set
        // PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make
        // sure it worked. If it didn't, we try the next page down. We'll either find a page to protect, or run into
        // the guard region and rip the process down with EEPOLICY_HANDLE_FATAL_ERROR below.
        guardRegionThreshold = GetLastNormalStackAddress();
        pageMissing          = TRUE;

        while (pageMissing)
        {
            LOG((LF_EH, LL_INFO10000,
                 "Thread::RestoreGuardPage: restoring guard page @ 0x%p, approxStackPointer=0x%p, "
                 "last normal stack address=0x%p\n",
                     guardPageBase, approxStackPointer, guardRegionThreshold));

            // Make sure we set the guard page above the guard region.
            if (guardPageBase < guardRegionThreshold)
            {
                goto lFatalError;
            }

            if (MarkPageAsGuard(guardPageBase))
            {
                // The current GuardPage should be beyond the current SP.
                _ASSERTE (guardPageBase < approxStackPointer);
                pageMissing = FALSE;
            }
            else
            {
                guardPageBase -= GetOsPageSize();
            }
        }
    }

    FinishSOWork();

    INDEBUG(DebugLogStackMBIs());

    return;

lFatalError:
    STRESS_LOG2(LF_EH, LL_ALWAYS,
                "Thread::RestoreGuardPage: too close to the guard region (0x%p) to restore guard page @0x%p\n",
                guardRegionThreshold, guardPageBase);
    _ASSERTE(!"Too close to the guard page to reset it!");
    EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
}
#endif // !FEATURE_PAL
#endif // #ifndef DACCESS_COMPILE
//
// InitRegDisplay: initializes a REGDISPLAY for a thread. If validContext
// is false, pRD is filled from the current context of the thread. The
// thread's current context is also filled in pctx. If validContext is true,
// pctx should point to a valid context and pRD is filled from that.
//
bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validContext)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (!validContext)
    {
        // No caller-supplied context: prefer the debugger filter context if one is set...
        if (GetFilterContext()!= NULL)
        {
            pctx = GetFilterContext();
        }
        else
        {
#ifdef DACCESS_COMPILE
            DacNotImpl();
#else
            // ...otherwise capture the thread's live context from the OS.
            pctx->ContextFlags = CONTEXT_FULL;

            _ASSERTE(this != GetThread()); // do not call GetThreadContext on the active thread

            BOOL ret = EEGetThreadContext(this, pctx);
            if (!ret)
            {
                // Capture failed: neuter the context's IP and report failure.
                SetIP(pctx, 0);
#ifdef _TARGET_X86_
                pRD->ControlPC = pctx->Eip;
                pRD->PCTAddr = (TADDR)&(pctx->Eip);
#elif defined(_TARGET_AMD64_)
                // nothing more to do here, on Win64 setting the IP to 0 is enough.
#elif defined(_TARGET_ARM_)
                // nothing more to do here, on ARM setting the IP to 0 is enough.
#else
                PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay");
#endif

                return false;
            }
#endif // DACCESS_COMPILE
        }
    }

    FillRegDisplay( pRD, pctx );

    return true;
}
// Fills pRD from the given context via the global ::FillRegDisplay, and in
// DEBUG_REGDISPLAY builds also stamps the owning thread and sanity-checks the SP.
void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx)
{
    WRAPPER_NO_CONTRACT;
    SUPPORTS_DAC;

    ::FillRegDisplay(pRD, pctx);

#if defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_)
    CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this);
    pRD->_pThread = this;

    CheckRegDisplaySP(pRD);
#endif // defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_)
}
#ifdef DEBUG_REGDISPLAY
// Debug check: asserts the REGDISPLAY's SP lies within its owning thread's cached
// stack bounds (lower bound skipped when NO_FIXED_STACK_LIMIT is defined).
void CheckRegDisplaySP (REGDISPLAY *pRD)
{
    if (pRD->SP && pRD->_pThread)
    {
#ifndef NO_FIXED_STACK_LIMIT
        _ASSERTE(PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit());
#endif // NO_FIXED_STACK_LIMIT
        _ASSERTE(PTR_VOID(pRD->SP) <  pRD->_pThread->GetCachedStackBase());
    }
}
// Trip Functions
// ==============
// When a thread reaches a safe place, it will rendezvous back with us, via one of
// the following trip functions:
// Rendezvous point called when a thread reaches a safe place: handles any pending
// thread abort, and if the runtime wants the thread to stop (suspension, debugger),
// unhijacks it and pulses GC mode so it traps into the runtime.
void CommonTripThread()
{
#ifndef DACCESS_COMPILE
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    Thread *thread = GetThread();

    thread->HandleThreadAbort ();

    if (thread->CatchAtSafePoint())
    {
        _ASSERTE(!ThreadStore::HoldingThreadStore(thread));
#ifdef FEATURE_HIJACK
        thread->UnhijackThread();
#endif // FEATURE_HIJACK

        // Trap
        thread->PulseGCMode();
    }
#else
    DacNotImpl();
#endif // #ifndef DACCESS_COMPILE
}
#ifndef DACCESS_COMPILE
// Installs (or clears, with NULL) the debugger filter context for this thread.
void Thread::SetFilterContext(CONTEXT *pContext)
{
    // SetFilterContext is like pushing a Frame onto the Frame chain.
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE; // Absolutely must be in coop to coordinate w/ Runtime suspension.
        PRECONDITION(GetThread() == this); // must be on current thread.
    } CONTRACTL_END;

    m_debuggerFilterContext = pContext;
}
#endif // #ifndef DACCESS_COMPILE
// Returns the debugger filter context previously installed by SetFilterContext,
// or NULL if none is set. DAC-safe accessor.
T_CONTEXT *Thread::GetFilterContext(void)
{
    LIMITED_METHOD_DAC_CONTRACT;

   return m_debuggerFilterContext;
}
#ifndef DACCESS_COMPILE
// @todo - eventually complete remove the CantStop count on the thread and use
// the one in the PreDef block. For now, we increment both our thread counter,
// and the FLS counter. Eventually we can remove our thread counter and only use
// the FLS counter.
// Enters (fCantStop == true) or leaves (false) a debugger "can't stop" region for this
// thread, keeping both the per-thread counter and the FLS counter in step.
// @todo - eventually remove the per-thread counter and rely solely on the one in the
// PreDef block; until then both are maintained together.
void Thread::SetDebugCantStop(bool fCantStop)
{
    LIMITED_METHOD_CONTRACT;

    if (!fCantStop)
    {
        // Leaving a can't-stop region: decrement both counters.
        DecCantStopCount();
        --m_debuggerCantStop;
        return;
    }

    // Entering a can't-stop region: increment both counters.
    IncCantStopCount();
    ++m_debuggerCantStop;
}
// @todo - remove this, we only read this from oop.
// @todo - remove this, we only read this from oop.
// Returns true while this thread is inside at least one debugger "can't stop" region.
bool Thread::GetDebugCantStop(void)
{
    LIMITED_METHOD_CONTRACT;

    return m_debuggerCantStop != 0;
}
//-----------------------------------------------------------------------------
// Call w/a wrapper.
// We've already transitioned AppDomains here. This just places a 1st-pass filter to sniff
// for catch-handler found callbacks for the debugger.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// Call w/a wrapper.
// We've already transitioned AppDomains here. This just places a 1st-pass filter to
// sniff for catch-handler-found callbacks for the debugger.
//
// Fix: the PAL_TRY argument '&param' had been corrupted by an HTML-entity mangling
// into the pilcrow character ('&para;' + 'm'); restored to '&param'.
//-----------------------------------------------------------------------------
void MakeADCallDebuggerWrapper(
    FPAPPDOMAINCALLBACK fpCallback,
    CtxTransitionBaseArgs * args,
    ContextTransitionFrame* pFrame)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_ANY;

    BYTE * pCatcherStackAddr = (BYTE*) pFrame;

    struct Param : NotifyOfCHFFilterWrapperParam
    {
        FPAPPDOMAINCALLBACK fpCallback;
        CtxTransitionBaseArgs *args;
    } param;
    param.pFrame = pCatcherStackAddr;
    param.fpCallback = fpCallback;
    param.args = args;

    PAL_TRY(Param *, pParam, &param)
    {
        pParam->fpCallback(pParam->args);
    }
    PAL_EXCEPT_FILTER(AppDomainTransitionExceptionFilter)
    {
        // Should never reach here b/c handler should always continue search.
        _ASSERTE(false);
    }
    PAL_ENDTRY
}
// Invoke a callback in another appdomain.
// Caller should have checked that we're actually transitioning domains here.
// Invoke a callback in another appdomain.
// Caller should have checked that we're actually transitioning domains here.
//
// Fix: removed a stray line-continuation backslash after the
// _ctx_trans_pTargetDomainException declaration (leftover from the macro-expanded
// origin of this function); it spliced the next source line onto that one.
void MakeCallWithAppDomainTransition(
    ADID TargetDomain,
    FPAPPDOMAINCALLBACK fpCallback,
    CtxTransitionBaseArgs * args)
{
    DEBUG_ASSURE_NO_RETURN_BEGIN(MAKECALL)

    Thread* _ctx_trans_pThread = GetThread();
    TESTHOOKCALL(EnteringAppDomain((TargetDomain.m_dwId)));
    // Resolves/validates the target domain; the result itself is unused here.
    AppDomain* pTargetDomain = SystemDomain::GetAppDomainFromId(TargetDomain, ADV_CURRENTAD);
    _ASSERTE(_ctx_trans_pThread != NULL);
    _ASSERTE(_ctx_trans_pThread->GetDomain()->GetId()!= TargetDomain);

    bool _ctx_trans_fRaiseNeeded = false;
    Exception* _ctx_trans_pTargetDomainException=NULL;

    FrameWithCookie<ContextTransitionFrame> _ctx_trans_Frame;
    ContextTransitionFrame* _ctx_trans_pFrame = &_ctx_trans_Frame;

    args->pCtxFrame = _ctx_trans_pFrame;
    TESTHOOKCALL(EnteredAppDomain((TargetDomain.m_dwId)));
    /* work around unreachable code warning */
    EX_TRY
    {
        // Invoke the callback
        if (CORDebuggerAttached())
        {
            // If a debugger is attached, do it through a wrapper that will sniff for CHF callbacks.
            MakeADCallDebuggerWrapper(fpCallback, args, GET_CTX_TRANSITION_FRAME());
        }
        else
        {
            // If no debugger is attached, call directly.
            fpCallback(args);
        }
    }
    EX_CATCH
    {
        LOG((LF_EH|LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): exception in flight\n",
            __FUNCTION__, __FILE__, __LINE__));

        _ctx_trans_pTargetDomainException=EXTRACT_EXCEPTION();
        _ctx_trans_fRaiseNeeded = true;
    }
    /* SwallowAllExceptions is fine because we don't get to this point */
    /* unless fRaiseNeeded = true or no exception was thrown */
    EX_END_CATCH(SwallowAllExceptions);
    TESTHOOKCALL(LeavingAppDomain((TargetDomain.m_dwId)));
    if (_ctx_trans_fRaiseNeeded)
    {
        LOG((LF_EH, LL_INFO1000, "RaiseCrossContextException(%s, %s, %d)\n",
            __FUNCTION__, __FILE__, __LINE__));
        // Re-raise the captured exception in the caller's context; does not return.
        _ctx_trans_pThread->RaiseCrossContextException(_ctx_trans_pTargetDomainException,_ctx_trans_pFrame);
    }

    LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n",
        __FUNCTION__, __FILE__, __LINE__));

#ifdef FEATURE_TESTHOOKS
    TESTHOOKCALL(LeftAppDomain(TargetDomain.m_dwId));
#endif
    DEBUG_ASSURE_NO_RETURN_END(MAKECALL)
}
// Associates a freshly created thread with the default AppDomain and notifies the
// domain of the entry. Must only run during thread initialization (m_pDomain unset).
void Thread::InitContext()
{
    CONTRACTL {
        THROWS;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    // this should only be called when initializing a thread
    _ASSERTE(m_pDomain == NULL);

    GCX_COOP_NO_THREAD_BROKEN();

    m_pDomain = SystemDomain::System()->DefaultDomain();
    _ASSERTE(m_pDomain);
    m_pDomain->ThreadEnter(this, NULL);
}
// Detaches this thread from its AppDomain: notifies the domain of the exit and clears
// m_pDomain. Safe to call when no domain is attached (no-op).
void Thread::ClearContext()
{
    CONTRACTL {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
    }
    CONTRACTL_END;

    // Nothing to do if we were never attached to a domain.
    if (!m_pDomain)
        return;

    m_pDomain->ThreadExit(this, NULL);

    // must set exposed context to null first otherwise object verification
    // checks will fail AV when m_Context is null
    m_pDomain = NULL;
#ifdef FEATURE_COMINTEROP
    m_fDisableComObjectEagerCleanup = false;
#endif //FEATURE_COMINTEROP
}
// Re-raises an exception captured during a cross-appdomain call as a managed throw in
// the current context. Never returns (COMPlusThrow unwinds).
void DECLSPEC_NORETURN Thread::RaiseCrossContextException(Exception* pExOrig, ContextTransitionFrame* pFrame)
{
    CONTRACTL
    {
        THROWS;
        WRAPPER(GC_TRIGGERS);
    }
    CONTRACTL_END;

    // pEx is NULL means that the exception is CLRLastThrownObjectException
    CLRLastThrownObjectException lastThrown;
    Exception* pException = pExOrig ? pExOrig : &lastThrown;

    COMPlusThrow(CLRException::GetThrowableFromException(pException));
}
// Callback state for StackWalkCallback_FindAD: accumulates transitions into
// pSearchDomain observed during a stack walk.
//
// Fix: the constructor previously left 'count' uninitialized, relying on every caller
// to zero it manually; it is now initialized to 0. (fTargetTransition is still set
// explicitly by each caller before the walk.)
struct FindADCallbackType {
    AppDomain *pSearchDomain;       // domain we are searching for
    AppDomain *pPrevDomain;         // domain of the previously visited transition frame
    Frame *pFrame;                  // transition frame into pSearchDomain (first or most recent)
    int count;                      // number of transitions into pSearchDomain seen so far
    enum TargetTransition
        {fFirstTransitionInto, fMostRecentTransitionInto}
    fTargetTransition;              // which transition to leave in pFrame

    FindADCallbackType() : pSearchDomain(NULL), pPrevDomain(NULL), pFrame(NULL), count(0)
    {
        LIMITED_METHOD_CONTRACT;
    }
};
// Stack-walk callback: records appdomain transition frames into the FindADCallbackType
// passed via 'data'. Returns SWA_ABORT once the most-recent transition is found (when
// that mode is requested), SWA_CONTINUE otherwise.
StackWalkAction StackWalkCallback_FindAD(CrawlFrame* pCF, void* data)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    FindADCallbackType *pData = (FindADCallbackType *)data;

    Frame *pFrame = pCF->GetFrame();

    // Only explicit frames can mark a domain transition.
    if (!pFrame)
        return SWA_CONTINUE;

    AppDomain *pReturnDomain = pFrame->GetReturnDomain();
    // Skip frames that don't transition, or that stay within the same domain.
    if (!pReturnDomain || pReturnDomain == pData->pPrevDomain)
        return SWA_CONTINUE;

    LOG((LF_APPDOMAIN, LL_INFO100, "StackWalkCallback_FindAD transition frame %8.8x into AD [%d]\n",
            pFrame, pReturnDomain->GetId().m_dwId));

    if (pData->pPrevDomain == pData->pSearchDomain) {
        ++pData->count;
        // this is a transition into the domain we are unloading, so save it in case it is the first
        pData->pFrame = pFrame;
        if (pData->fTargetTransition == FindADCallbackType::fMostRecentTransitionInto)
            return SWA_ABORT;   // only need to find last transition, so bail now
    }

    pData->pPrevDomain = pReturnDomain;
    return SWA_CONTINUE;
}
// This determines if a thread is running in the given domain at any point on the stack
// Determines whether this thread is running in the given domain at any point on its
// stack. Returns the most recent transition frame into pDomain, or NULL if the thread
// is not running in that domain; optionally reports the transition count via *count.
//
// Fixes: 'return FALSE' from a Frame*-returning function replaced with NULL (same
// value, correct type); removed the unused local that captured the ignored
// StackWalkFrames result; corrected a stale comment referring to 'pFirstFrame'.
Frame *Thread::IsRunningIn(AppDomain *pDomain, int *count)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    FindADCallbackType fct;
    fct.pSearchDomain = pDomain;
    if (!fct.pSearchDomain)
        return NULL;

    // set prev to current so if are currently running in the target domain,
    // we will detect the transition
    fct.pPrevDomain = m_pDomain;
    fct.fTargetTransition = FindADCallbackType::fMostRecentTransitionInto;
    fct.count = 0;

    // When this returns, any transition into the AD is recorded in fct.pFrame.
    StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);

    if (count)
        *count = fct.count;

    return fct.pFrame;
}
// This finds the very first frame on the stack where the thread transitioned into the given domain
// Finds the very first frame on the stack where this thread transitioned into the
// given domain. Returns that transition frame, or NULL if none was found; optionally
// reports the transition count via *count.
//
// Fixes: removed the unused local that captured the ignored StackWalkFrames result;
// corrected a stale comment referring to 'pFirstFrame'.
Frame *Thread::GetFirstTransitionInto(AppDomain *pDomain, int *count)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    FindADCallbackType fct;
    fct.pSearchDomain = pDomain;
    // set prev to current so if are currently running in the target domain,
    // we will detect the transition
    fct.pPrevDomain = m_pDomain;
    fct.fTargetTransition = FindADCallbackType::fFirstTransitionInto;
    fct.count = 0;

    // When this returns, any transition into the AD is recorded in fct.pFrame.
    StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);

    if (count)
        *count = fct.count;

    return fct.pFrame;
}
// Returns TRUE when the finalizer thread has non-finalization housekeeping pending:
// thread tasks, timer-info flushes, code-manager cache cleanup, finalized/detached
// thread cleanup, appdomain cleanup, or a GC trigger for dead threads.
// Paired with DoExtraWorkForFinalizer, which performs the work.
BOOL Thread::HaveExtraWorkForFinalizer()
{
    LIMITED_METHOD_CONTRACT;

    return m_ThreadTasks
        || ThreadpoolMgr::HaveTimerInfosToFlush()
        || ExecutionManager::IsCacheCleanupRequired()
        || Thread::CleanupNeededForFinalizedThread()
        || (m_DetachCount > 0)
        || SystemDomain::System()->RequireAppDomainCleanup()
        || ThreadStore::s_pThreadStore->ShouldTriggerGCForDeadThreads();
}
// Performs the finalizer thread's housekeeping (the work flagged by
// HaveExtraWorkForFinalizer). Must run on the finalizer thread itself.
void Thread::DoExtraWorkForFinalizer()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE(GetThread() == this);
    _ASSERTE(this == FinalizerThread::GetFinalizerThread());

#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
    if (RequiresCoInitialize())
    {
        SetApartment(AS_InMTA, FALSE);
    }
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT

    if (RequireSyncBlockCleanup())
    {
#ifndef FEATURE_PAL
        InteropSyncBlockInfo::FlushStandbyList();
#endif // !FEATURE_PAL

#ifdef FEATURE_COMINTEROP
        RCW::FlushStandbyList();
#endif // FEATURE_COMINTEROP

        SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocks();
    }
    if (SystemDomain::System()->RequireAppDomainCleanup())
    {
        SystemDomain::System()->ProcessDelayedUnloadLoaderAllocators();
    }

    if(m_DetachCount > 0 || Thread::CleanupNeededForFinalizedThread())
    {
        Thread::CleanupDetachedThreads();
    }

    // Only scrub the code-manager caches under GC pressure (gen 1 or higher condemned).
    if(ExecutionManager::IsCacheCleanupRequired() && GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()>=1)
    {
        ExecutionManager::ClearCaches();
    }

    // If there were any TimerInfos waiting to be released, they'll get flushed now
    ThreadpoolMgr::FlushQueueOfTimerInfos();

    ThreadStore::s_pThreadStore->TriggerGCForDeadThreadsIfNecessary();
}
// HELPERS FOR THE BASE OF A MANAGED THREAD, INCLUDING AD TRANSITION SUPPORT
// We have numerous places where we start up a managed thread. This includes several places in the
// ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our
// base exception handling behavior is consistent across those places. The resulting code is convoluted,
// but it's better than the prior situation of each thread being on a different plan.
// We need Middle & Outer methods for the usual problem of combining C++ & SEH.
/* The effect of all this is that we get:
Base of thread -- OS unhandled exception filter that we hook
SEH handler from DispatchOuter
C++ handler from DispatchMiddle
And if there is an AppDomain transition before we call back to user code, we additionally get:
AppDomain transition -- contains its own handlers to terminate the first pass
and marshal the exception.
SEH handler from DispatchOuter
C++ handler from DispatchMiddle
Regardless of whether or not there is an AppDomain transition, we then have:
User code that obviously can throw.
So if we don't have an AD transition, or we take a fault before we successfully transition the
AppDomain, then the base-most DispatchOuter/Middle will deal with the exception. This may
involve swallowing exceptions or it may involve Watson & debugger attach. It will always
involve notifications to any AppDomain.UnhandledException event listeners.
But if we did transition the AppDomain, then any Watson, debugger attach and UnhandledException
events will occur in that AppDomain in the initial first pass. So we get a good debugging
experience and we get notifications to the host that show which AppDomain is allowing exceptions
to go unhandled (so perhaps it can be unloaded or otherwise dealt with).
The trick is that if the exception goes unhandled at the process level, we would normally try
to fire AppDomain events and display the faulting exception on the console from two more
places. These are the base-most DispatchOuter/Middle pair and the hook of the OS unhandled
exception handler at the base of the thread.
This is redundant and messy. (There's no concern with getting a 2nd Watson because we only
do one of these per process anyway). The solution for the base-most DispatchOuter/Middle is
to use the ManagedThreadCallState.flags to control whether the exception has already been
dealt with or not. These flags cause the ThreadBaseRedirectingFilter to either do normal
"base of the thread" exception handling, or to ignore the exception because it has already
been reported in the AppDomain we transitioned to.
But turning off the reporting in the OS unhandled exception filter is harder. We don't want
to flip a bit on the Thread to disable this, unless we can be sure we are only disabling
something we already reported, and that this thread will never recover from that situation and
start executing code again. Here's the normal nightmare scenario with SEH:
1) exception of type A is thrown
2) All the filters in the 1st pass say they don't want an A
3) The exception gets all the way out and is considered unhandled. We report this "fact".
4) Imagine we then set a bit that says this thread shouldn't report unhandled exceptions.
5) The 2nd pass starts.
6) Inside a finally, someone throws an exception of type B.
7) A new 1st pass starts from the point of the throw, with a type B.
8) Now a filter says "Yes, I will swallow exception B."
9) We no longer have an unhandled exception, and execution continues merrily.
This is an unavoidable consequence of the 2-pass model. If you report unhandled exceptions
in the 1st pass (for good debugging), you might find that this was premature and you don't
have an unhandled exception when you get to the 2nd pass.
But it would not be optimal if in step 4 we set a bit that says we should suppress normal
notifications and reporting on this thread, believing that the process will terminate.
The solution is to recognize that the base OS unhandled exception filter runs in two modes.
In the first mode, it operates as today and serves as our backstop. In the second mode
it is fully redundant with the handlers pushed after the AppDomain transition, which are
completely containing the exception to the AD that it occurred in (for purposes of reporting).
So we just need a flag on the thread that says whether or not that set of handlers are pushed
and functioning. That flag enables / disables the base exception reporting and is called
TSNC_AppDomainContainUnhandled
*/
// Controls how unhandled exceptions are reported at the base of a managed
// thread (see the large comment above and ThreadBaseRedirectingFilter).
enum ManagedThreadCallStateFlags
{
    MTCSF_NormalBase,         // normal "base of thread" exception handling
    MTCSF_ContainToAppDomain, // contain reporting to the transitioned-into AppDomain
    MTCSF_SuppressDuplicate,  // exception already reported; suppress further reporting
};
// Captures everything needed to run a callback at a managed thread's base:
// the target AppDomain (held either as an ID or as a raw pointer, selected
// by bDomainIsAsID), the callback and its argument, the unhandled-exception
// filter category, and the reporting flags that the base filter may update.
struct ManagedThreadCallState
{
    ADID                         pAppDomainId;     // valid when bDomainIsAsID
    AppDomain*                   pUnsafeAppDomain; // valid when !bDomainIsAsID
    BOOL                         bDomainIsAsID;    // selects which field above is live
    ADCallBackFcnType            pTarget;          // callback run at the thread base
    LPVOID                       args;             // opaque argument for pTarget
    UnhandledExceptionLocation   filterType;       // UE filter category for this base
    ManagedThreadCallStateFlags  flags;            // reporting mode; mutated by the filter

    // TRUE when pApp is the domain this state targets, comparing by id or
    // by pointer depending on how the domain was supplied.
    BOOL IsAppDomainEqual(AppDomain* pApp)
    {
        LIMITED_METHOD_CONTRACT;
        return bDomainIsAsID?(pApp->GetId()==pAppDomainId):(pUnsafeAppDomain==pApp);
    }

    // Public constructor: target domain supplied by id.
    ManagedThreadCallState(ADID AppDomainId,ADCallBackFcnType Target,LPVOID Args,
                           UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
        pAppDomainId(AppDomainId),
        pUnsafeAppDomain(NULL),
        bDomainIsAsID(TRUE),
        pTarget(Target),
        args(Args),
        filterType(FilterType),
        flags(Flags)
    {
        LIMITED_METHOD_CONTRACT;
    };
protected:
    // Protected constructor: target domain supplied by (unsafe) pointer.
    // Only reachable through the friends declared below.
    ManagedThreadCallState(AppDomain* AppDomain,ADCallBackFcnType Target,LPVOID Args,
                           UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
        pAppDomainId(ADID(0)),
        pUnsafeAppDomain(AppDomain),
        bDomainIsAsID(FALSE),
        pTarget(Target),
        args(Args),
        filterType(FilterType),
        flags(Flags)
    {
        LIMITED_METHOD_CONTRACT;
    };

    // Re-targets an existing state for a finalizer-driven AppDomain callback
    // (used by ManagedThreadBase::FinalizerAppDomain's turn-around state).
    void InitForFinalizer(AppDomain* AppDomain,ADCallBackFcnType Target,LPVOID Args)
    {
        LIMITED_METHOD_CONTRACT;
        filterType=FinalizerThread;
        pUnsafeAppDomain=AppDomain;
        pTarget=Target;
        args=Args;
    };

    friend void ManagedThreadBase_NoADTransition(ADCallBackFcnType pTarget,
                                                 UnhandledExceptionLocation filterType);
    friend void ManagedThreadBase::FinalizerAppDomain(AppDomain* pAppDomain,
                                                      ADCallBackFcnType pTarget,
                                                      LPVOID args,
                                                      ManagedThreadCallState *pTurnAround);
};
// The following static helpers are outside of the ManagedThreadBase struct because I
// don't want to change threads.h whenever I change the mechanism for how unhandled
// exceptions works. The ManagedThreadBase struct is for the public exposure of the
// API only.
static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState);
// Innermost layer of the managed-thread base: simply invoke the
// user-supplied callback with its argument. All exception handling is
// provided by the Middle/Outer layers that wrap this call.
static void ManagedThreadBase_DispatchInner(ManagedThreadCallState *pCallState)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Dispatch straight through the stored function pointer.
    pCallState->pTarget(pCallState->args);
}
// Middle layer of the managed-thread base: wraps the inner dispatch in a C++
// EX_TRY so escaping C++ exceptions are either swallowed (per policy, or for
// thread-abort) or rethrown under an unwind-and-continue handler. A stack
// Cleanup object restores the Frame chain and clears
// TSNC_AppDomainContainUnhandled during any unwind.
static void ManagedThreadBase_DispatchMiddle(ManagedThreadCallState *pCallState)
{
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_MODE_COOPERATIVE;
    STATIC_CONTRACT_SO_TOLERANT;

    // We have the probe outside the EX_TRY below since corresponding EX_CATCH
    // also invokes SO_INTOLERANT code.
    BEGIN_SO_INTOLERANT_CODE(GetThread());

    EX_TRY_CPP_ONLY
    {
        // During an unwind, we have some cleanup:
        //
        // 1) We should no longer suppress any unhandled exception reporting at the base
        //    of the thread, because any handler that contained the exception to the AppDomain
        //    where it occurred is now being removed from the stack.
        //
        // 2) We need to unwind the Frame chain. We cannot do it when we get to the __except clause
        //    because at this point we are in the 2nd phase and the stack has been popped. Any
        //    stack crawling from another thread will see a frame chain in a popped region of stack.
        //    Nor can we pop it in a filter, since this would destroy all the stack-walking information
        //    we need to perform the 2nd pass. So doing it in a C++ destructor will ensure it happens
        //    during the 2nd pass but before the stack is actually popped.
        class Cleanup
        {
            Frame     *m_pEntryFrame;  // frame chain top captured at entry
            Thread    *m_pThread;      // thread whose chain/state we restore
        public:
            Cleanup(Thread* pThread)
            {
                m_pThread = pThread;
                m_pEntryFrame = pThread->m_pFrame;
            }
            ~Cleanup()
            {
                GCX_COOP();
                m_pThread->SetFrame(m_pEntryFrame);
                m_pThread->ResetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
            }
        };

        Cleanup cleanup(GetThread());

        ManagedThreadBase_DispatchInner(pCallState);
    }
    EX_CATCH_CPP_ONLY
    {
        GCX_COOP();
        Exception *pException = GET_EXCEPTION();

        // RudeThreadAbort is a pre-allocated instance of ThreadAbort. So the following is sufficient.
        // For Whidbey, by default only swallow certain exceptions. If reverting back to Everett's
        // behavior (swallowing all unhandled exception), then swallow all unhandled exception.
        //
        if (SwallowUnhandledExceptions() ||
            IsExceptionOfType(kThreadAbortException, pException))
        {
            // Do nothing to swallow the exception
        }
        else
        {
            // Setting up the unwind_and_continue_handler ensures that C++ exceptions do not leak out.
            // An example is when Thread1 in Default AppDomain creates AppDomain2, enters it, creates
            // another thread T2 and T2 throws OOM exception (that goes unhandled). At the transition
            // boundary, END_DOMAIN_TRANSITION will catch it and invoke RaiseCrossContextException
            // that will rethrow the OOM as a C++ exception.
            //
            // Without unwind_and_continue_handler below, the exception will fly up the stack to
            // this point, where it will be rethrown and thus leak out.
            INSTALL_UNWIND_AND_CONTINUE_HANDLER;

            EX_RETHROW;

            UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
        }
    }
    EX_END_CATCH(SwallowAllExceptions);

    END_SO_INTOLERANT_CODE;
}
/*
typedef struct Param
{
ManagedThreadCallState * m_pCallState;
Frame * m_pFrame;
Param(ManagedThreadCallState * pCallState, Frame * pFrame): m_pCallState(pCallState), m_pFrame(pFrame) {}
} TryParam;
*/
// Parameter block threaded through the SEH filter in
// ManagedThreadBase_DispatchOuter. Derives from NotifyOfCHFFilterWrapperParam
// so the same object can be handed to NotifyOfCHFFilterWrapper (which uses
// the inherited pFrame member).
typedef struct Param: public NotifyOfCHFFilterWrapperParam
{
    ManagedThreadCallState * m_pCallState; // state of the dispatch being filtered
    Param(ManagedThreadCallState * pCallState): m_pCallState(pCallState) {}
} TryParam;
// Dispatch to the appropriate filter, based on the active CallState.
static LONG ThreadBaseRedirectingFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
LONG (*ptrFilter) (PEXCEPTION_POINTERS, PVOID);
TryParam * pRealParam = reinterpret_cast<TryParam *>(pParam);
ManagedThreadCallState * _pCallState = pRealParam->m_pCallState;
ManagedThreadCallStateFlags flags = _pCallState->flags;
if (flags == MTCSF_SuppressDuplicate)
{
LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
GetThread()->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
return EXCEPTION_CONTINUE_SEARCH;
}
LONG ret = -1;
BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
// This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH,
// it will trigger unhandled exception processing.
ptrFilter = ThreadBaseExceptionAppDomainFilter;
// WARNING - ptrFilter may not return
// This occurs when the debugger decides to intercept an exception and catch it in a frame closer
// to the leaf than the one executing this filter
ret = (*ptrFilter) (pExceptionInfo, _pCallState);
// Although EXCEPTION_EXECUTE_HANDLER can also be returned in cases corresponding to
// unhandled exceptions, all of those cases have already notified the debugger of an unhandled
// exception which prevents a second notification indicating the exception was caught
if (ret == EXCEPTION_EXECUTE_HANDLER)
{
// WARNING - NotifyOfCHFFilterWrapper may not return
// This occurs when the debugger decides to intercept an exception and catch it in a frame closer
// to the leaf than the one executing this filter
NotifyOfCHFFilterWrapper(pExceptionInfo, pRealParam);
}
// If we are containing unhandled exceptions to the AppDomain we transitioned into, and the
// exception is coming out, then this exception is going unhandled. We have already done
// Watson and managed events, so suppress all filters below us. Otherwise we are swallowing
// it and returning out of the AppDomain.
if (flags == MTCSF_ContainToAppDomain)
{
if(ret == EXCEPTION_CONTINUE_SEARCH)
{
_pCallState->flags = MTCSF_SuppressDuplicate;
}
else if(ret == EXCEPTION_EXECUTE_HANDLER)
{
_pCallState->flags = MTCSF_NormalBase;
}
// else if( EXCEPTION_CONTINUE_EXECUTION ) do nothing
}
// Get the reference to the current thread..
Thread *pCurThread = GetThread();
_ASSERTE(pCurThread);
if (flags == MTCSF_ContainToAppDomain)
{
if (((ManagedThreadCallState *) _pCallState)->flags == MTCSF_SuppressDuplicate)
{
// Set the flag that we have done unhandled exception processing
// for this managed thread that started in a non-default domain
LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
pCurThread->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
}
}
else
{
_ASSERTE(flags == MTCSF_NormalBase);
LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_ProcessedUnhandledException\n"));
//
// In the default domain, when an exception goes unhandled on a managed thread whose threadbase is in the VM (e.g. explicitly spawned threads,
// ThreadPool threads, finalizer thread, etc), CLR can end up in the unhandled exception processing path twice.
//
// The first attempt to perform UE processing happens at the managed thread base (via this function). When it completes,
// we will set TSNC_ProcessedUnhandledException state against the thread to indicate that we have perform the unhandled exception processing.
//
// On the desktop CLR, after the first attempt, we will return back to the OS with EXCEPTION_CONTINUE_SEARCH as unhandled exceptions cannot be swallowed. When the exception reaches
// the native threadbase in the OS kernel, the OS will invoke the UEF registered for the process. This can result in CLR's UEF (COMUnhandledExceptionFilter)
// getting invoked that will attempt to perform UE processing yet again for the same thread. To avoid this duplicate processing, we check the presence of
// TSNC_ProcessedUnhandledException state on the thread and if present, we simply return back to the OS.
//
// On desktop CoreCLR, we will only do UE processing once (at the managed threadbase) since no thread is created in default domain - all are created and executed in non-default domain.
// As a result, we go via completely different codepath that prevents duplication of UE processing from happening, especially since desktop CoreCLR is targetted for SL and SL
// always passes us a flag to swallow unhandled exceptions.
//
// On CoreSys CoreCLR, the host can ask CoreCLR to run all code in the default domain. As a result, when we return from the first attempt to perform UE
// processing, the call could return back with EXCEPTION_EXECUTE_HANDLER since, like desktop CoreCLR is instructed by SL host to swallow all unhandled exceptions,
// CoreSys CoreCLR can also be instructed by its Phone host to swallow all unhandled exceptions. As a result, the exception dispatch will never continue to go upstack
// to the native threadbase in the OS kernel and thus, there will never be a second attempt to perform UE processing. Hence, we dont, and shouldnt, need to set
// TSNC_ProcessedUnhandledException state against the thread if we are in SingleAppDomain mode and have been asked to swallow the exception.
//
// If we continue to set TSNC_ProcessedUnhandledException and a ThreadPool Thread A has an exception go unhandled, we will swallow it correctly for the first time.
// The next time Thread A has an exception go unhandled, our UEF will see TSNC_ProcessedUnhandledException set and assume (incorrectly) UE processing has happened and
// will fail to honor the host policy (e.g. swallow unhandled exception). Thus, the 2nd unhandled exception may end up crashing the app when it should not.
//
if (ret != EXCEPTION_EXECUTE_HANDLER)
{
// Since we have already done unhandled exception processing for it, we dont want it
// to happen again if our UEF gets invoked upon returning back to the OS.
//
// Set the flag to indicate so.
pCurThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
}
}
END_SO_INTOLERANT_CODE;
return ret;
}
// Outer layer of the managed-thread base: installs the SEH backstop
// (ThreadBaseRedirectingFilter) around the Middle/Inner dispatch and pushes
// a DebuggerU2MCatchHandlerFrame so the debugger can send CatchHandlerFound
// notifications for swallowed managed exceptions.
static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState)
{
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    // HasStarted() must have already been performed by our caller
    _ASSERTE(GetThread() != NULL);

    Thread *pThread = GetThread();
#ifdef WIN64EXCEPTIONS
    Frame *pFrame = pThread->m_pFrame;
#endif // WIN64EXCEPTIONS

    // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
    // which may swallow managed exceptions. The debugger needs this in order to send a
    // CatchHandlerFound (CHF) notification.
    FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;

    TryParam param(pCallState);
    param.pFrame = &catchFrame;

    struct TryArgs
    {
        TryParam *pTryParam;
        Thread *pThread;
        BOOL *pfHadException;
#ifdef WIN64EXCEPTIONS
        Frame *pFrame;
#endif // WIN64EXCEPTIONS
    }args;

    // BUGFIX: this line previously read "args.pTryParam = ¶m;" — an
    // encoding corruption of "&param" (the "&para" sequence collapsed into
    // a pilcrow). Restored the address-of expression.
    args.pTryParam = &param;
    args.pThread = pThread;

    BOOL fHadException = TRUE;
    args.pfHadException = &fHadException;

#ifdef WIN64EXCEPTIONS
    args.pFrame = pFrame;
#endif // WIN64EXCEPTIONS

    PAL_TRY(TryArgs *, pArgs, &args)
    {
        PAL_TRY(TryParam *, pParam, pArgs->pTryParam)
        {
            ManagedThreadBase_DispatchMiddle(pParam->m_pCallState);
        }
        PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter)
        {
            // Note: one of our C++ exceptions will never reach this filter because they're always caught by
            // the EX_CATCH in ManagedThreadBase_DispatchMiddle().
            //
            // If eCLRDeterminedPolicy, we only swallow for TA, RTA, and ADU exception.
            // For eHostDeterminedPolicy, we will swallow all the managed exception.

#ifdef WIN64EXCEPTIONS
            // this must be done after the second pass has run, it does not
            // reference anything on the stack, so it is safe to run in an
            // SEH __except clause as well as a C++ catch clause.
            ExceptionTracker::PopTrackers(pArgs->pFrame);
#endif // WIN64EXCEPTIONS

            // If a thread abort brought us here, it has served its purpose:
            // reset the abort request before the thread continues.
            if (pArgs->pThread->IsAbortRequested())
                pArgs->pThread->EEResetAbort(Thread::TAR_Thread);
        }
        PAL_ENDTRY;

        *(pArgs->pfHadException) = FALSE;
    }
    PAL_FINALLY
    {
        catchFrame.Pop();
    }
    PAL_ENDTRY;
}
// For the implementation, there are three variants of work possible:
// 1. Establish the base of a managed thread, and switch to the correct AppDomain.
// Variant 1: establish the base of a managed thread and switch to the
// requested AppDomain before running the callback.
static void ManagedThreadBase_FullTransitionWithAD(ADID pAppDomain,
                                                   ADCallBackFcnType pTarget,
                                                   LPVOID args,
                                                   UnhandledExceptionLocation filterType)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    // Package the request and run it under the standard thread-base handlers.
    ManagedThreadCallState callState(pAppDomain, pTarget, args, filterType, MTCSF_NormalBase);
    ManagedThreadBase_DispatchOuter(&callState);
}
// 2. Establish the base of a managed thread, but the AppDomain transition must be
// deferred until later.
// Variant 2: establish the base of a managed thread in the current
// AppDomain; any domain transition is deferred until later.
void ManagedThreadBase_NoADTransition(ADCallBackFcnType pTarget,
                                      UnhandledExceptionLocation filterType)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    AppDomain *pCurrentDomain = GetAppDomain();

    ManagedThreadCallState callState(pCurrentDomain, pTarget, NULL, filterType, MTCSF_NormalBase);

    // Self-describing: the state is its own argument, so a subsequent
    // AppDomain transition can reuse it as turn-around data.
    callState.args = &callState;

    ManagedThreadBase_DispatchOuter(&callState);
}
// And here are the various exposed entrypoints for base thread behavior
// The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker
// Entry point for the 'new Thread(...).Start()' case: runs pTarget at the
// thread base with a full AppDomain transition, using the ManagedThread
// unhandled-exception filter category.
void ManagedThreadBase::KickOff(ADID pAppDomain, ADCallBackFcnType pTarget, LPVOID args)
{
    WRAPPER_NO_CONTRACT;
    ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ManagedThread);
}
// The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in the ThreadPool
// Entry point for ThreadPool work (IOCompletion, QueueUserWorkItem,
// AddTimer, RegisterWaitForSingleObject): full AppDomain transition with the
// ThreadPoolThread unhandled-exception filter category.
void ManagedThreadBase::ThreadPool(ADID pAppDomain, ADCallBackFcnType pTarget, LPVOID args)
{
    WRAPPER_NO_CONTRACT;
    ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ThreadPoolThread);
}
// The Finalizer thread establishes exception handling at its base, but defers all the AppDomain
// transitions.
// The finalizer thread establishes exception handling at its base but defers
// all AppDomain transitions (see FinalizerAppDomain for the per-domain hop).
void ManagedThreadBase::FinalizerBase(ADCallBackFcnType pTarget)
{
    WRAPPER_NO_CONTRACT;
    ManagedThreadBase_NoADTransition(pTarget, FinalizerThread);
}
// Runs a finalizer callback in a specific AppDomain by reusing the
// turn-around call state created in ManagedThreadBase_NoADTransition.
// Dispatches straight to the inner layer: the outer/middle handlers are
// already in place at the finalizer thread's base.
void ManagedThreadBase::FinalizerAppDomain(AppDomain *pAppDomain,
                                           ADCallBackFcnType pTarget,
                                           LPVOID args,
                                           ManagedThreadCallState *pTurnAround)
{
    WRAPPER_NO_CONTRACT;
    pTurnAround->InitForFinalizer(pAppDomain,pTarget,args);
    _ASSERTE(pTurnAround->flags == MTCSF_NormalBase);
    ManagedThreadBase_DispatchInner(pTurnAround);
}
//+----------------------------------------------------------------------------
//
// Method: Thread::GetStaticFieldAddress private
//
// Synopsis: Get the address of the field relative to the current thread.
// If an address has not been assigned yet then create one.
//
//+----------------------------------------------------------------------------
//+----------------------------------------------------------------------------
//  Thread::GetStaticFieldAddress
//
//  Returns the address of a thread-static field for the current thread,
//  allocating the backing storage if it does not exist yet. For value-type
//  fields the boxed instance is dereferenced and the interior data address
//  is returned.
//+----------------------------------------------------------------------------
LPVOID Thread::GetStaticFieldAddress(FieldDesc *pFD)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    _ASSERTE(pFD != NULL);
    _ASSERTE(pFD->IsThreadStatic());
    _ASSERTE(!pFD->IsRVA());

    // for static field the MethodTable is exact even for generic classes
    MethodTable *pMethodTable = pFD->GetEnclosingMethodTable();

    // We need to make sure that the class has been allocated, however
    // we should not call the class constructor
    ThreadStatics::GetTLM(pMethodTable)->EnsureClassAllocated(pMethodTable);

    // GC-tracked statics (object refs and boxed value types) live in a
    // separate block from the non-GC statics.
    PTR_BYTE pBase = (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
                      pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
                   ? pMethodTable->GetGCThreadStaticsBasePointer()
                   : pMethodTable->GetNonGCThreadStaticsBasePointer();

    _ASSERTE(pBase != NULL);

    DWORD dwOffset = pFD->GetOffset();
    _ASSERTE(dwOffset <= FIELD_OFFSET_LAST_REAL_OFFSET);

    LPVOID pvResult = (LPVOID)((PTR_BYTE)pBase + (DWORD)dwOffset);

    // For value classes, the handle points at an OBJECTREF
    // which holds the boxed value class, so dereference and unbox.
    if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
    {
        OBJECTREF objBoxed = ObjectToOBJECTREF(*(Object**) pvResult);
        pvResult = objBoxed->GetData();
    }

    return pvResult;
}
#endif // #ifndef DACCESS_COMPILE
//+----------------------------------------------------------------------------
//
// Method: Thread::GetStaticFieldAddrNoCreate private
//
// Synopsis: Get the address of the field relative to the thread.
// If an address has not been assigned, return NULL.
// No creating is allowed.
//
//+----------------------------------------------------------------------------
//+----------------------------------------------------------------------------
//  Thread::GetStaticFieldAddrNoCreate
//
//  Returns the address of a thread-static field for this thread, or NULL if
//  no storage has been assigned yet. Never allocates. DAC-safe.
//+----------------------------------------------------------------------------
TADDR Thread::GetStaticFieldAddrNoCreate(FieldDesc *pFD)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    _ASSERTE(pFD != NULL);
    _ASSERTE(pFD->IsThreadStatic());

    // for static field the MethodTable is exact even for generic classes
    PTR_MethodTable pMethodTable = pFD->GetEnclosingMethodTable();

    // GC-tracked statics (object refs and boxed value types) live in a
    // separate block from the non-GC statics.
    PTR_BYTE pBase = (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
                      pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
                   ? pMethodTable->GetGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this))
                   : pMethodTable->GetNonGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this));

    // No storage assigned yet: report that rather than creating any.
    if (pBase == NULL)
        return NULL;

    DWORD dwOffset = pFD->GetOffset();
    _ASSERTE(dwOffset <= FIELD_OFFSET_LAST_REAL_OFFSET);

    TADDR addr = dac_cast<TADDR>(pBase) + (DWORD)dwOffset;

    // For value classes, the handle points at an OBJECTREF
    // which holds the boxed value class, so dereference and unbox.
    if (pFD->IsByValue())
    {
        _ASSERTE(addr != NULL);
        PTR_Object pObj = *PTR_UNCHECKED_OBJECTREF(addr);
        if (pObj == NULL)
            return NULL;
        addr = dac_cast<TADDR>(pObj->GetData());
    }

    return addr;
}
#ifndef DACCESS_COMPILE
//
// NotifyFrameChainOfExceptionUnwind
// -----------------------------------------------------------
// This method will walk the Frame chain from pStartFrame to
// the last frame that is below pvLimitSP and will call each
// frame's ExceptionUnwind method. It will return the first
// Frame that is above pvLimitSP.
//
// Walks the Frame chain from pStartFrame up to (but not past) pvLimitSP,
// calling ExceptionUnwind on each frame. Returns the first frame at or above
// pvLimitSP. Relies on frames being stack-allocated, so chain order follows
// stack-address order.
Frame * Thread::NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP)
{
    CONTRACTL
    {
        NOTHROW;
        DISABLED(GC_TRIGGERS);  // due to UnwindFrameChain from NOTRIGGER areas
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pStartFrame));
        PRECONDITION(CheckPointer(pvLimitSP));
    }
    CONTRACTL_END;

    Frame * pFrame;

#ifdef _DEBUG
    //
    // assert that the specified Thread's Frame chain actually
    // contains the start Frame.
    //
    pFrame = m_pFrame;
    while ((pFrame != pStartFrame) &&
           (pFrame != FRAME_TOP))
    {
        pFrame = pFrame->Next();
    }
    CONSISTENCY_CHECK_MSG(pFrame == pStartFrame, "pStartFrame is not on pThread's Frame chain!");
#endif // _DEBUG

    pFrame = pStartFrame;
    // Notify every frame strictly below the SP limit; stop at the first one
    // at or beyond it.
    while (pFrame < pvLimitSP)
    {
        CONSISTENCY_CHECK(pFrame != PTR_NULL);
        CONSISTENCY_CHECK((pFrame) > static_cast<Frame *>((LPVOID)GetCurrentSP()));
        pFrame->ExceptionUnwind();
        pFrame = pFrame->Next();
    }

    // return the frame after the last one notified of the unwind
    return pFrame;
}
//+----------------------------------------------------------------------------
//
// Method: Thread::DeleteThreadStaticData private
//
// Synopsis: Delete the static data for each appdomain that this thread
// visited.
//
//
//+----------------------------------------------------------------------------
// Frees this thread's entire thread-local statics table (all modules).
// Called during thread teardown.
void Thread::DeleteThreadStaticData()
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    m_ThreadLocalBlock.FreeTable();
}
//+----------------------------------------------------------------------------
//
// Method: Thread::DeleteThreadStaticData public
//
// Synopsis: Delete the static data for the given module. This is called
// when the AssemblyLoadContext unloads.
//
//
//+----------------------------------------------------------------------------
// Frees the thread-local statics for a single module; used when an
// AssemblyLoadContext unloads. The thread itself stays alive, hence
// isThreadShuttingDown == FALSE.
void Thread::DeleteThreadStaticData(ModuleIndex index)
{
    m_ThreadLocalBlock.FreeTLM(index.m_dwIndex, FALSE /* isThreadShuttingDown */);
}
// Lazily resolves the FieldDescs for the managed Thread culture fields.
// The GetStaticFieldAddress calls are kept purely for their side effect:
// they ensure the thread-static storage for each field is allocated
// (the original code stored the returned address in a local that was
// never read — that dead local has been removed).
void Thread::InitCultureAccessors()
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    Thread *pThread = GetThread();

    GCX_COOP();

    if (managedThreadCurrentCulture == NULL) {
        managedThreadCurrentCulture = MscorlibBinder::GetField(FIELD__THREAD__CULTURE);
        // Side effect only: allocate backing storage for the field.
        (void)pThread->GetStaticFieldAddress(managedThreadCurrentCulture);
    }

    if (managedThreadCurrentUICulture == NULL) {
        managedThreadCurrentUICulture = MscorlibBinder::GetField(FIELD__THREAD__UI_CULTURE);
        // Side effect only: allocate backing storage for the field.
        (void)pThread->GetStaticFieldAddress(managedThreadCurrentUICulture);
    }
}
// Invokes the managed property getter identified by 'id' on pObject.
// Returns 0 when pObject is NULL; otherwise the getter's result as an
// ARG_SLOT. pObject is GC-protected across the call.
ARG_SLOT Thread::CallPropertyGet(BinderMethodID id, OBJECTREF pObject)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (!pObject) {
        return 0;
    }

    ARG_SLOT retVal;

    GCPROTECT_BEGIN(pObject);
    MethodDescCallSite propGet(id, &pObject);

    // Set up the Stack.
    ARG_SLOT pNewArgs = ObjToArgSlot(pObject);

    // Make the actual call.
    retVal = propGet.Call_RetArgSlot(&pNewArgs);
    GCPROTECT_END();

    return retVal;
}
// Invokes the managed property setter identified by 'id' on pObject with
// pValue. Returns 0 when pObject is NULL. Both references are GC-protected
// across the call.
ARG_SLOT Thread::CallPropertySet(BinderMethodID id, OBJECTREF pObject, OBJECTREF pValue)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    if (!pObject) {
        return 0;
    }

    ARG_SLOT retVal;

    GCPROTECT_BEGIN(pObject);
    GCPROTECT_BEGIN(pValue);
    MethodDescCallSite propSet(id, &pObject);

    // Set up the Stack.
    ARG_SLOT pNewArgs[] = {
        ObjToArgSlot(pObject),
        ObjToArgSlot(pValue)
    };

    // Make the actual call.
    retVal = propSet.Call_RetArgSlot(pNewArgs);
    GCPROTECT_END();
    GCPROTECT_END();

    return retVal;
}
// Returns the current (UI) culture object for this thread, populating the
// managed field on first use. Returns NULL during early startup (no system
// assembly yet) or when entering the EE is forbidden.
OBJECTREF Thread::GetCulture(BOOL bUICulture)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

    FieldDesc *         pFD;

    _ASSERTE(PreemptiveGCDisabled());

    // This is the case when we're building mscorlib and haven't yet created
    // the system assembly.
    if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
        return NULL;
    }

    // Get the actual thread culture.
    OBJECTREF pCurThreadObject = GetExposedObject();
    _ASSERTE(pCurThreadObject!=NULL);

    THREADBASEREF pThreadBase = (THREADBASEREF)(pCurThreadObject);
    OBJECTREF pCurrentCulture = bUICulture ? pThreadBase->GetCurrentUICulture() : pThreadBase->GetCurrentUserCulture();

    if (pCurrentCulture==NULL) {
        GCPROTECT_BEGIN(pThreadBase);
        if (bUICulture) {
            // Call the Getter for the CurrentUICulture. This will cause it to populate the field.
            ARG_SLOT retVal = CallPropertyGet(METHOD__THREAD__GET_UI_CULTURE,
                                              (OBJECTREF)pThreadBase);
            pCurrentCulture = ArgSlotToObj(retVal);
        } else {
            // This is faster than calling the property, because this is what the call does anyway.
            pFD = MscorlibBinder::GetField(FIELD__CULTURE_INFO__CURRENT_CULTURE);
            _ASSERTE(pFD);

            pFD->CheckRunClassInitThrowing();

            pCurrentCulture = pFD->GetStaticOBJECTREF();
            _ASSERTE(pCurrentCulture!=NULL);
        }
        GCPROTECT_END();
    }

    return pCurrentCulture;
}
// Copies the name of the PARENT of the thread's current (UI) culture into
// szBuffer and returns the copied length. Returns 0 when the name cannot be
// obtained or does not fit in 'length' (including its NUL terminator).
// During early startup the hard-coded invariant parent "en" is returned.
int Thread::GetParentCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    // This is the case when we're building mscorlib and haven't yet created
    // the system assembly.
    if (SystemDomain::System()->SystemAssembly()==NULL) {
        const WCHAR *tempName = W("en");
        INT32 tempLength = (INT32)wcslen(tempName);
        _ASSERTE(length>=tempLength);
        memcpy(szBuffer, tempName, tempLength*sizeof(WCHAR));
        return tempLength;
    }

    ARG_SLOT Result = 0;
    INT32 retVal=0;
    WCHAR *buffer=NULL;
    INT32 bufferLength=0;
    STRINGREF cultureName = NULL;

    GCX_COOP();

    struct _gc {
        OBJECTREF pCurrentCulture;
        OBJECTREF pParentCulture;
    } gc;
    ZeroMemory(&gc, sizeof(gc));
    GCPROTECT_BEGIN(gc);

    gc.pCurrentCulture = GetCulture(bUICulture);
    if (gc.pCurrentCulture != NULL) {
        Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_PARENT, gc.pCurrentCulture);
    }

    if (Result) {
        gc.pParentCulture = (OBJECTREF)(ArgSlotToObj(Result));
        if (gc.pParentCulture != NULL)
        {
            Result = 0;
            // Fetch the parent culture's Name property.
            Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_NAME, gc.pParentCulture);
        }
    }

    GCPROTECT_END();

    if (Result==0) {
        return 0;
    }

    // Extract the data out of the String.
    cultureName = (STRINGREF)(ArgSlotToObj(Result));
    cultureName->RefInterpretGetStringValuesDangerousForGC((WCHAR**)&buffer, &bufferLength);

    // Copy only when the name plus terminator fits in the caller's buffer.
    if (bufferLength<length) {
        memcpy(szBuffer, buffer, bufferLength * sizeof (WCHAR));
        szBuffer[bufferLength]=0;
        retVal = bufferLength;
    }

    return retVal;
}
// Copies the name of the thread's current (UI) culture into szBuffer and
// returns the copied length. Returns 0 when the name cannot be obtained or
// does not fit in 'length' (including its NUL terminator). During early
// startup (or when entering the EE is forbidden) returns the hard-coded
// "en-US".
int Thread::GetCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    // This is the case when we're building mscorlib and haven't yet created
    // the system assembly.
    if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
        const WCHAR *tempName = W("en-US");
        INT32 tempLength = (INT32)wcslen(tempName);
        _ASSERTE(length>=tempLength);
        memcpy(szBuffer, tempName, tempLength*sizeof(WCHAR));
        return tempLength;
    }

    ARG_SLOT Result = 0;
    INT32 retVal=0;
    WCHAR *buffer=NULL;
    INT32 bufferLength=0;
    STRINGREF cultureName = NULL;

    GCX_COOP ();

    OBJECTREF pCurrentCulture = NULL;
    GCPROTECT_BEGIN(pCurrentCulture)
    {
        pCurrentCulture = GetCulture(bUICulture);
        if (pCurrentCulture != NULL)
            Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_NAME, pCurrentCulture);
    }
    GCPROTECT_END();

    if (Result==0) {
        return 0;
    }

    // Extract the data out of the String.
    cultureName = (STRINGREF)(ArgSlotToObj(Result));
    cultureName->RefInterpretGetStringValuesDangerousForGC((WCHAR**)&buffer, &bufferLength);

    // Copy only when the name plus terminator fits in the caller's buffer.
    if (bufferLength<length) {
        memcpy(szBuffer, buffer, bufferLength * sizeof (WCHAR));
        szBuffer[bufferLength]=0;
        retVal = bufferLength;
    }

    return retVal;
}
// Non-throwing wrapper around Thread::GetCultureId: swallows any exception
// and returns LCID(-1) in that case. (The cast through INT32 on return
// mirrors the original code; it narrows and then re-widens the LCID.)
LCID GetThreadCultureIdNoThrow(Thread *pThread, BOOL bUICulture)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    LCID Result = LCID(-1);

    EX_TRY
    {
        Result = pThread->GetCultureId(bUICulture);
    }
    EX_CATCH
    {
        // Best effort: fall through with Result == LCID(-1).
    }
    EX_END_CATCH (SwallowAllExceptions);

    return (INT32)Result;
}
// Return a language identifier.
// Returns the thread's current (UI) culture as an LCID, or -1 during early
// startup, when entering the EE is forbidden, or when FEATURE_USE_LCID is
// not compiled in.
LCID Thread::GetCultureId(BOOL bUICulture)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    // This is the case when we're building mscorlib and haven't yet created
    // the system assembly.
    if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
        return (LCID) -1;
    }

    LCID Result = (LCID) -1;

#ifdef FEATURE_USE_LCID
    GCX_COOP();
    OBJECTREF pCurrentCulture = NULL;
    GCPROTECT_BEGIN(pCurrentCulture)
    {
        pCurrentCulture = GetCulture(bUICulture);
        if (pCurrentCulture != NULL)
            Result = (LCID)CallPropertyGet(METHOD__CULTURE_INFO__GET_ID, pCurrentCulture);
    }
    GCPROTECT_END();
#endif

    return Result;
}
// Sets the thread's current (UI) culture from an LCID by materializing the
// corresponding managed CultureInfo and assigning it through SetCulture.
void Thread::SetCultureId(LCID lcid, BOOL bUICulture)
{
    CONTRACTL {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    GCX_COOP();

    OBJECTREF CultureObj = NULL;
    GCPROTECT_BEGIN(CultureObj)
    {
        // Convert the LCID into a CultureInfo.
        GetCultureInfoForLCID(lcid, &CultureObj);

        // Set the newly created culture as the thread's culture.
        SetCulture(&CultureObj, bUICulture);
    }
    GCPROTECT_END();
}
// Installs *CultureObj as this thread's culture (or UI culture) by calling
// the managed Thread culture property setter on the exposed thread object.
void Thread::SetCulture(OBJECTREF *CultureObj, BOOL bUICulture)
{
CONTRACTL {
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// Retrieve the exposed thread object.
OBJECTREF pCurThreadObject = GetExposedObject();
_ASSERTE(pCurThreadObject!=NULL);
// Set the culture property on the thread.
THREADBASEREF pThreadBase = (THREADBASEREF)(pCurThreadObject);
CallPropertySet(bUICulture
? METHOD__THREAD__SET_UI_CULTURE
: METHOD__THREAD__SET_CULTURE,
(OBJECTREF)pThreadBase, *CultureObj);
}
// Called on the GC thread during a GC to record that this thread has
// promoted bytes.  A thread in preemptive mode with an empty Frame chain
// has nothing the GC could have scanned, so the flag is cleared again.
void Thread::SetHasPromotedBytes ()
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
m_fPromoted = TRUE;
// Only legal while a GC is in progress, on the GC thread.
_ASSERTE(GCHeapUtilities::IsGCInProgress() && IsGCThread ());
if (!m_fPreemptiveGCDisabled)
{
if (FRAME_TOP == GetFrame())
m_fPromoted = FALSE;
}
}
// Reports whether the thread-store lock is held: with an explicit thread,
// compare it against the recorded holding thread; with NULL, ask whether
// the current OS thread is the holder.
BOOL ThreadStore::HoldingThreadStore(Thread *pThread)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    if (pThread == NULL)
    {
        // No Thread object supplied: fall back to an OS-thread-id check.
        return (s_pThreadStore->m_holderthreadid.IsCurrentThread());
    }

    return (pThread == s_pThreadStore->m_HoldingThread);
}
// Sums the thread-pool completion counts of all live threads plus the
// overflow counter that accumulates counts from threads that have exited.
LONG Thread::GetTotalThreadPoolCompletionCount()
{
CONTRACTL
{
NOTHROW;
MODE_ANY;
}
CONTRACTL_END;
LONG total;
if (g_fEEStarted) //make sure we actually have a thread store
{
// make sure up-to-date thread-local counts are visible to us
::FlushProcessWriteBuffers();
// enumerate all threads, summing their local counts.
ThreadStoreLockHolder tsl;
total = s_threadPoolCompletionCountOverflow.Load();
Thread *pThread = NULL;
while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL)
{
total += pThread->m_threadPoolCompletionCount;
}
}
else
{
// EE not started yet: only the overflow counter can have a value.
total = s_threadPoolCompletionCountOverflow.Load();
}
return total;
}
// Switches to cooperative GC mode and resets the managed Thread object
// (culture, name).  Returns the managed object's priority, or nPriority
// unchanged when no managed object exists.
INT32 Thread::ResetManagedThreadObject(INT32 nPriority)
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
GCX_COOP();
return ResetManagedThreadObjectInCoopMode(nPriority);
}
// Cooperative-mode worker for ResetManagedThreadObject: clears the managed
// Thread object's culture and name, and reads back its current priority.
INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
SO_TOLERANT;
}
CONTRACTL_END;
THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
if (pObject != NULL)
{
pObject->ResetCulture();
pObject->ResetName();
// Report the managed object's priority back to the caller.
nPriority = pObject->GetPriority();
}
return nPriority;
}
// Fully resets per-thread EE state (thread statics, allocation byte count,
// promotion flag) when this thread is recycled.
void Thread::FullResetThread()
{
CONTRACTL {
NOTHROW;
GC_TRIGGERS;
}
CONTRACTL_END;
GCX_COOP();
// We need to put this thread in COOPERATIVE GC first to solve race between AppDomain::Unload
// and Thread::Reset. AppDomain::Unload does a full GC to collect all roots in one AppDomain.
// ThreadStaticData used to be coupled with a managed array of objects in the managed Thread
// object, however this is no longer the case.
// TODO: Do we still need to put this thread into COOP mode?
GCX_FORBID();
DeleteThreadStaticData();
m_alloc_context.alloc_bytes = 0;
m_fPromoted = FALSE;
}
// Decides whether a thread-pool thread needs a real (expensive) reset:
// always for foreground threads; for background threads only when the
// managed Thread object exists and carries a non-default priority.
BOOL Thread::IsRealThreadPoolResetNeeded()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // Foreground threads always need the reset.
    if (!IsBackground())
        return TRUE;

    THREADBASEREF threadObj = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
    if (threadObj == NULL)
        return FALSE;

    // Non-default managed priority must be restored by a real reset.
    return (threadObj->GetPriority() != ThreadNative::PRIORITY_NORMAL) ? TRUE : FALSE;
}
// Resets this thread for reuse (thread-pool / finalizer recycling).
// fFull additionally clears thread statics; fResetAbort clears any pending
// or completed abort; fThreadObjectResetNeeded resets the managed Thread
// object and restores the expected native priority.
void Thread::InternalReset(BOOL fFull, BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort)
{
CONTRACTL {
NOTHROW;
if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;SO_INTOLERANT;} else {GC_NOTRIGGER;SO_TOLERANT;}
}
CONTRACTL_END;
// May only be called on the current thread.
_ASSERTE (this == GetThread());
FinishSOWork();
INT32 nPriority = ThreadNative::PRIORITY_NORMAL;
// The finalizer thread's default priority is highest, not normal.
if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
{
nPriority = ThreadNative::PRIORITY_HIGHEST;
}
if(fThreadObjectResetNeeded)
{
nPriority = ResetManagedThreadObject(nPriority);
}
if (fFull)
{
FullResetThread();
}
//m_MarshalAlloc.Collapse(NULL);
if (fResetAbort && IsAbortRequested()) {
UnmarkThreadForAbort(TAR_ALL);
}
if (fResetAbort && IsAborted())
ClearAborted();
// If the managed side changed the priority, restore the native default.
if (IsThreadPoolThread() && fThreadObjectResetNeeded)
{
SetBackground(TRUE);
if (nPriority != ThreadNative::PRIORITY_NORMAL)
{
SetThreadPriority(THREAD_PRIORITY_NORMAL);
}
}
else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
{
SetBackground(TRUE);
if (nPriority != ThreadNative::PRIORITY_HIGHEST)
{
SetThreadPriority(THREAD_PRIORITY_HIGHEST);
}
}
}
// Requests a safe (non-rude) abort of this thread on behalf of a host.
// Never throws: UserAbort failures are swallowed and S_OK returned; an SO
// probe failure returns COR_E_STACKOVERFLOW.
HRESULT Thread::Abort ()
{
CONTRACTL
{
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
SO_TOLERANT;
}
CONTRACTL_END;
BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW;);
EX_TRY
{
// TA_Safe: give the thread a chance to run backout code; wait forever.
UserAbort(TAR_Thread, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Host);
}
EX_CATCH
{
// Best-effort: abort failures are deliberately ignored.
}
EX_END_CATCH(SwallowAllExceptions);
END_SO_INTOLERANT_CODE;
return S_OK;
}
// Requests a rude abort of this thread on behalf of a host.  Mirrors
// Thread::Abort but escalates to EEPolicy::TA_Rude (skips backout code).
// Never throws: failures are swallowed and S_OK returned; an SO probe
// failure returns COR_E_STACKOVERFLOW.
HRESULT Thread::RudeAbort()
{
    CONTRACTL
    {
        NOTHROW;
        if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // Consistency fix: terminate the failure action with ';' exactly as the
    // sibling Thread::Abort does (a doubled ';' is harmless if the macro
    // already appends one).
    BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW;);

    EX_TRY
    {
        UserAbort(TAR_Thread, EEPolicy::TA_Rude, INFINITE, Thread::UAC_Host);
    }
    EX_CATCH
    {
        // Best-effort: abort failures are deliberately ignored.
    }
    EX_END_CATCH(SwallowAllExceptions);

    END_SO_INTOLERANT_CODE;

    return S_OK;
}
// Reports whether this thread should receive priority scheduling: while it
// runs in cooperative GC mode, or when it is the finalizer thread (once
// the EE has started).
HRESULT Thread::NeedsPriorityScheduling(BOOL *pbNeedsPriorityScheduling)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    BOOL fIsFinalizer = (g_fEEStarted && this == FinalizerThread::GetFinalizerThread());
    *pbNeedsPriorityScheduling = (m_fPreemptiveGCDisabled || fIsFinalizer);
    return S_OK;
}
// Reports the number of locks this thread currently holds.
HRESULT Thread::LocksHeld(SIZE_T *pLockCount)
{
LIMITED_METHOD_CONTRACT;
*pLockCount = m_dwLockCount;
return S_OK;
}
// Enters a (nestable) region in which asynchronous thread aborts are
// deferred.  Balanced by EndPreventAsyncAbort.
HRESULT Thread::BeginPreventAsyncAbort()
{
WRAPPER_NO_CONTRACT;
#ifdef _DEBUG
int count =
#endif
FastInterlockIncrement((LONG*)&m_PreventAbort);
#ifdef _DEBUG
// The nesting count must be positive after an increment.
ASSERT(count > 0);
FastInterlockIncrement((LONG*)&m_dwDisableAbortCheckCount);
#endif
return S_OK;
}
// Leaves a region entered with BeginPreventAsyncAbort, re-enabling
// asynchronous aborts once the nesting count reaches zero.
HRESULT Thread::EndPreventAsyncAbort()
{
WRAPPER_NO_CONTRACT;
#ifdef _DEBUG
int count =
#endif
FastInterlockDecrement((LONG*)&m_PreventAbort);
#ifdef _DEBUG
// The nesting count must never go negative.
ASSERT(count >= 0);
FastInterlockDecrement((LONG*)&m_dwDisableAbortCheckCount);
#endif
return S_OK;
}
// COM-style AddRef over the unmanaged reference count.  Requires an
// existing external reference and a count that has not been poisoned.
ULONG Thread::AddRef()
{
WRAPPER_NO_CONTRACT;
_ASSERTE(m_ExternalRefCount > 0);
_ASSERTE (m_UnmanagedRefCount != (DWORD) -1);
ULONG ref = FastInterlockIncrement((LONG*)&m_UnmanagedRefCount);
return ref;
}
// COM-style Release over the unmanaged reference count.  Note: does not
// destroy the Thread when the count reaches zero; it only decrements.
ULONG Thread::Release()
{
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC_HOST_ONLY;
_ASSERTE (m_ExternalRefCount > 0);
_ASSERTE (m_UnmanagedRefCount > 0);
ULONG ref = FastInterlockDecrement((LONG*)&m_UnmanagedRefCount);
return ref;
}
// This Thread object exposes no COM interfaces; always E_NOINTERFACE.
HRESULT Thread::QueryInterface(REFIID riid, void **ppUnk)
{
LIMITED_METHOD_CONTRACT;
return E_NOINTERFACE;
}
// Host notification hook for the current thread.  The body is empty apart
// from asserting the caller and marking an SO-tolerance contract
// violation; kept for interface compatibility.
void Thread::SetupThreadForHost()
{
CONTRACTL
{
THROWS;
GC_TRIGGERS;
SO_TOLERANT;
}
CONTRACTL_END;
_ASSERTE (GetThread() == this);
CONTRACT_VIOLATION(SOToleranceViolation);
}
// Classifies the current thread from its ThreadType flag bits (stored in
// the TlsIdx_ThreadType FLS slot).  Flags are tested in a fixed priority
// order; a thread with none of the special flags reports TT_USER when it
// has a Thread object, TT_UNKNOWN otherwise.
ETaskType GetCurrentTaskType()
{
    STATIC_CONTRACT_NOTHROW;
    STATIC_CONTRACT_GC_NOTRIGGER;
    STATIC_CONTRACT_SO_TOLERANT;

    size_t type = (size_t)ClrFlsGetValue (TlsIdx_ThreadType);

    // Priority-ordered flag-to-task-type table; the first set flag wins,
    // matching the original if/else-if chain exactly.
    struct FlagMapping
    {
        size_t flag;
        ETaskType taskType;
    };
    static const FlagMapping s_mappings[] =
    {
        { ThreadType_DbgHelper,               TT_DEBUGGERHELPER },
        { ThreadType_GC,                      TT_GC },
        { ThreadType_Finalizer,               TT_FINALIZER },
        { ThreadType_Timer,                   TT_THREADPOOL_TIMER },
        { ThreadType_Gate,                    TT_THREADPOOL_GATE },
        { ThreadType_Wait,                    TT_THREADPOOL_WAIT },
        { ThreadType_Threadpool_IOCompletion, TT_THREADPOOL_IOCOMPLETION },
        { ThreadType_Threadpool_Worker,       TT_THREADPOOL_WORKER },
    };

    for (size_t i = 0; i < sizeof(s_mappings) / sizeof(s_mappings[0]); i++)
    {
        if (type & s_mappings[i].flag)
        {
            return s_mappings[i].taskType;
        }
    }

    // No special flag: an EE thread is a user task, anything else unknown.
    Thread *pThread = GetThread();
    return (pThread != NULL) ? TT_USER : TT_UNKNOWN;
}
// Constructs an unheld lock; the description string is retained only in
// debug builds, for diagnostics in check-failure messages.
DeadlockAwareLock::DeadlockAwareLock(const char *description)
: m_pHoldingThread(NULL)
#ifdef _DEBUG
, m_description(description)
#endif
{
LIMITED_METHOD_CONTRACT;
}
// Destructor: briefly acquires the global deadlock-detection lock so any
// thread still iterating over this lock drains before it is destroyed.
DeadlockAwareLock::~DeadlockAwareLock()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
}
CONTRACTL_END;
// Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
CrstHolder lock(&g_DeadlockAwareCrst);
}
// Check-build deadlock test: fails if pThread blocking on this lock would
// form a wait cycle.  Caller must hold g_DeadlockAwareCrst.
CHECK DeadlockAwareLock::CheckDeadlock(Thread *pThread)
{
CONTRACTL
{
PRECONDITION(g_DeadlockAwareCrst.OwnedByCurrentThread());
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
// Note that this check is recursive in order to produce descriptive check failure messages.
Thread *pHoldingThread = m_pHoldingThread.Load();
if (pThread == pHoldingThread)
{
// pThread already holds this lock: blocking on it is a self-deadlock.
CHECK_FAILF(("Lock %p (%s) is held by thread %d", this, m_description, pThread));
}
if (pHoldingThread != NULL)
{
// Follow the holder's own blocking edge to detect longer cycles.
DeadlockAwareLock *pBlockingLock = pHoldingThread->m_pBlockingLock.Load();
if (pBlockingLock != NULL)
{
CHECK_MSGF(pBlockingLock->CheckDeadlock(pThread),
("Deadlock: Lock %p (%s) is held by thread %d", this, m_description, pHoldingThread));
}
}
CHECK_OK;
}
// Pure query: returns FALSE if the current thread blocking on this lock
// would create a deadlock cycle, TRUE otherwise.  Unlike TryBeginEnterLock
// it records nothing.
// NOTE(review): no CONTRACTL here, unlike the otherwise-identical
// TryBeginEnterLock — confirm whether {NOTHROW; GC_NOTRIGGER} was intended.
BOOL DeadlockAwareLock::CanEnterLock()
{
Thread * pThread = GetThread();
CONSISTENCY_CHECK_MSG(pThread != NULL,
"Cannot do deadlock detection on non-EE thread");
CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
"Cannot block on two locks at once");
{
CrstHolder lock(&g_DeadlockAwareCrst);
// Look for deadlocks
DeadlockAwareLock *pLock = this;
while (TRUE)
{
Thread * holdingThread = pLock->m_pHoldingThread;
if (holdingThread == pThread)
{
// Deadlock!
return FALSE;
}
if (holdingThread == NULL)
{
// Lock is unheld
break;
}
pLock = holdingThread->m_pBlockingLock;
if (pLock == NULL)
{
// Thread is running free
break;
}
}
return TRUE;
}
}
// Attempts to register the current thread as blocking on this lock.  Walks
// the holder -> blocking-lock chain under g_DeadlockAwareCrst; if the chain
// leads back to the current thread a deadlock is imminent and FALSE is
// returned without recording anything.  Otherwise the blocking edge is
// published and TRUE returned (complete the acquisition with EndEnterLock).
BOOL DeadlockAwareLock::TryBeginEnterLock()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    Thread * pThread = GetThread();
    CONSISTENCY_CHECK_MSG(pThread != NULL,
                          "Cannot do deadlock detection on non-EE thread");
    CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
                          "Cannot block on two locks at once");

    {
        CrstHolder lock(&g_DeadlockAwareCrst);

        // Look for deadlocks: follow who holds each lock and what that
        // holder is in turn blocked on.
        DeadlockAwareLock *pLock = this;
        while (TRUE)
        {
            Thread * holdingThread = pLock->m_pHoldingThread;
            if (holdingThread == pThread)
            {
                // Deadlock!
                return FALSE;
            }
            if (holdingThread == NULL)
            {
                // Lock is unheld
                break;
            }
            pLock = holdingThread->m_pBlockingLock;
            if (pLock == NULL)
            {
                // Thread is running free
                break;
            }
        }

        // Publish the blocking edge while still under the global lock.
        pThread->m_pBlockingLock = this;
    }

    return TRUE;
}
// (Fix: removed the stray ';' that followed the closing brace.)
// Registers the current thread as blocking on this lock, asserting (in
// check builds) that doing so creates no deadlock cycle.  Use when entry is
// mandatory; TryBeginEnterLock is the variant that reports instead of
// asserting.
void DeadlockAwareLock::BeginEnterLock()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    Thread * pThread = GetThread();
    CONSISTENCY_CHECK_MSG(pThread != NULL,
                          "Cannot do deadlock detection on non-EE thread");
    CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
                          "Cannot block on two locks at once");

    {
        CrstHolder lock(&g_DeadlockAwareCrst);

        // Look for deadlock loop
        CONSISTENCY_CHECK_MSG(CheckDeadlock(pThread), "Deadlock detected!");

        pThread->m_pBlockingLock = this;
    }
}
// (Fix: removed the stray ';' that followed the closing brace.)
// Completes lock entry: transitions the current thread from "blocking on
// this lock" to "holding this lock".
void DeadlockAwareLock::EndEnterLock()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
Thread * pThread = GetThread();
CONSISTENCY_CHECK(m_pHoldingThread.Load() == NULL || m_pHoldingThread.Load() == pThread);
CONSISTENCY_CHECK(pThread->m_pBlockingLock.Load() == this);
// No need to take a lock when going from blocking to holding. This
// transition implies the lack of a deadlock that other threads can see.
// (If they would see a deadlock after the transition, they would see
// one before as well.)
m_pHoldingThread = pThread;
}
// Releases the lock; the caller must be the recorded holder and must no
// longer be marked as blocking on anything.
void DeadlockAwareLock::LeaveLock()
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL_END;
CONSISTENCY_CHECK(m_pHoldingThread == GetThread());
CONSISTENCY_CHECK(GetThread()->m_pBlockingLock.Load() == NULL);
m_pHoldingThread = NULL;
}
#ifdef _DEBUG
// Normally, any thread we operate on has a Thread block in its TLS. But there are
// a few special threads we don't normally execute managed code on.
//
// There is a scenario where we run managed code on such a thread, which is when the
// DLL_THREAD_ATTACH notification of an (IJW?) module calls into managed code. This
// is incredibly dangerous. If a GC is provoked, the system may have trouble performing
// the GC because its threads aren't available yet.
// Debug-only registry of "special" OS threads that normally never run
// managed code (bounded capacity; overflow caught by the assert below).
static DWORD SpecialEEThreads[10];
static LONG cnt_SpecialEEThreads = 0;
// Records the current OS thread id as a special EE thread.
void dbgOnly_IdentifySpecialEEThread()
{
WRAPPER_NO_CONTRACT;
LONG ourCount = FastInterlockIncrement(&cnt_SpecialEEThreads);
_ASSERTE(ourCount < (LONG) NumItems(SpecialEEThreads));
// ourCount is the post-increment value, so slot ourCount-1 is uniquely ours.
SpecialEEThreads[ourCount-1] = ::GetCurrentThreadId();
}
// Returns TRUE when the current OS thread is one of the registered special
// threads, is the debugger helper thread, or has no Thread object at all.
BOOL dbgOnly_IsSpecialEEThread()
{
WRAPPER_NO_CONTRACT;
DWORD ourId = ::GetCurrentThreadId();
for (LONG i=0; i<cnt_SpecialEEThreads; i++)
if (ourId == SpecialEEThreads[i])
return TRUE;
// If we have an EE thread doing helper thread duty, then it is temporarily
// 'special' too.
#ifdef DEBUGGING_SUPPORTED
if (g_pDebugInterface)
{
//<TODO>We probably should use Thread::GetThreadId</TODO>
DWORD helperID = g_pDebugInterface->GetHelperThreadID();
if (helperID == ourId)
return TRUE;
}
#endif
//<TODO>Clean this up</TODO>
if (GetThread() == NULL)
return TRUE;
return FALSE;
}
#endif // _DEBUG
// There is an MDA which can detect illegal reentrancy into the CLR. For instance, if you call managed
// code from a native vectored exception handler, this might cause a reverse PInvoke to occur. But if the
// exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and
// general corruption.
#ifdef MDA_SUPPORTED
// Slow path for HasIllegalReentrancy: fires the Reentrancy MDA when the
// current thread re-entered the CLR while already in cooperative GC mode.
NOINLINE BOOL HasIllegalReentrancyRare()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
ENTRY_POINT;
MODE_ANY;
}
CONTRACTL_END;
Thread *pThread = GetThread();
// Only a cooperative-mode EE thread constitutes illegal reentrancy.
if (pThread == NULL || !pThread->PreemptiveGCDisabled())
return FALSE;
BEGIN_ENTRYPOINT_VOIDRET;
MDA_TRIGGER_ASSISTANT(Reentrancy, ReportViolation());
END_ENTRYPOINT_VOIDRET;
return TRUE;
}
#endif
// Actually fire the Reentrancy probe, if warranted.
// Cheap front end: consults the rare path only when the Reentrancy MDA is
// enabled; always FALSE when MDAs are compiled out.
BOOL HasIllegalReentrancy()
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
ENTRY_POINT;
MODE_ANY;
}
CONTRACTL_END;
#ifdef MDA_SUPPORTED
if (NULL == MDA_GET_ASSISTANT(Reentrancy))
return FALSE;
return HasIllegalReentrancyRare();
#else
return FALSE;
#endif // MDA_SUPPORTED
}
#endif // #ifndef DACCESS_COMPILE
#ifdef DACCESS_COMPILE
// DAC: capture this STATIC_DATA structure itself into the dump.
void
STATIC_DATA::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
WRAPPER_NO_CONTRACT;
DAC_ENUM_STHIS(STATIC_DATA);
}
// DAC: enumerate the memory this Thread depends on (domain, debugger filter
// context, last-thrown handle, exception state, thread-local block, Frame
// chain), then attempt a stack walk via EnumMemoryRegionsWorker.  Stack
// problems are swallowed so one corrupt thread cannot abort dump creation.
void
Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
WRAPPER_NO_CONTRACT;
DAC_ENUM_VTHIS();
// Domain data is skipped for mini/triage dumps to keep them small.
if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
{
if (m_pDomain.IsValid())
{
m_pDomain->EnumMemoryRegions(flags, true);
}
}
if (m_debuggerFilterContext.IsValid())
{
m_debuggerFilterContext.EnumMem();
}
OBJECTHANDLE_EnumMemoryRegions(m_LastThrownObjectHandle);
m_ExceptionState.EnumChainMemoryRegions(flags);
m_ThreadLocalBlock.EnumMemoryRegions(flags);
if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
{
//
// Allow all of the frames on the stack to enumerate
// their memory.
//
PTR_Frame frame = m_pFrame;
while (frame.IsValid() &&
frame.GetAddr() != dac_cast<TADDR>(FRAME_TOP))
{
frame->EnumMemoryRegions(flags);
frame = frame->m_Next;
}
}
//
// Try and do a stack trace and save information
// for each part of the stack. This is very vulnerable
// to memory problems so ignore all exceptions here.
//
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
(
EnumMemoryRegionsWorker(flags);
);
}
// DAC: walk this thread's stack, enumerating everything a debugger needs
// to replay the walk from a dump — register context, code bytes around
// each call site, app domains, func-eval frame data, MethodDescs and
// (optionally) unwind info.  The target stack may be corrupt, so SP is
// validated on every frame (strictly increasing for frameless frames,
// pointer-aligned, inside the stack range) and the walk is abandoned on
// any violation.
void
Thread::EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags)
{
    WRAPPER_NO_CONTRACT;

    if (IsUnstarted())
    {
        // An unstarted thread has no stack to walk.
        return;
    }

    T_CONTEXT context;
    BOOL DacGetThreadContext(Thread* thread, T_CONTEXT* context); // local prototype of the DAC helper
    REGDISPLAY regDisp;
    StackFrameIterator frameIter;

    TADDR previousSP = 0; //start at zero; this allows first check to always succeed.
    TADDR currentSP;

    // Init value. The Limit itself is not legal, so move one target pointer size to the smallest-magnitude
    // legal address.
    currentSP = dac_cast<TADDR>(m_CacheStackLimit) + sizeof(TADDR);

    if (GetFilterContext())
    {
        context = *GetFilterContext();
    }
    else
    {
        DacGetThreadContext(this, &context);
    }

    // BUGFIX: the four "&regDisp" expressions below had been mangled into
    // the Unicode registered-sign character followed by "Disp" (an HTML
    // entity corruption of "&reg"); restored to address-of regDisp.
    FillRegDisplay(&regDisp, &context);
    frameIter.Init(this, NULL, &regDisp, 0);
    while (frameIter.IsValid())
    {
        //
        // There are identical stack pointer checking semantics in code:ClrDataAccess::EnumMemWalkStackHelper
        // You ***MUST*** maintain identical semantics for both checks!
        //

        // Before we continue, we should check to be sure we have a valid
        // stack pointer. This is to prevent stacks that are not walked
        // properly due to
        //   a) stack corruption bugs
        //   b) bad stack walks
        // from continuing on indefinitely.
        //
        // We will force SP to strictly increase.
        //   this check can only happen for real stack frames (i.e. not for explicit frames that don't update the RegDisplay)
        //   for ia64, SP may be equal, but in this case BSP must strictly decrease.
        // We will force SP to be properly aligned.
        // We will force SP to be in the correct range.
        //
        if (frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAMELESS_METHOD)
        {
            // This check cannot be applied to explicit frames; they may not move the SP at all.
            // Also, a single function can push several on the stack at a time with no guarantees about
            // ordering so we can't check that the addresses of the explicit frames are monotonically increasing.
            // There is the potential that the walk will not terminate if a set of explicit frames reference
            // each other circularly. While we could choose a limit for the number of explicit frames allowed
            // in a row like the total stack size/pointer size, we have no known problems with this scenario.
            // Thus for now we ignore it.
            currentSP = (TADDR)GetRegdisplaySP(&regDisp);

            if (currentSP <= previousSP)
            {
                _ASSERTE(!"Target stack has been corrupted, SP for current frame must be larger than previous frame.");
                break;
            }
        }

        // On windows desktop, the stack pointer should be a multiple
        // of pointer-size-aligned in the target address space
        if (currentSP % sizeof(TADDR) != 0)
        {
            _ASSERTE(!"Target stack has been corrupted, SP must be aligned.");
            break;
        }

        if (!IsAddressInStack(currentSP))
        {
            // (Typo fix: message previously read "must in in the stack range".)
            _ASSERTE(!"Target stack has been corrupted, SP must be in the stack range.");
            break;
        }

        // Enumerate the code around the call site to help debugger stack walking heuristics
        PCODE callEnd = GetControlPC(&regDisp);
        DacEnumCodeForStackwalk(callEnd);

        if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
        {
            if (frameIter.m_crawl.GetAppDomain())
            {
                frameIter.m_crawl.GetAppDomain()->EnumMemoryRegions(flags, true);
            }
        }

        // To stackwalk through funceval frames, we need to be sure to preserve the
        // DebuggerModule's m_pRuntimeDomainFile.  This is the only case that doesn't use the current
        // vmDomainFile in code:DacDbiInterfaceImpl::EnumerateInternalFrames.  The following
        // code mimics that function.
        // Allow failure, since we want to continue attempting to walk the stack regardless of the outcome.
        EX_TRY
        {
            if ((frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAME_FUNCTION) ||
                (frameIter.GetFrameState() == StackFrameIterator::SFITER_SKIPPED_FRAME_FUNCTION))
            {
                Frame * pFrame = frameIter.m_crawl.GetFrame();
                g_pDebugInterface->EnumMemoryRegionsIfFuncEvalFrame(flags, pFrame);
            }
        }
        EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED

        MethodDesc* pMD = frameIter.m_crawl.GetFunction();
        if (pMD != NULL)
        {
            pMD->EnumMemoryRegions(flags);
#if defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
            // Enumerate unwind info
            // Note that we don't do this based on the MethodDesc because in theory there isn't a 1:1 correspondence
            // between MethodDesc and code (and so unwind info, and even debug info).  Eg., EnC creates new versions
            // of the code, but the MethodDesc always points at the latest version (which isn't necessarily
            // the one on the stack).  In practice this is unlikely to be a problem since wanting a minidump
            // and making EnC edits are usually mutually exclusive.
            if (frameIter.m_crawl.IsFrameless())
            {
                frameIter.m_crawl.GetJitManager()->EnumMemoryRegionsForMethodUnwindInfo(flags, frameIter.m_crawl.GetCodeInfo());
            }
#endif // defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
        }

        previousSP = currentSP;

        if (frameIter.Next() != SWA_CONTINUE)
        {
            break;
        }
    }
}
// DAC: enumerate the ThreadStore pointer, the store structure itself, and
// every thread on its list.  Per-thread failures are swallowed so one
// corrupt thread does not stop enumeration of the rest.
void
ThreadStore::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
SUPPORTS_DAC;
WRAPPER_NO_CONTRACT;
// This will write out the context of the s_pThreadStore. ie
// just the pointer
//
s_pThreadStore.EnumMem();
if (s_pThreadStore.IsValid())
{
// write out the whole ThreadStore structure
DacEnumHostDPtrMem(s_pThreadStore);
// The thread list may be corrupt, so just
// ignore exceptions during enumeration.
EX_TRY
{
Thread* thread = s_pThreadStore->m_ThreadList.GetHead();
LONG dwNumThreads = s_pThreadStore->m_ThreadCount;
for (LONG i = 0; (i < dwNumThreads) && (thread != NULL); i++)
{
// Even if this thread is totally broken and we can't enum it, struggle on.
// If we do not, we will leave this loop and not enum stack memory for any further threads.
CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED(
thread->EnumMemoryRegions(flags);
);
thread = s_pThreadStore->m_ThreadList.GetNext(thread);
}
}
EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
}
}
#endif // #ifdef DACCESS_COMPILE
#ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
// For the purposes of tracking resource usage we implement a simple cpu resource usage counter on each
// thread. Every time QueryThreadProcessorUsage() is invoked it returns the amount of cpu time (a combination
// of user and kernel mode time) used since the last call to QueryThreadProcessorUsage(). The result is in 100
// nanosecond units.
// Returns the CPU time (user + kernel, in 100ns units) consumed by this
// thread since the previous successful call, advancing the stored baseline
// with an interlocked exchange so concurrent callers never double-report.
ULONGLONG Thread::QueryThreadProcessorUsage()
{
LIMITED_METHOD_CONTRACT;
// Get current values for the amount of kernel and user time used by this thread over its entire lifetime.
FILETIME sCreationTime, sExitTime, sKernelTime, sUserTime;
HANDLE hThread = GetThreadHandle();
BOOL fResult = GetThreadTimes(hThread,
&sCreationTime,
&sExitTime,
&sKernelTime,
&sUserTime);
if (!fResult)
{
#ifdef _DEBUG
ULONG error = GetLastError();
// NOTE(review): 'error' is unsigned but printed with %d — cosmetic only,
// and debug-build only.
printf("GetThreadTimes failed: %d; handle is %p\n", error, hThread);
_ASSERTE(FALSE);
#endif
return 0;
}
// Combine the user and kernel times into a single value (FILETIME is just a structure representing an
// unsigned int64 in two 32-bit pieces).
_ASSERTE(sizeof(FILETIME) == sizeof(UINT64));
ULONGLONG ullCurrentUsage = *(ULONGLONG*)&sKernelTime + *(ULONGLONG*)&sUserTime;
// Store the current processor usage as the new baseline, and retrieve the previous usage.
ULONGLONG ullPreviousUsage = VolatileLoad(&m_ullProcessorUsageBaseline);
if (ullPreviousUsage >= ullCurrentUsage ||
ullPreviousUsage != (ULONGLONG)InterlockedCompareExchange64(
(LONGLONG*)&m_ullProcessorUsageBaseline,
(LONGLONG)ullCurrentUsage,
(LONGLONG)ullPreviousUsage))
{
// another thread beat us to it, and already reported this usage.
return 0;
}
// The result is the difference between this value and the previous usage value.
return ullCurrentUsage - ullPreviousUsage;
}
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
|
; A271832: Period 12 zigzag sequence: repeat [0,1,2,3,4,5,6,5,4,3,2,1].
; 0,1,2,3,4,5,6,5,4,3,2,1,0,1,2,3,4,5,6,5,4,3,2,1,0,1,2,3,4,5,6,5,4,3,2,1,0,1,2,3,4,5,6,5,4,3,2,1,0,1,2,3,4,5,6,5,4,3,2,1,0,1,2,3,4,5,6,5,4,3,2,1,0,1,2,3,4,5,6,5,4,3,2,1,0,1
; LODA program: input n in $0; the final value of $0 is the output.
; Each pass maps $0 <- (11*$0) mod 12, i.e. negation modulo 12.
; NOTE(review): presumably relies on lpb/lpe discarding iterations that do
; not decrease $0, so the value settles at the zigzag distance of n from
; the nearest multiple of 12 — confirm against the LODA loop semantics.
lpb $0
mul $0,11
mod $0,12
lpe
|
; A086593: Bisection of A086592, denominators of the left-hand half of Kepler's tree of fractions.
; Submitted by Christian Krause
; 2,3,4,5,5,7,7,8,6,9,10,11,9,12,11,13,7,11,13,14,13,17,15,18,11,16,17,19,14,19,18,21,8,13,16,17,17,22,19,23,16,23,24,27,19,26,25,29,13,20,23,25,22,29,26,31,17,25,27,30,23,31,29,34,9,15,19,20,21,27,23,28,21,30,31,35,24,33,32,37,19,29,33,36,31,41,37,44,23,34,37,41,32,43,40,47,15,24,29,31
; LODA program: input n in $0, output in $0; $1..$4 start at 0.
; The loop peels the binary digits of 2n (least-significant first) and
; folds each bit into the accumulator pair ($1,$2) — a machine-generated
; encoding of the Kepler-tree denominator recurrence (see A086592).
mul $0,2
mov $2,2
lpb $0
; $3 receives the current low bit of $0 after the divide below.
div $0,2
sub $2,$3
add $3,$0
mod $3,2
mov $4,$2
add $2,$1
mul $3,$4
add $1,$3
sub $3,2
lpe
; Final normalization: output = $2/2 + 1.
mov $0,$2
div $0,2
add $0,1
|
; A077444: Numbers k such that (k^2 + 4)/2 is a square.
; 2,14,82,478,2786,16238,94642,551614,3215042,18738638,109216786,636562078,3710155682,21624372014,126036076402,734592086398,4281516441986,24954506565518,145445522951122,847718631141214
; LODA program: input n in $0.  The loop runs 2n times (the input is
; doubled, then decremented once per pass) over a Fibonacci-like map on
; ($1,$2) starting from (2,4): $1 <- $1+$2, then $2 <- old $1 + new $1.
; NOTE(review): nothing is copied into $0 afterwards, so this appears to
; use the older LODA convention of returning the result in $1 — confirm.
mul $0,2
mov $1,2
mov $2,4
lpb $0,1
sub $0,1
mov $3,$2
mov $2,$1
add $1,$3
add $2,$1
lpe
|
; uchar tshc_px2bitmask(uchar x)
; z88dk CLIB stub for the TS2068 high-colour target: tshc_px2bitmask is a
; pure link-time alias of the shared ZX implementation (defc emits no code,
; it just equates the two symbols).
; NOTE(review): presumably maps an x pixel coordinate to its bit mask within
; a display byte — confirm against the zx_px2bitmask implementation.
SECTION code_clib
SECTION code_arch
PUBLIC tshc_px2bitmask
EXTERN zx_px2bitmask
defc tshc_px2bitmask = zx_px2bitmask
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.