name
string | code
string | asm
string | file
string |
|---|---|---|---|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 1, true, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::QuadMiIntersectorKPluecker<4, 4, true>>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::QuadMIntersectorKPluecker<4, 4, true>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* Occlusion (shadow-ray) query for the single ray in lane k of a K-wide
   ray packet. Walks the BVH with an explicit node stack and returns true
   as soon as any primitive reports occlusion (no closest-hit data is
   computed); returns false if the whole tree is traversed without
   finding an occluder. */
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
/* tray1 is a single-ray traversal structure extracted from lane k of the
   packet-wide TravRayK, replicated for N-wide node tests */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty: traversal done, no occluder found
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
/* intersect() returns false when cur is not an inner node, which ends
   the downtraversal and falls through to the leaf handling below */
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
/* any-hit traversal: unlike the closest-hit path (traverseClosestHit),
   child visit order does not need distance sorting for occlusion */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
/* mark lane k as occluded (tfar = -inf) and terminate early */
ray.tfar[k] = neg_inf;
return true;
}
/* the primitive intersector may hand back a lazily built subtree;
   push it so a later pop-loop iteration traverses it */
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc48, %rsp # imm = 0xC48
movq %rcx, %r10
movq 0xc88(%rsp), %rsi
movq 0xc80(%rsp), %rax
leaq 0x4a8(%rsp), %r15
movq %rdx, -0x8(%r15)
movss (%rax,%rcx,4), %xmm7
movss 0x10(%rax,%rcx,4), %xmm8
movss 0x20(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm10
movss 0x70(%rax,%rcx,4), %xmm11
movss 0x80(%rax,%rcx,4), %xmm12
movss 0x1d8b54a(%rip), %xmm13 # 0x1f1ff10
movaps %xmm10, %xmm14
mulss %xmm13, %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm11, %xmm15
mulss %xmm13, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulss %xmm12, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movss 0x1d8b520(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulss %xmm0, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm0, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r13
movslq 0xa0(%rax,%rcx,4), %rdi
movslq 0xb0(%rax,%rcx,4), %r8
movq %r13, %r11
xorq $0x10, %r11
movq %rdi, %rcx
xorq $0x10, %rcx
movq %rcx, 0x2a8(%rsp)
movq %r8, %rcx
xorq $0x10, %rcx
movq %rcx, 0x2a0(%rsp)
movss 0xc0(%rax,%r10,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movss 0xd0(%rax,%r10,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
leaq 0x1fbb50e(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, 0x490(%rsp)
movaps 0xf0(%rax), %xmm0
movaps %xmm0, 0x420(%rsp)
pushq $0x1
popq %rdx
movl %r10d, %ecx
shll %cl, %edx
movslq %edx, %rcx
shlq $0x4, %rcx
addq %rax, %rcx
movq %rcx, 0x140(%rsp)
movq %r13, 0x110(%rsp)
movq %r9, 0x30(%rsp)
movq %r10, 0x28(%rsp)
movaps %xmm7, 0xd0(%rsp)
movaps %xmm8, 0xc0(%rsp)
movaps %xmm9, 0xb0(%rsp)
movaps %xmm10, 0xa0(%rsp)
movaps %xmm11, 0x90(%rsp)
movaps %xmm12, 0x80(%rsp)
movaps %xmm13, 0x70(%rsp)
movaps %xmm14, 0x60(%rsp)
movaps %xmm15, 0x50(%rsp)
movq %rdi, 0x20(%rsp)
movq %r8, 0x18(%rsp)
movq %r11, 0x10(%rsp)
movaps %xmm3, 0xf0(%rsp)
movaps %xmm4, 0xe0(%rsp)
movq %r15, %rcx
leaq 0x4a0(%rsp), %rax
cmpq %rax, %r15
je 0x19617c
leaq -0x8(%rcx), %r15
movq %rcx, 0x38(%rsp)
movq -0x8(%rcx), %rbx
testb $0x8, %bl
jne 0x194bbd
movaps 0x20(%rbx,%r13), %xmm0
subps %xmm7, %xmm0
mulps %xmm14, %xmm0
movaps 0x20(%rbx,%rdi), %xmm1
subps %xmm8, %xmm1
mulps %xmm15, %xmm1
maxps %xmm1, %xmm0
movaps 0x20(%rbx,%r8), %xmm1
subps %xmm9, %xmm1
mulps %xmm13, %xmm1
maxps %xmm3, %xmm1
maxps %xmm1, %xmm0
movaps 0x20(%rbx,%r11), %xmm1
subps %xmm7, %xmm1
mulps %xmm10, %xmm1
movq 0x2a8(%rsp), %rax
movaps 0x20(%rbx,%rax), %xmm2
subps %xmm8, %xmm2
mulps %xmm11, %xmm2
minps %xmm2, %xmm1
movq 0x2a0(%rsp), %rax
movaps 0x20(%rbx,%rax), %xmm2
subps %xmm9, %xmm2
mulps %xmm12, %xmm2
minps %xmm4, %xmm2
minps %xmm2, %xmm1
cmpleps %xmm1, %xmm0
movmskps %xmm0, %r12d
testb $0x8, %bl
jne 0x194bf5
testq %r12, %r12
je 0x194bf9
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %r14d, %r14d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdx
jne 0x194bff
movq %rax, %rbx
testl %r14d, %r14d
je 0x194b3f
jmp 0x194c34
pushq $0x6
jmp 0x194bfb
pushq $0x4
popq %r14
jmp 0x194bea
movq %rax, (%r15)
addq $0x8, %r15
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x194c2f
movq %rcx, (%r15)
addq $0x8, %r15
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x194c0e
movq %rcx, %rbx
jmp 0x194bea
cmpl $0x6, %r14d
jne 0x19613f
movl %ebx, %eax
andl $0xf, %eax
xorl %r14d, %r14d
addq $-0x8, %rax
setne %cl
je 0x19613f
andq $-0x10, %rbx
xorl %edx, %edx
movq %r15, 0x118(%rsp)
movq %rax, 0x290(%rsp)
movb %cl, 0xf(%rsp)
movq %rdx, 0x298(%rsp)
imulq $0x60, %rdx, %rax
movq (%rsi), %rdx
leaq (%rbx,%rax), %r11
addq $0x40, %r11
prefetcht0 -0x40(%r11)
prefetcht0 (%r11)
movl (%r11), %ecx
movq %rdx, 0x120(%rsp)
movq 0x228(%rdx), %rdi
movq (%rdi,%rcx,8), %rcx
movl -0x40(%r11), %edx
movl -0x3c(%r11), %esi
movups (%rcx,%rdx,4), %xmm13
movl -0x30(%r11), %edx
movups (%rcx,%rdx,4), %xmm5
movl -0x10(%r11), %edx
movups (%rcx,%rdx,4), %xmm10
movl 0x4(%r11), %edx
movq (%rdi,%rdx,8), %rdx
movups (%rdx,%rsi,4), %xmm2
movl -0x2c(%r11), %esi
movups (%rdx,%rsi,4), %xmm1
movl -0xc(%r11), %esi
movups (%rdx,%rsi,4), %xmm0
movl 0x8(%r11), %esi
movq (%rdi,%rsi,8), %rsi
movl -0x38(%r11), %r8d
movups (%rsi,%r8,4), %xmm7
movl -0x28(%r11), %r8d
movups (%rsi,%r8,4), %xmm4
movl -0x8(%r11), %r8d
movups (%rsi,%r8,4), %xmm3
movl 0xc(%r11), %r8d
movq (%rdi,%r8,8), %rdi
movl -0x34(%r11), %r8d
movups (%rdi,%r8,4), %xmm8
movl -0x24(%r11), %r8d
movups (%rdi,%r8,4), %xmm9
movl -0x4(%r11), %r8d
movups (%rdi,%r8,4), %xmm6
movaps %xmm13, %xmm14
unpcklps %xmm7, %xmm14 # xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1]
unpckhps %xmm7, %xmm13 # xmm13 = xmm13[2],xmm7[2],xmm13[3],xmm7[3]
movaps %xmm2, %xmm7
unpcklps %xmm8, %xmm7 # xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
unpckhps %xmm8, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
unpcklps %xmm2, %xmm13 # xmm13 = xmm13[0],xmm2[0],xmm13[1],xmm2[1]
movaps %xmm14, %xmm15
unpcklps %xmm7, %xmm15 # xmm15 = xmm15[0],xmm7[0],xmm15[1],xmm7[1]
unpckhps %xmm7, %xmm14 # xmm14 = xmm14[2],xmm7[2],xmm14[3],xmm7[3]
movaps %xmm5, %xmm11
unpcklps %xmm4, %xmm11 # xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
unpckhps %xmm4, %xmm5 # xmm5 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
movaps %xmm1, %xmm2
unpcklps %xmm9, %xmm2 # xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
unpckhps %xmm9, %xmm1 # xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
unpcklps %xmm1, %xmm5 # xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
movaps %xmm11, %xmm4
unpcklps %xmm2, %xmm4 # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
unpckhps %xmm2, %xmm11 # xmm11 = xmm11[2],xmm2[2],xmm11[3],xmm2[3]
movaps %xmm10, %xmm2
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
unpckhps %xmm3, %xmm10 # xmm10 = xmm10[2],xmm3[2],xmm10[3],xmm3[3]
movaps %xmm0, %xmm1
unpcklps %xmm6, %xmm1 # xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
unpckhps %xmm6, %xmm0 # xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
unpcklps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0],xmm10[1],xmm0[1]
movaps %xmm2, %xmm0
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
unpckhps %xmm1, %xmm2 # xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
movss (%r9,%r10,4), %xmm1
movss 0x10(%r9,%r10,4), %xmm8
movss 0x20(%r9,%r10,4), %xmm7
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
subps %xmm1, %xmm15
subps %xmm8, %xmm14
subps %xmm7, %xmm13
movaps %xmm4, 0x470(%rsp)
movaps %xmm4, %xmm3
subps %xmm1, %xmm3
movaps %xmm11, 0x460(%rsp)
movaps %xmm11, %xmm4
subps %xmm8, %xmm4
movaps %xmm5, 0x480(%rsp)
subps %xmm7, %xmm5
movaps %xmm5, 0x130(%rsp)
movaps %xmm0, 0x190(%rsp)
subps %xmm1, %xmm0
movaps %xmm2, 0x1a0(%rsp)
subps %xmm8, %xmm2
movaps %xmm10, 0x1b0(%rsp)
movaps %xmm10, %xmm1
subps %xmm7, %xmm1
movaps %xmm0, %xmm10
subps %xmm15, %xmm10
movaps %xmm2, %xmm12
subps %xmm14, %xmm12
movaps %xmm1, %xmm11
subps %xmm13, %xmm11
movaps %xmm2, %xmm7
addps %xmm14, %xmm7
movaps %xmm1, %xmm8
addps %xmm13, %xmm8
movaps %xmm10, %xmm9
mulps %xmm7, %xmm9
mulps %xmm11, %xmm7
movaps %xmm12, %xmm6
mulps %xmm8, %xmm6
subps %xmm7, %xmm6
movaps %xmm0, %xmm7
addps %xmm15, %xmm7
movaps %xmm10, 0x230(%rsp)
mulps %xmm10, %xmm8
movaps %xmm7, %xmm10
movaps %xmm11, 0x250(%rsp)
mulps %xmm11, %xmm10
subps %xmm8, %xmm10
movaps %xmm12, 0x240(%rsp)
mulps %xmm12, %xmm7
subps %xmm7, %xmm9
movss 0x50(%r9,%r10,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movss 0x60(%r9,%r10,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm12, %xmm9
mulps %xmm5, %xmm10
addps %xmm9, %xmm10
movss 0x40(%r9,%r10,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movaps %xmm7, 0x40(%rsp)
mulps %xmm7, %xmm6
addps %xmm10, %xmm6
movaps %xmm15, %xmm8
subps %xmm3, %xmm8
movaps %xmm14, %xmm11
subps %xmm4, %xmm11
movaps %xmm13, %xmm7
movaps 0x130(%rsp), %xmm9
subps %xmm9, %xmm7
movaps %xmm14, 0x260(%rsp)
movaps %xmm14, %xmm10
addps %xmm4, %xmm10
movaps %xmm13, 0x280(%rsp)
addps %xmm9, %xmm13
movaps %xmm8, %xmm14
mulps %xmm10, %xmm14
mulps %xmm7, %xmm10
movaps %xmm11, %xmm9
mulps %xmm13, %xmm9
subps %xmm10, %xmm9
movaps %xmm15, 0x270(%rsp)
movaps %xmm15, %xmm10
addps %xmm3, %xmm10
mulps %xmm8, %xmm13
movaps %xmm10, %xmm15
movaps %xmm7, 0x210(%rsp)
mulps %xmm7, %xmm15
subps %xmm13, %xmm15
mulps %xmm11, %xmm10
subps %xmm10, %xmm14
mulps %xmm12, %xmm14
mulps %xmm5, %xmm15
addps %xmm14, %xmm15
movaps 0x40(%rsp), %xmm7
mulps %xmm7, %xmm9
addps %xmm15, %xmm9
movaps %xmm3, %xmm15
subps %xmm0, %xmm15
addps %xmm3, %xmm0
movaps %xmm4, %xmm13
subps %xmm2, %xmm13
addps %xmm4, %xmm2
movaps 0x130(%rsp), %xmm3
movaps %xmm3, %xmm14
subps %xmm1, %xmm14
addps %xmm3, %xmm1
movaps %xmm15, %xmm4
mulps %xmm2, %xmm4
mulps %xmm14, %xmm2
movaps %xmm13, %xmm3
mulps %xmm1, %xmm3
subps %xmm2, %xmm3
mulps %xmm15, %xmm1
movaps %xmm0, %xmm2
mulps %xmm14, %xmm2
subps %xmm1, %xmm2
mulps %xmm13, %xmm0
subps %xmm0, %xmm4
movaps %xmm12, 0x220(%rsp)
mulps %xmm12, %xmm4
mulps %xmm5, %xmm2
addps %xmm4, %xmm2
mulps %xmm7, %xmm3
addps %xmm2, %xmm3
movaps %xmm6, %xmm1
addps %xmm9, %xmm1
addps %xmm3, %xmm1
movaps %xmm6, %xmm0
minps %xmm9, %xmm0
minps %xmm3, %xmm0
movaps %xmm6, 0x1d0(%rsp)
movaps %xmm9, 0x1e0(%rsp)
maxps %xmm9, %xmm6
maxps %xmm3, %xmm6
movaps %xmm1, 0x1c0(%rsp)
movaps %xmm1, %xmm3
andps 0x1d576ae(%rip), %xmm3 # 0x1eec6c0
movaps %xmm3, %xmm1
mulps 0x1d5cd54(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d576a9(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
leaq (%rbx,%rax), %rax
movq %rax, 0x148(%rsp)
movl -0x20(%r11), %eax
movl -0x1c(%r11), %r8d
movl -0x18(%r11), %r9d
movq %r11, 0x130(%rsp)
movl -0x14(%r11), %r10d
movmskps %xmm6, %r11d
movups (%rcx,%rax,4), %xmm0
movaps %xmm0, 0x180(%rsp)
movups (%rdx,%r8,4), %xmm0
movaps %xmm0, 0x450(%rsp)
movb 0xf(%rsp), %dl
movups (%rsi,%r9,4), %xmm0
movaps %xmm0, 0x430(%rsp)
movq 0xc88(%rsp), %rsi
movq 0x30(%rsp), %r9
movups (%rdi,%r10,4), %xmm0
movaps %xmm0, 0x440(%rsp)
movq 0x28(%rsp), %r10
testl %r11d, %r11d
je 0x19571b
movaps %xmm11, %xmm0
movaps 0x250(%rsp), %xmm9
mulps %xmm9, %xmm0
movaps %xmm8, %xmm1
movaps %xmm5, %xmm4
movaps 0x240(%rsp), %xmm12
mulps %xmm12, %xmm1
movaps %xmm13, 0x200(%rsp)
movaps 0x210(%rsp), %xmm7
mulps %xmm7, %xmm13
movaps %xmm15, %xmm10
mulps %xmm11, %xmm10
mulps %xmm7, %xmm12
subps %xmm0, %xmm12
movaps %xmm14, %xmm5
movaps %xmm14, 0x1f0(%rsp)
movaps 0x230(%rsp), %xmm14
movaps %xmm14, %xmm2
mulps %xmm11, %xmm14
mulps %xmm5, %xmm11
subps %xmm13, %xmm11
movaps 0x1d575a1(%rip), %xmm5 # 0x1eec6c0
andps %xmm5, %xmm0
andps %xmm5, %xmm13
cmpltps %xmm13, %xmm0
blendvps %xmm0, %xmm12, %xmm11
movaps 0x40(%rsp), %xmm12
movaps %xmm8, %xmm0
mulps 0x1f0(%rsp), %xmm0
mulps %xmm7, %xmm2
mulps %xmm7, %xmm15
mulps %xmm8, %xmm9
subps %xmm2, %xmm9
subps %xmm0, %xmm15
andps %xmm5, %xmm2
andps %xmm5, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm9, %xmm15
mulps 0x200(%rsp), %xmm8
subps %xmm1, %xmm14
subps %xmm10, %xmm8
andps %xmm5, %xmm1
andps %xmm5, %xmm10
cmpltps %xmm10, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm14, %xmm8
movaps 0x220(%rsp), %xmm0
mulps %xmm8, %xmm0
mulps %xmm15, %xmm4
addps %xmm0, %xmm4
mulps %xmm11, %xmm12
addps %xmm4, %xmm12
addps %xmm12, %xmm12
movaps 0x280(%rsp), %xmm0
mulps %xmm8, %xmm0
movaps 0x260(%rsp), %xmm1
mulps %xmm15, %xmm1
addps %xmm0, %xmm1
movaps 0x270(%rsp), %xmm5
mulps %xmm11, %xmm5
addps %xmm1, %xmm5
rcpps %xmm12, %xmm1
movaps %xmm12, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d57826(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm5, %xmm5
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm5, %xmm0
movss 0x80(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19571b
cmpneqps 0x1d567d9(%rip), %xmm12 # 0x1eeba10
andps %xmm12, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19571b
movaps 0x1d0(%rsp), %xmm9
movaps %xmm9, 0x2b0(%rsp)
movaps 0x1e0(%rsp), %xmm6
movaps %xmm6, 0x2c0(%rsp)
movaps 0x1c0(%rsp), %xmm4
movaps %xmm4, 0x2d0(%rsp)
movaps %xmm11, 0x2e0(%rsp)
movaps %xmm15, 0x2f0(%rsp)
movaps %xmm8, 0x300(%rsp)
movaps %xmm1, 0x310(%rsp)
movaps %xmm0, 0x340(%rsp)
movdqa 0x490(%rsp), %xmm0
movdqa %xmm0, 0x380(%rsp)
rcpps %xmm4, %xmm2
mulps %xmm2, %xmm4
movaps 0x1d5774e(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm1
subps %xmm4, %xmm1
mulps %xmm2, %xmm1
addps %xmm2, %xmm1
cmpnltps 0x1d5ca6a(%rip), %xmm3 # 0x1ef1d40
andps %xmm1, %xmm3
mulps %xmm3, %xmm9
minps %xmm5, %xmm9
mulps %xmm6, %xmm3
minps %xmm5, %xmm3
movaps %xmm5, %xmm1
subps %xmm9, %xmm1
movaps %xmm5, %xmm2
subps %xmm3, %xmm2
blendvps %xmm0, %xmm1, %xmm9
movaps %xmm9, 0x320(%rsp)
blendvps %xmm0, %xmm2, %xmm3
movaps %xmm3, 0x330(%rsp)
movaps %xmm11, 0x350(%rsp)
movaps %xmm15, 0x360(%rsp)
movaps %xmm8, 0x370(%rsp)
movzbl %al, %r13d
movaps 0xd0(%rsp), %xmm7
movaps 0xc0(%rsp), %xmm8
movaps 0xb0(%rsp), %xmm9
movaps 0xa0(%rsp), %xmm10
movaps 0x90(%rsp), %xmm11
movaps 0x80(%rsp), %xmm12
movaps 0x70(%rsp), %xmm13
movaps 0x60(%rsp), %xmm14
movaps 0x50(%rsp), %xmm15
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %r11
movaps 0xf0(%rsp), %xmm3
movaps 0xe0(%rsp), %xmm4
bsfq %r13, %rbp
movq 0x130(%rsp), %rax
movl (%rax,%rbp,4), %ecx
movq 0x120(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %r15
movl 0x90(%r9,%r10,4), %eax
testl %eax, 0x34(%r15)
je 0x1953db
movq 0x10(%rsi), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1953f3
cmpq $0x0, 0x48(%r15)
jne 0x1953f3
xorl %eax, %eax
jmp 0x1953e1
btcq %rbp, %r13
movb $0x1, %al
testb %al, %al
je 0x196150
testq %r13, %r13
jne 0x195395
jmp 0x19570b
movss 0x80(%r9,%r10,4), %xmm0
movss %xmm0, 0x40(%rsp)
movss 0x340(%rsp,%rbp,4), %xmm0
movss 0x320(%rsp,%rbp,4), %xmm1
movss 0x330(%rsp,%rbp,4), %xmm2
movss %xmm0, 0x80(%r9,%r10,4)
movq 0x8(%rsi), %rax
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movq 0x148(%rsp), %rcx
movd 0x50(%rcx,%rbp,4), %xmm3
pshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x350(%rsp,%rbp,4), %xmm4
movss 0x360(%rsp,%rbp,4), %xmm5
movss 0x370(%rsp,%rbp,4), %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm4, 0x390(%rsp)
movaps %xmm5, 0x3a0(%rsp)
movaps %xmm6, 0x3b0(%rsp)
movaps %xmm1, 0x3c0(%rsp)
movaps %xmm2, 0x3d0(%rsp)
movdqa %xmm3, 0x3e0(%rsp)
movdqa %xmm0, 0x3f0(%rsp)
leaq 0x400(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x400(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x410(%rsp)
movq 0x140(%rsp), %rcx
movdqa (%rcx), %xmm0
movdqa %xmm0, 0x100(%rsp)
leaq 0x100(%rsp), %rcx
movq %rcx, 0x150(%rsp)
movq 0x18(%r15), %rcx
movq %rcx, 0x158(%rsp)
movq %rax, 0x160(%rsp)
movq %r9, 0x168(%rsp)
leaq 0x390(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x4, 0x178(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
je 0x1955c6
leaq 0x150(%rsp), %rdi
callq *%rax
movb 0xf(%rsp), %dl
movq 0x10(%rsp), %r11
movq 0x18(%rsp), %r8
movq 0x20(%rsp), %rdi
movaps 0x50(%rsp), %xmm15
movaps 0x60(%rsp), %xmm14
movaps 0x70(%rsp), %xmm13
movaps 0x80(%rsp), %xmm12
movaps 0x90(%rsp), %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0xb0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm8
movaps 0xd0(%rsp), %xmm7
movq 0xc88(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movdqa 0x100(%rsp), %xmm1
ptest %xmm1, %xmm1
movaps 0xf0(%rsp), %xmm3
movaps 0xe0(%rsp), %xmm4
je 0x1956cf
movq 0x10(%rsi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x195691
testb $0x2, (%rcx)
jne 0x19560b
testb $0x40, 0x3e(%r15)
je 0x195691
leaq 0x150(%rsp), %rdi
callq *%rax
movb 0xf(%rsp), %dl
movaps 0xe0(%rsp), %xmm4
movaps 0xf0(%rsp), %xmm3
movq 0x10(%rsp), %r11
movq 0x18(%rsp), %r8
movq 0x20(%rsp), %rdi
movaps 0x50(%rsp), %xmm15
movaps 0x60(%rsp), %xmm14
movaps 0x70(%rsp), %xmm13
movaps 0x80(%rsp), %xmm12
movaps 0x90(%rsp), %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0xb0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm8
movaps 0xd0(%rsp), %xmm7
movq 0xc88(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movdqa 0x100(%rsp), %xmm0
pcmpeqd 0x1d5636e(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d56772(%rip), %xmm1 # 0x1eebe20
movq 0x168(%rsp), %rax
movaps 0x1d56343(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1956df
pcmpeqd 0x1d56339(%rip), %xmm1 # 0x1eeba10
pxor 0x1d56741(%rip), %xmm1 # 0x1eebe20
pslld $0x1f, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
sete %al
jne 0x1953e1
movd 0x40(%rsp), %xmm0
movd %xmm0, 0x80(%r9,%r10,4)
btcq %rbp, %r13
jmp 0x1953e1
movq 0x118(%rsp), %r15
movq 0x110(%rsp), %r13
movaps 0x180(%rsp), %xmm8
movaps %xmm8, %xmm7
movaps 0x430(%rsp), %xmm0
unpcklps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
unpckhps %xmm0, %xmm8 # xmm8 = xmm8[2],xmm0[2],xmm8[3],xmm0[3]
movaps 0x450(%rsp), %xmm1
movaps %xmm1, %xmm0
movaps 0x440(%rsp), %xmm2
unpcklps %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
unpckhps %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
unpcklps %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
movaps %xmm7, %xmm15
unpcklps %xmm0, %xmm15 # xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
unpckhps %xmm0, %xmm7 # xmm7 = xmm7[2],xmm0[2],xmm7[3],xmm0[3]
movss (%r9,%r10,4), %xmm1
movss 0x10(%r9,%r10,4), %xmm2
movss 0x20(%r9,%r10,4), %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm1, %xmm15
subps %xmm2, %xmm7
subps %xmm0, %xmm8
movaps 0x190(%rsp), %xmm3
subps %xmm1, %xmm3
movaps %xmm3, %xmm4
movaps %xmm3, 0x190(%rsp)
movaps 0x1a0(%rsp), %xmm5
subps %xmm2, %xmm5
movaps 0x1b0(%rsp), %xmm3
subps %xmm0, %xmm3
movaps %xmm3, 0x1b0(%rsp)
movaps 0x470(%rsp), %xmm12
subps %xmm1, %xmm12
movaps 0x460(%rsp), %xmm14
subps %xmm2, %xmm14
movaps 0x480(%rsp), %xmm9
subps %xmm0, %xmm9
movaps %xmm12, %xmm6
subps %xmm15, %xmm6
movaps %xmm14, %xmm11
subps %xmm7, %xmm11
movaps %xmm9, %xmm10
subps %xmm8, %xmm10
movaps %xmm12, %xmm0
addps %xmm15, %xmm0
movaps %xmm14, %xmm1
addps %xmm7, %xmm1
movaps %xmm9, %xmm2
addps %xmm8, %xmm2
movaps %xmm6, %xmm3
mulps %xmm1, %xmm3
mulps %xmm10, %xmm1
movaps %xmm11, %xmm13
mulps %xmm2, %xmm13
subps %xmm1, %xmm13
movaps %xmm6, 0x270(%rsp)
mulps %xmm6, %xmm2
movaps %xmm0, %xmm1
movaps %xmm10, 0x260(%rsp)
mulps %xmm10, %xmm1
subps %xmm2, %xmm1
movss 0x50(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm2, 0x120(%rsp)
movaps %xmm11, 0x250(%rsp)
mulps %xmm11, %xmm0
subps %xmm0, %xmm3
movss 0x60(%r9,%r10,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm11, %xmm3
mulps %xmm2, %xmm1
addps %xmm3, %xmm1
movss 0x40(%r9,%r10,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm6, %xmm13
addps %xmm1, %xmm13
movaps %xmm15, %xmm2
subps %xmm4, %xmm2
movaps %xmm7, %xmm10
movaps %xmm5, %xmm1
movaps %xmm5, 0x1a0(%rsp)
subps %xmm5, %xmm10
movaps %xmm8, %xmm5
movaps 0x1b0(%rsp), %xmm4
subps %xmm4, %xmm5
movaps %xmm7, 0x280(%rsp)
movaps %xmm7, %xmm0
addps %xmm1, %xmm0
movaps %xmm8, 0x180(%rsp)
movaps %xmm8, %xmm1
movaps %xmm2, %xmm3
addps %xmm4, %xmm1
mulps %xmm0, %xmm2
mulps %xmm5, %xmm0
movaps %xmm10, %xmm7
mulps %xmm1, %xmm7
subps %xmm0, %xmm7
movaps %xmm15, 0x40(%rsp)
movaps 0x190(%rsp), %xmm8
addps %xmm8, %xmm15
movaps %xmm3, 0x220(%rsp)
mulps %xmm3, %xmm1
movaps %xmm15, %xmm3
movaps %xmm5, 0x230(%rsp)
mulps %xmm5, %xmm3
subps %xmm1, %xmm3
movaps %xmm10, 0x210(%rsp)
mulps %xmm10, %xmm15
movaps %xmm6, %xmm10
subps %xmm15, %xmm2
mulps %xmm11, %xmm2
movaps 0x120(%rsp), %xmm6
mulps %xmm6, %xmm3
addps %xmm2, %xmm3
mulps %xmm10, %xmm7
addps %xmm3, %xmm7
movaps %xmm8, %xmm5
subps %xmm12, %xmm5
addps %xmm8, %xmm12
movaps 0x1a0(%rsp), %xmm0
movaps %xmm0, %xmm8
subps %xmm14, %xmm8
addps %xmm0, %xmm14
movaps %xmm4, %xmm3
subps %xmm9, %xmm3
addps %xmm4, %xmm9
movaps %xmm5, %xmm1
mulps %xmm14, %xmm1
mulps %xmm3, %xmm14
movaps %xmm8, %xmm0
mulps %xmm9, %xmm0
subps %xmm14, %xmm0
mulps %xmm5, %xmm9
movaps %xmm12, %xmm2
movaps %xmm3, %xmm14
mulps %xmm3, %xmm2
subps %xmm9, %xmm2
mulps %xmm8, %xmm12
subps %xmm12, %xmm1
movaps %xmm11, 0x240(%rsp)
mulps %xmm11, %xmm1
mulps %xmm6, %xmm2
addps %xmm1, %xmm2
mulps %xmm10, %xmm0
addps %xmm2, %xmm0
movaps %xmm13, %xmm15
addps %xmm7, %xmm15
addps %xmm0, %xmm15
movaps %xmm13, %xmm1
minps %xmm7, %xmm1
minps %xmm0, %xmm1
movaps %xmm13, 0x1d0(%rsp)
movaps %xmm13, %xmm3
movaps %xmm7, 0x1e0(%rsp)
maxps %xmm7, %xmm3
maxps %xmm0, %xmm3
movaps %xmm15, 0x1c0(%rsp)
andps 0x1d56cbb(%rip), %xmm15 # 0x1eec6c0
movaps %xmm15, %xmm0
mulps 0x1d5c360(%rip), %xmm0 # 0x1ef1d70
cmpleps %xmm0, %xmm3
xorps 0x1d56cb5(%rip), %xmm0 # 0x1eec6d0
cmpnltps %xmm0, %xmm1
orps %xmm1, %xmm3
movmskps %xmm3, %eax
testl %eax, %eax
je 0x1960ba
movaps 0x210(%rsp), %xmm6
movaps %xmm6, %xmm0
movaps 0x260(%rsp), %xmm7
mulps %xmm7, %xmm0
movaps 0x220(%rsp), %xmm4
movaps %xmm4, %xmm1
movaps 0x250(%rsp), %xmm11
mulps %xmm11, %xmm1
movaps %xmm8, %xmm12
movaps %xmm8, 0x200(%rsp)
movaps 0x230(%rsp), %xmm13
mulps %xmm13, %xmm12
movaps %xmm5, %xmm9
mulps %xmm6, %xmm9
mulps %xmm13, %xmm11
subps %xmm0, %xmm11
movaps 0x270(%rsp), %xmm8
movaps %xmm8, %xmm2
mulps %xmm6, %xmm8
movaps %xmm14, 0x1f0(%rsp)
mulps %xmm14, %xmm6
subps %xmm12, %xmm6
movaps 0x1d56c11(%rip), %xmm14 # 0x1eec6c0
andps %xmm14, %xmm0
andps %xmm14, %xmm12
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm11, %xmm6
movaps %xmm4, %xmm0
mulps 0x1f0(%rsp), %xmm0
mulps %xmm13, %xmm2
mulps %xmm13, %xmm5
mulps %xmm4, %xmm7
subps %xmm2, %xmm7
subps %xmm0, %xmm5
andps %xmm14, %xmm2
andps %xmm14, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm7, %xmm5
mulps 0x200(%rsp), %xmm4
subps %xmm1, %xmm8
subps %xmm9, %xmm4
andps %xmm14, %xmm1
andps %xmm14, %xmm9
cmpltps %xmm9, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm8, %xmm4
movaps 0x240(%rsp), %xmm1
mulps %xmm4, %xmm1
movaps 0x120(%rsp), %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
mulps %xmm6, %xmm10
addps %xmm0, %xmm10
addps %xmm10, %xmm10
movaps 0x180(%rsp), %xmm0
mulps %xmm4, %xmm0
movaps 0x280(%rsp), %xmm1
mulps %xmm5, %xmm1
addps %xmm0, %xmm1
movaps 0x40(%rsp), %xmm8
mulps %xmm6, %xmm8
addps %xmm1, %xmm8
rcpps %xmm10, %xmm1
movaps %xmm10, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d56e9a(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm8, %xmm8
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm8, %xmm0
movss 0x80(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm3, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1960ba
cmpneqps 0x1d55e4b(%rip), %xmm10 # 0x1eeba10
andps %xmm10, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1960ba
movaps %xmm4, %xmm9
movaps 0x1d0(%rsp), %xmm4
movaps %xmm4, 0x2b0(%rsp)
movaps 0x1e0(%rsp), %xmm7
movaps %xmm7, 0x2c0(%rsp)
movaps 0x1c0(%rsp), %xmm8
movaps %xmm8, 0x2d0(%rsp)
movaps %xmm6, 0x2e0(%rsp)
movaps %xmm5, 0x2f0(%rsp)
movaps %xmm9, 0x300(%rsp)
movaps %xmm1, 0x310(%rsp)
movaps %xmm0, 0x340(%rsp)
movdqa 0x420(%rsp), %xmm0
movdqa %xmm0, 0x380(%rsp)
movq (%rsi), %rcx
movq %rcx, 0x120(%rsp)
rcpps %xmm8, %xmm2
mulps %xmm2, %xmm8
movaps 0x1d56db1(%rip), %xmm3 # 0x1eeca10
movaps %xmm3, %xmm1
subps %xmm8, %xmm1
mulps %xmm2, %xmm1
addps %xmm2, %xmm1
cmpnltps 0x1d5c0cb(%rip), %xmm15 # 0x1ef1d40
andps %xmm1, %xmm15
mulps %xmm15, %xmm4
minps %xmm3, %xmm4
mulps %xmm7, %xmm15
minps %xmm3, %xmm15
movaps %xmm3, %xmm1
subps %xmm4, %xmm1
movaps %xmm3, %xmm2
subps %xmm15, %xmm2
blendvps %xmm0, %xmm1, %xmm4
movaps %xmm4, 0x320(%rsp)
blendvps %xmm0, %xmm2, %xmm15
movaps %xmm15, 0x330(%rsp)
movaps %xmm6, 0x350(%rsp)
movaps %xmm5, 0x360(%rsp)
movaps %xmm9, 0x370(%rsp)
movzbl %al, %r13d
movaps 0xd0(%rsp), %xmm7
movaps 0xc0(%rsp), %xmm8
movaps 0xb0(%rsp), %xmm9
movaps 0xa0(%rsp), %xmm10
movaps 0x90(%rsp), %xmm11
movaps 0x80(%rsp), %xmm12
movaps 0x70(%rsp), %xmm13
movaps 0x60(%rsp), %xmm14
movaps 0x50(%rsp), %xmm15
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %r11
movaps 0xf0(%rsp), %xmm3
movaps 0xe0(%rsp), %xmm4
bsfq %r13, %rbp
movq 0x130(%rsp), %rax
movl (%rax,%rbp,4), %ecx
movq 0x120(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %r15
movl 0x90(%r9,%r10,4), %eax
testl %eax, 0x34(%r15)
je 0x195d7a
movq 0x10(%rsi), %rax
cmpq $0x0, 0x10(%rax)
jne 0x195d92
cmpq $0x0, 0x48(%r15)
jne 0x195d92
xorl %eax, %eax
jmp 0x195d80
btcq %rbp, %r13
movb $0x1, %al
testb %al, %al
je 0x196150
testq %r13, %r13
jne 0x195d34
jmp 0x1960aa
movss 0x80(%r9,%r10,4), %xmm0
movss %xmm0, 0x40(%rsp)
movss 0x340(%rsp,%rbp,4), %xmm0
movss 0x320(%rsp,%rbp,4), %xmm1
movss 0x330(%rsp,%rbp,4), %xmm2
movss %xmm0, 0x80(%r9,%r10,4)
movq 0x8(%rsi), %rax
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movq 0x148(%rsp), %rcx
movd 0x50(%rcx,%rbp,4), %xmm3
pshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x350(%rsp,%rbp,4), %xmm4
movss 0x360(%rsp,%rbp,4), %xmm5
movss 0x370(%rsp,%rbp,4), %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm4, 0x390(%rsp)
movaps %xmm5, 0x3a0(%rsp)
movaps %xmm6, 0x3b0(%rsp)
movaps %xmm1, 0x3c0(%rsp)
movaps %xmm2, 0x3d0(%rsp)
movdqa %xmm3, 0x3e0(%rsp)
movdqa %xmm0, 0x3f0(%rsp)
leaq 0x400(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x400(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x410(%rsp)
movq 0x140(%rsp), %rcx
movdqa (%rcx), %xmm0
movdqa %xmm0, 0x100(%rsp)
leaq 0x100(%rsp), %rcx
movq %rcx, 0x150(%rsp)
movq 0x18(%r15), %rcx
movq %rcx, 0x158(%rsp)
movq %rax, 0x160(%rsp)
movq %r9, 0x168(%rsp)
leaq 0x390(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x4, 0x178(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
je 0x195f65
leaq 0x150(%rsp), %rdi
callq *%rax
movb 0xf(%rsp), %dl
movq 0x10(%rsp), %r11
movq 0x18(%rsp), %r8
movq 0x20(%rsp), %rdi
movaps 0x50(%rsp), %xmm15
movaps 0x60(%rsp), %xmm14
movaps 0x70(%rsp), %xmm13
movaps 0x80(%rsp), %xmm12
movaps 0x90(%rsp), %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0xb0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm8
movaps 0xd0(%rsp), %xmm7
movq 0xc88(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movdqa 0x100(%rsp), %xmm1
ptest %xmm1, %xmm1
movaps 0xf0(%rsp), %xmm3
movaps 0xe0(%rsp), %xmm4
je 0x19606e
movq 0x10(%rsi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x196030
testb $0x2, (%rcx)
jne 0x195faa
testb $0x40, 0x3e(%r15)
je 0x196030
leaq 0x150(%rsp), %rdi
callq *%rax
movb 0xf(%rsp), %dl
movaps 0xe0(%rsp), %xmm4
movaps 0xf0(%rsp), %xmm3
movq 0x10(%rsp), %r11
movq 0x18(%rsp), %r8
movq 0x20(%rsp), %rdi
movaps 0x50(%rsp), %xmm15
movaps 0x60(%rsp), %xmm14
movaps 0x70(%rsp), %xmm13
movaps 0x80(%rsp), %xmm12
movaps 0x90(%rsp), %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0xb0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm8
movaps 0xd0(%rsp), %xmm7
movq 0xc88(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movdqa 0x100(%rsp), %xmm0
pcmpeqd 0x1d559cf(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d55dd3(%rip), %xmm1 # 0x1eebe20
movq 0x168(%rsp), %rax
movaps 0x1d559a4(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x19607e
pcmpeqd 0x1d5599a(%rip), %xmm1 # 0x1eeba10
pxor 0x1d55da2(%rip), %xmm1 # 0x1eebe20
pslld $0x1f, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
sete %al
jne 0x195d80
movd 0x40(%rsp), %xmm0
movd %xmm0, 0x80(%r9,%r10,4)
btcq %rbp, %r13
jmp 0x195d80
movq 0x118(%rsp), %r15
movq 0x110(%rsp), %r13
movq 0x298(%rsp), %rdx
incq %rdx
movq 0x290(%rsp), %rax
cmpq %rax, %rdx
setb %cl
jne 0x194c69
movaps 0xd0(%rsp), %xmm7
movaps 0xc0(%rsp), %xmm8
movaps 0xb0(%rsp), %xmm9
movaps 0xa0(%rsp), %xmm10
movaps 0x90(%rsp), %xmm11
movaps 0x80(%rsp), %xmm12
movaps 0x70(%rsp), %xmm13
movaps 0x60(%rsp), %xmm14
movaps 0x50(%rsp), %xmm15
movq 0x20(%rsp), %rdi
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %r11
movaps 0xf0(%rsp), %xmm3
movaps 0xe0(%rsp), %xmm4
movq 0x38(%rsp), %rcx
testb $0x3, %r14b
je 0x194b1e
jmp 0x19617c
testb $0x1, %dl
movq 0x118(%rsp), %r15
movq 0x110(%rsp), %r13
movq 0x38(%rsp), %rcx
je 0x196144
movl $0xff800000, 0x80(%r9,%r10,4) # imm = 0xFF800000
pushq $0x1
popq %r14
jmp 0x196144
leaq 0x4a0(%rsp), %rax
cmpq %rax, %rcx
setne %al
addq $0xc48, %rsp # imm = 0xC48
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::QuadMiMBIntersectorKMoeller<4, 4, true>>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::QuadMIntersectorKMoellerTrumbore<4, 4, true>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* Closest-hit query for the single ray in lane k of a K-wide ray packet.
   Walks the BVH with an explicit stack of StackItemT entries; each entry
   carries the node's entry distance so nodes that became farther than the
   current closest hit can be culled when popped. Hit data is written into
   ray (lane k); nothing is returned. */
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf; // root is never distance-culled
/* load the ray into SIMD registers */
/* tray1 is a single-ray traversal structure extracted from lane k of the
   packet-wide TravRayK, replicated for N-wide node tests */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty: traversal done
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
/* dist was recorded at push time; a closer hit found since then makes
   this subtree irrelevant */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
/* intersect() returns false when cur is not an inner node, which ends
   the downtraversal and falls through to the leaf handling below */
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
/* closest-hit traversal: descends into the nearest hit child and pushes
   the rest together with their entry distances */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
/* shrink the traversal ray's far distance to the new closest hit so
   subsequent node tests cull geometry behind it */
tray1.tfar = ray.tfar[k];
/* the primitive intersector may hand back a lazily built subtree;
   push it (dist = -inf so it is never distance-culled) */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1338, %rsp # imm = 0x1338
movq %rcx, %r11
movq 0x1370(%rsp), %rax
leaq 0x400(%rsp), %rsi
movq %rdx, -0x10(%rsi)
andl $0x0, -0x8(%rsi)
movss (%rax,%rcx,4), %xmm8
movss 0x10(%rax,%rcx,4), %xmm6
movss 0x20(%rax,%rcx,4), %xmm7
movss 0x60(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x3d0(%rsp)
movss 0x70(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x3c0(%rsp)
movss 0x80(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x3b0(%rsp)
movslq 0x90(%rax,%rcx,4), %rcx
movslq 0xa0(%rax,%r11,4), %rdx
movslq 0xb0(%rax,%r11,4), %rdi
movq %rcx, 0x38(%rsp)
xorq $0x10, %rcx
movq %rcx, 0x170(%rsp)
movq %rdx, 0x180(%rsp)
xorq $0x10, %rdx
movq %rdx, 0x168(%rsp)
movq %rdi, 0x178(%rsp)
xorq $0x10, %rdi
movq %rdi, 0x160(%rsp)
movss 0xc0(%rax,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x3a0(%rsp)
movss 0xd0(%rax,%r11,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
leaq 0x1fb9ce4(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, 0x360(%rsp)
pushq $0x1
popq %rdx
movl %r11d, %ecx
shll %cl, %edx
movaps 0xf0(%rax), %xmm0
movaps %xmm0, 0x350(%rsp)
movslq %edx, %rcx
shlq $0x4, %rcx
addq %rax, %rcx
movq %rcx, 0xf0(%rsp)
movq %r9, 0x48(%rsp)
movq %r11, 0x40(%rsp)
movaps %xmm8, 0x390(%rsp)
movaps %xmm6, 0x380(%rsp)
movaps %xmm7, 0x370(%rsp)
leaq 0x3f0(%rsp), %rax
cmpq %rax, %rsi
je 0x197c35
movss -0x8(%rsi), %xmm0
addq $-0x10, %rsi
ucomiss 0x80(%r9,%r11,4), %xmm0
ja 0x1962f3
movq %rsi, 0x8(%rsp)
movq (%rsi), %rbx
movq 0x38(%rsp), %rsi
testb $0x8, %bl
jne 0x19644e
movq %rbx, %rax
andq $-0x10, %rax
movss 0x70(%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rax,%rsi), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rsi), %xmm3
subps %xmm8, %xmm3
movaps 0x3d0(%rsp), %xmm9
mulps %xmm9, %xmm3
movaps 0x3a0(%rsp), %xmm2
movq 0x180(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%rcx), %xmm4
maxps %xmm3, %xmm2
subps %xmm6, %xmm4
movaps 0x3c0(%rsp), %xmm10
mulps %xmm10, %xmm4
movq 0x178(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rcx), %xmm3
subps %xmm7, %xmm3
movaps 0x3b0(%rsp), %xmm11
mulps %xmm11, %xmm3
maxps %xmm3, %xmm4
maxps %xmm4, %xmm2
movq 0x170(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%rcx), %xmm4
subps %xmm8, %xmm4
movq 0x168(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm5
mulps %xmm0, %xmm5
addps 0x20(%rax,%rcx), %xmm5
mulps %xmm9, %xmm4
subps %xmm6, %xmm5
mulps %xmm10, %xmm5
movq 0x160(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rcx), %xmm3
subps %xmm7, %xmm3
mulps %xmm11, %xmm3
minps %xmm3, %xmm5
movaps %xmm1, %xmm3
minps %xmm4, %xmm3
minps %xmm5, %xmm3
movl %ebx, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x196499
movaps %xmm2, %xmm0
cmpleps %xmm3, %xmm0
pslld $0x1f, %xmm0
movmskps %xmm0, %r12d
movaps %xmm2, 0x1d0(%rsp)
testb $0x8, %bl
jne 0x196495
testq %r12, %r12
je 0x1964bb
andq $-0x10, %rbx
bsfq %r12, %rcx
leaq -0x1(%r12), %r8
xorl %eax, %eax
movq (%rbx,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %r12, %r8
jne 0x1964c0
movq %rdx, %rbx
testl %eax, %eax
je 0x196325
jmp 0x1966c2
pushq $0x6
jmp 0x1964bd
movaps %xmm2, %xmm4
cmpleps %xmm3, %xmm4
movaps 0xe0(%rax), %xmm3
cmpleps %xmm0, %xmm3
cmpltps 0xf0(%rax), %xmm0
andps %xmm3, %xmm0
andps %xmm4, %xmm0
jmp 0x19643d
pushq $0x4
popq %rax
jmp 0x196488
movl 0x1d0(%rsp,%rcx,4), %edi
bsfq %r8, %r10
leaq -0x1(%r8), %rcx
movq (%rbx,%r10,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
movl 0x1d0(%rsp,%r10,4), %r10d
andq %r8, %rcx
jne 0x19652d
movq 0x8(%rsp), %r8
leaq 0x10(%r8), %rcx
cmpl %r10d, %edi
jae 0x196514
movq %rsi, (%r8)
movl %r10d, 0x8(%r8)
movq %rcx, 0x8(%rsp)
movq %rdx, %rbx
jmp 0x196523
movq %rdx, (%r8)
movl %edi, 0x8(%r8)
movq %rcx, 0x8(%rsp)
movq %rsi, %rbx
movq 0x38(%rsp), %rsi
jmp 0x196488
movq %rdx, %xmm2
movd %edi, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %rsi, %xmm4
movd %r10d, %xmm0
punpcklqdq %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0]
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdx
movq (%rbx,%rsi,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movq %rdi, %xmm3
movd 0x1d0(%rsp,%rsi,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rcx, %rdx
jne 0x1965ec
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
blendvps %xmm0, %xmm4, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm3, %xmm5
movaps %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movq 0x8(%rsp), %rcx
movaps %xmm2, (%rcx)
movaps %xmm3, 0x10(%rcx)
movq %xmm4, %rbx
addq $0x20, %rcx
movq %rcx, 0x8(%rsp)
jmp 0x196523
bsfq %rdx, %rcx
movq (%rbx,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
movaps %xmm6, %xmm9
movq %rdx, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
movaps %xmm7, %xmm10
movd 0x1d0(%rsp,%rcx,4), %xmm7
blendvps %xmm0, %xmm4, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movaps %xmm10, %xmm7
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm4
blendvps %xmm0, %xmm3, %xmm4
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm4, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm5
movaps %xmm6, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm6, %xmm5
movaps %xmm9, %xmm6
movq 0x8(%rsp), %rcx
movaps %xmm2, (%rcx)
movaps %xmm5, 0x10(%rcx)
movaps %xmm4, 0x20(%rcx)
movq %xmm3, %rbx
addq $0x30, %rcx
jmp 0x1965e2
cmpl $0x6, %eax
jne 0x197bfa
movl %ebx, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x158(%rsp)
je 0x197c04
andq $-0x10, %rbx
xorl %eax, %eax
movq %r12, 0xf8(%rsp)
movq %rax, 0x1c8(%rsp)
imulq $0x60, %rax, %rax
movq 0x1378(%rsp), %rcx
movq (%rcx), %rdx
movss 0x70(%r9,%r11,4), %xmm3
movl 0x40(%rbx,%rax), %ecx
movq %rdx, 0x150(%rsp)
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rsi
movss 0x28(%rsi), %xmm0
movss 0x2c(%rsi), %xmm1
movss 0x30(%rsi), %xmm2
subss %xmm1, %xmm3
subss %xmm1, %xmm2
divss %xmm2, %xmm3
mulss %xmm0, %xmm3
roundss $0x9, %xmm3, %xmm2
addss 0x1d5a27a(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm2
xorps %xmm1, %xmm1
maxss %xmm2, %xmm1
cvttss2si %xmm1, %ecx
movslq %ecx, %rcx
movq 0xe0(%rsi), %rdi
imulq $0x38, %rcx, %r8
movq (%rdi,%r8), %r9
movl (%rbx,%rax), %esi
movl 0x4(%rbx,%rax), %r14d
movq %r14, 0x190(%rsp)
movups (%r9,%rsi,4), %xmm15
movl 0x10(%rbx,%rax), %r10d
movq %r10, 0x1b0(%rsp)
movups (%r9,%r10,4), %xmm14
movl 0x20(%rbx,%rax), %r10d
movq %r10, 0x1c0(%rsp)
movups (%r9,%r10,4), %xmm4
movl 0x30(%rbx,%rax), %r10d
movq %r10, 0x120(%rsp)
movups (%r9,%r10,4), %xmm0
movl 0x44(%rbx,%rax), %r9d
movq (%rdx,%r9,8), %r9
movq 0xe0(%r9), %r11
movq (%r11,%r8), %r9
movups (%r9,%r14,4), %xmm9
movl 0x14(%rbx,%rax), %r10d
movq %r10, 0x1a0(%rsp)
movups (%r9,%r10,4), %xmm2
movl 0x24(%rbx,%rax), %r10d
movq %r10, 0x1b8(%rsp)
movups (%r9,%r10,4), %xmm7
movl 0x34(%rbx,%rax), %r10d
movq %r10, 0x100(%rsp)
movups (%r9,%r10,4), %xmm6
movl 0x48(%rbx,%rax), %r9d
movq (%rdx,%r9,8), %r9
movq 0xe0(%r9), %r12
movq (%r12,%r8), %r9
movl 0x8(%rbx,%rax), %r13d
movups (%r9,%r13,4), %xmm11
movl 0x18(%rbx,%rax), %r10d
movq %r10, 0x188(%rsp)
movups (%r9,%r10,4), %xmm10
movl 0x28(%rbx,%rax), %r10d
movq %r10, 0x198(%rsp)
movups (%r9,%r10,4), %xmm5
movl 0x38(%rbx,%rax), %r10d
movq %r10, 0x1a8(%rsp)
movups (%r9,%r10,4), %xmm8
movl 0x4c(%rbx,%rax), %r9d
movq (%rdx,%r9,8), %rdx
subss %xmm1, %xmm3
movq 0xe0(%rdx), %rdx
movq (%rdx,%r8), %r15
movl 0xc(%rbx,%rax), %ebp
movups (%r15,%rbp,4), %xmm12
movl 0x1c(%rbx,%rax), %r8d
movups (%r15,%r8,4), %xmm13
movaps %xmm15, %xmm1
unpcklps %xmm11, %xmm1 # xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
unpckhps %xmm11, %xmm15 # xmm15 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
movaps %xmm9, %xmm11
unpcklps %xmm12, %xmm11 # xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
unpckhps %xmm12, %xmm9 # xmm9 = xmm9[2],xmm12[2],xmm9[3],xmm12[3]
movl 0x2c(%rbx,%rax), %r14d
movups (%r15,%r14,4), %xmm12
unpcklps %xmm9, %xmm15 # xmm15 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
movaps %xmm15, 0xc0(%rsp)
movaps %xmm1, %xmm9
unpcklps %xmm11, %xmm9 # xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
movaps %xmm9, 0x10(%rsp)
unpckhps %xmm11, %xmm1 # xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
movaps %xmm1, 0xd0(%rsp)
movaps %xmm14, %xmm1
unpcklps %xmm10, %xmm1 # xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
unpckhps %xmm10, %xmm14 # xmm14 = xmm14[2],xmm10[2],xmm14[3],xmm10[3]
movaps %xmm2, %xmm10
unpcklps %xmm13, %xmm10 # xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
unpckhps %xmm13, %xmm2 # xmm2 = xmm2[2],xmm13[2],xmm2[3],xmm13[3]
movl 0x3c(%rbx,%rax), %r10d
movups (%r15,%r10,4), %xmm9
unpcklps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
movaps %xmm14, 0x110(%rsp)
movaps %xmm1, %xmm2
unpcklps %xmm10, %xmm2 # xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1]
movaps %xmm2, 0x140(%rsp)
unpckhps %xmm10, %xmm1 # xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
movaps %xmm1, 0x130(%rsp)
movaps %xmm4, %xmm1
unpcklps %xmm5, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
unpckhps %xmm5, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movaps %xmm7, %xmm2
unpcklps %xmm12, %xmm2 # xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
unpckhps %xmm12, %xmm7 # xmm7 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
incl %ecx
movslq %ecx, %rcx
imulq $0x38, %rcx, %r15
movq (%rdi,%r15), %rcx
movups (%rcx,%rsi,4), %xmm5
unpcklps %xmm7, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
movaps %xmm4, 0x70(%rsp)
movaps %xmm1, %xmm4
unpcklps %xmm2, %xmm4 # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
movaps %xmm4, 0x50(%rsp)
unpckhps %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
movaps %xmm1, 0x60(%rsp)
movaps %xmm0, %xmm4
unpcklps %xmm8, %xmm4 # xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1]
unpckhps %xmm8, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
movaps %xmm6, %xmm2
unpcklps %xmm9, %xmm2 # xmm2 = xmm2[0],xmm9[0],xmm2[1],xmm9[1]
unpckhps %xmm9, %xmm6 # xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
movq (%r12,%r15), %rsi
movups (%rsi,%r13,4), %xmm7
unpcklps %xmm6, %xmm0 # xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
movaps %xmm0, 0x3e0(%rsp)
movaps %xmm4, %xmm0
unpcklps %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
movaps %xmm0, 0xe0(%rsp)
unpckhps %xmm2, %xmm4 # xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
movq (%r11,%r15), %rdi
movq 0x40(%rsp), %r11
movq 0x48(%rsp), %r9
movaps %xmm5, %xmm11
unpcklps %xmm7, %xmm11 # xmm11 = xmm11[0],xmm7[0],xmm11[1],xmm7[1]
unpckhps %xmm7, %xmm5 # xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
movq 0x190(%rsp), %r12
movups (%rdi,%r12,4), %xmm2
movq (%rdx,%r15), %rdx
movups (%rdx,%rbp,4), %xmm6
movaps %xmm2, %xmm7
unpcklps %xmm6, %xmm7 # xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
unpckhps %xmm6, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
unpcklps %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
movaps %xmm11, %xmm2
unpcklps %xmm7, %xmm2 # xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
unpckhps %xmm7, %xmm11 # xmm11 = xmm11[2],xmm7[2],xmm11[3],xmm7[3]
movq 0x1b0(%rsp), %r15
movups (%rcx,%r15,4), %xmm7
movq 0x188(%rsp), %r15
movups (%rsi,%r15,4), %xmm6
movaps %xmm7, %xmm10
unpcklps %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
unpckhps %xmm6, %xmm7 # xmm7 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
movq 0x1a0(%rsp), %r15
movups (%rdi,%r15,4), %xmm6
movups (%rdx,%r8,4), %xmm8
movaps %xmm6, %xmm9
unpcklps %xmm8, %xmm9 # xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
unpckhps %xmm8, %xmm6 # xmm6 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
unpcklps %xmm6, %xmm7 # xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
movaps %xmm10, %xmm15
unpcklps %xmm9, %xmm15 # xmm15 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
unpckhps %xmm9, %xmm10 # xmm10 = xmm10[2],xmm9[2],xmm10[3],xmm9[3]
movq 0x1c0(%rsp), %r8
movups (%rcx,%r8,4), %xmm6
movq 0x198(%rsp), %r8
movups (%rsi,%r8,4), %xmm8
movaps %xmm6, %xmm9
unpcklps %xmm8, %xmm9 # xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
unpckhps %xmm8, %xmm6 # xmm6 = xmm6[2],xmm8[2],xmm6[3],xmm8[3]
movq 0x1b8(%rsp), %r8
movups (%rdi,%r8,4), %xmm8
movups (%rdx,%r14,4), %xmm12
movaps %xmm8, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
unpckhps %xmm12, %xmm8 # xmm8 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
unpcklps %xmm8, %xmm6 # xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1]
movaps %xmm9, %xmm14
unpcklps %xmm13, %xmm14 # xmm14 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
unpckhps %xmm13, %xmm9 # xmm9 = xmm9[2],xmm13[2],xmm9[3],xmm13[3]
movq 0x120(%rsp), %r8
movups (%rcx,%r8,4), %xmm12
movq 0x1a8(%rsp), %rcx
movups (%rsi,%rcx,4), %xmm13
movaps %xmm12, %xmm8
unpcklps %xmm13, %xmm8 # xmm8 = xmm8[0],xmm13[0],xmm8[1],xmm13[1]
unpckhps %xmm13, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
movq 0x100(%rsp), %rcx
movups (%rdi,%rcx,4), %xmm13
movups (%rdx,%r10,4), %xmm0
movaps %xmm13, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
unpckhps %xmm0, %xmm13 # xmm13 = xmm13[2],xmm0[2],xmm13[3],xmm0[3]
unpcklps %xmm13, %xmm12 # xmm12 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
movaps %xmm8, %xmm13
unpcklps %xmm1, %xmm13 # xmm13 = xmm13[0],xmm1[0],xmm13[1],xmm1[1]
unpckhps %xmm1, %xmm8 # xmm8 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
movss 0x1d55c34(%rip), %xmm0 # 0x1eec714
subss %xmm3, %xmm0
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm3, %xmm2
movaps 0x10(%rsp), %xmm1
mulps %xmm0, %xmm1
addps %xmm2, %xmm1
movaps %xmm1, 0x10(%rsp)
mulps %xmm3, %xmm11
movaps 0xd0(%rsp), %xmm1
mulps %xmm0, %xmm1
addps %xmm11, %xmm1
movaps %xmm1, %xmm11
mulps %xmm3, %xmm5
movaps 0xc0(%rsp), %xmm1
mulps %xmm0, %xmm1
addps %xmm5, %xmm1
movaps %xmm1, %xmm5
mulps %xmm3, %xmm15
movaps 0x140(%rsp), %xmm1
mulps %xmm0, %xmm1
addps %xmm15, %xmm1
movaps %xmm0, %xmm15
mulps %xmm3, %xmm10
movaps 0x130(%rsp), %xmm2
mulps %xmm0, %xmm2
addps %xmm10, %xmm2
mulps %xmm3, %xmm7
movaps 0x110(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm7, %xmm0
movaps %xmm0, %xmm10
movaps %xmm0, 0x110(%rsp)
mulps %xmm3, %xmm14
movaps 0x50(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm14, %xmm0
movaps %xmm0, 0x50(%rsp)
mulps %xmm3, %xmm9
movaps 0x60(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm9, %xmm0
movaps %xmm0, 0x60(%rsp)
mulps %xmm3, %xmm6
movaps 0x70(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm6, %xmm0
movaps %xmm0, 0x70(%rsp)
mulps %xmm3, %xmm13
mulps %xmm3, %xmm8
mulps %xmm12, %xmm3
movaps 0xe0(%rsp), %xmm12
mulps %xmm15, %xmm12
addps %xmm13, %xmm12
mulps %xmm15, %xmm4
addps %xmm8, %xmm4
mulps 0x3e0(%rsp), %xmm15
addps %xmm3, %xmm15
movaps %xmm11, %xmm7
movaps %xmm2, 0x130(%rsp)
subps %xmm2, %xmm7
movaps %xmm5, %xmm9
subps %xmm10, %xmm9
movaps %xmm4, 0x120(%rsp)
movaps %xmm4, %xmm6
subps %xmm11, %xmm6
movaps %xmm15, %xmm8
subps %xmm5, %xmm8
movaps %xmm7, %xmm0
mulps %xmm8, %xmm0
movaps %xmm9, %xmm14
mulps %xmm6, %xmm14
subps %xmm0, %xmm14
movaps %xmm12, 0xe0(%rsp)
movaps %xmm12, %xmm4
movaps 0x10(%rsp), %xmm13
subps %xmm13, %xmm4
movss (%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm13, %xmm3
subps %xmm0, %xmm13
movss 0x10(%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm11, %xmm2
subps %xmm0, %xmm2
movaps %xmm2, 0xd0(%rsp)
movss 0x40(%r9,%r11,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0x50(%r9,%r11,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movaps %xmm2, %xmm0
mulps %xmm11, %xmm0
movaps %xmm13, %xmm2
mulps %xmm12, %xmm2
subps %xmm0, %xmm2
movaps %xmm1, 0x140(%rsp)
subps %xmm1, %xmm3
movaps %xmm3, %xmm10
mulps %xmm8, %xmm10
mulps %xmm2, %xmm8
mulps %xmm9, %xmm2
mulps %xmm4, %xmm9
subps %xmm9, %xmm10
movss 0x20(%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm0, %xmm5
movss 0x60(%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm13, %xmm1
movaps %xmm13, 0x10(%rsp)
mulps %xmm0, %xmm1
movaps %xmm5, %xmm13
mulps %xmm11, %xmm13
subps %xmm1, %xmm13
movaps %xmm3, %xmm1
mulps %xmm6, %xmm1
mulps %xmm13, %xmm6
mulps %xmm7, %xmm13
mulps %xmm4, %xmm7
subps %xmm1, %xmm7
movaps %xmm5, 0xc0(%rsp)
movaps %xmm5, %xmm1
movaps 0xd0(%rsp), %xmm5
mulps %xmm12, %xmm1
movaps %xmm5, %xmm9
mulps %xmm0, %xmm9
subps %xmm1, %xmm9
mulps %xmm7, %xmm0
movaps %xmm10, 0x100(%rsp)
mulps %xmm10, %xmm12
addps %xmm0, %xmm12
mulps %xmm14, %xmm11
addps %xmm12, %xmm11
xorps %xmm12, %xmm12
addps %xmm8, %xmm6
mulps %xmm9, %xmm4
addps %xmm6, %xmm4
addps %xmm2, %xmm13
mulps %xmm3, %xmm9
movaps 0x70(%rsp), %xmm10
addps %xmm13, %xmm9
movaps 0x50(%rsp), %xmm3
movaps %xmm11, %xmm8
andps 0x1d55964(%rip), %xmm8 # 0x1eec6d0
xorps %xmm8, %xmm4
xorps %xmm8, %xmm9
movaps %xmm4, %xmm0
cmpnltps %xmm12, %xmm0
movaps %xmm9, %xmm1
cmpnltps %xmm12, %xmm1
andps %xmm0, %xmm1
movaps %xmm11, %xmm2
cmpneqps %xmm12, %xmm11
movaps 0x60(%rsp), %xmm12
andps %xmm11, %xmm1
movaps 0x110(%rsp), %xmm11
andps 0x1d55915(%rip), %xmm2 # 0x1eec6c0
movaps %xmm4, %xmm6
addps %xmm9, %xmm6
cmpleps %xmm2, %xmm6
andps %xmm1, %xmm6
leaq (%rbx,%rax), %r13
movmskps %xmm6, %eax
testl %eax, %eax
jne 0x196fab
movaps %xmm3, %xmm7
subps 0xe0(%rsp), %xmm7
movaps %xmm12, %xmm1
subps 0x120(%rsp), %xmm1
movaps %xmm10, %xmm6
subps %xmm15, %xmm6
movaps 0x140(%rsp), %xmm2
subps %xmm3, %xmm2
movaps %xmm3, %xmm5
movaps 0x130(%rsp), %xmm3
subps %xmm12, %xmm3
subps %xmm10, %xmm11
movaps %xmm11, %xmm0
mulps %xmm1, %xmm0
movaps %xmm1, %xmm8
movaps %xmm1, 0xe0(%rsp)
movaps %xmm3, %xmm14
mulps %xmm6, %xmm14
subps %xmm0, %xmm14
movaps %xmm2, %xmm0
mulps %xmm6, %xmm0
movaps %xmm11, %xmm1
movaps %xmm7, 0x10(%rsp)
mulps %xmm7, %xmm1
subps %xmm0, %xmm1
movaps %xmm3, %xmm0
mulps %xmm7, %xmm0
movaps %xmm2, %xmm4
mulps %xmm8, %xmm4
subps %xmm0, %xmm4
movss (%r9,%r11,4), %xmm0
movss 0x10(%r9,%r11,4), %xmm8
movaps %xmm10, %xmm13
movss 0x20(%r9,%r11,4), %xmm10
movss 0x40(%r9,%r11,4), %xmm7
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0x50(%r9,%r11,4), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movaps %xmm11, %xmm15
movss 0x60(%r9,%r11,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
subps %xmm0, %xmm5
subps %xmm8, %xmm12
subps %xmm10, %xmm13
movaps %xmm13, %xmm8
movaps %xmm13, %xmm0
mulps %xmm9, %xmm8
movaps %xmm12, %xmm13
mulps %xmm11, %xmm13
subps %xmm8, %xmm13
movaps %xmm5, %xmm10
mulps %xmm11, %xmm10
movaps %xmm0, %xmm8
mulps %xmm7, %xmm8
subps %xmm10, %xmm8
movaps %xmm12, 0x60(%rsp)
mulps %xmm7, %xmm12
movaps %xmm5, 0x50(%rsp)
mulps %xmm9, %xmm5
subps %xmm12, %xmm5
movaps %xmm1, %xmm12
movaps %xmm4, 0x70(%rsp)
mulps %xmm4, %xmm11
mulps %xmm1, %xmm9
addps %xmm11, %xmm9
mulps %xmm14, %xmm7
addps %xmm9, %xmm7
mulps %xmm5, %xmm15
mulps %xmm8, %xmm3
addps %xmm15, %xmm3
mulps %xmm13, %xmm2
addps %xmm3, %xmm2
mulps %xmm6, %xmm5
mulps 0xe0(%rsp), %xmm8
addps %xmm5, %xmm8
movaps %xmm7, %xmm10
andps 0x1d557a0(%rip), %xmm10 # 0x1eec6d0
xorps %xmm10, %xmm2
mulps 0x10(%rsp), %xmm13
addps %xmm8, %xmm13
xorps %xmm10, %xmm13
movaps %xmm2, %xmm1
xorps %xmm3, %xmm3
cmpnltps %xmm3, %xmm1
movaps %xmm13, %xmm8
cmpnltps %xmm3, %xmm8
andps %xmm1, %xmm8
movaps %xmm7, %xmm5
andps 0x1d5575d(%rip), %xmm5 # 0x1eec6c0
cmpneqps %xmm3, %xmm7
andps %xmm7, %xmm8
movaps %xmm2, %xmm6
addps %xmm13, %xmm6
cmpleps %xmm5, %xmm6
andps %xmm8, %xmm6
movmskps %xmm6, %eax
testl %eax, %eax
movq 0xf8(%rsp), %r12
jne 0x197524
movq 0x1c8(%rsp), %rax
incq %rax
cmpq 0x158(%rsp), %rax
jne 0x1966f0
jmp 0x197c04
movaps 0xc0(%rsp), %xmm0
mulps %xmm7, %xmm0
mulps 0x100(%rsp), %xmm5
addps %xmm0, %xmm5
movaps 0x10(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm5, %xmm0
xorps %xmm0, %xmm8
movaps %xmm8, 0x10(%rsp)
movss 0x30(%r9,%r11,4), %xmm0
movss 0x80(%r9,%r11,4), %xmm8
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm2, %xmm0
cmpltps 0x10(%rsp), %xmm0
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm2, %xmm8
movaps 0x10(%rsp), %xmm1
cmpleps %xmm8, %xmm1
movaps 0x10(%rsp), %xmm5
andps %xmm0, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x196dc8
movq 0x150(%rsp), %r15
movaps %xmm4, 0x1d0(%rsp)
movaps %xmm9, 0x1e0(%rsp)
movaps %xmm5, 0x1f0(%rsp)
movaps %xmm2, 0x200(%rsp)
movaps %xmm14, 0x210(%rsp)
movaps 0x100(%rsp), %xmm3
movaps %xmm3, 0x220(%rsp)
movaps %xmm7, 0x230(%rsp)
movaps %xmm1, 0x240(%rsp)
movaps 0x360(%rsp), %xmm0
movaps %xmm0, 0x2b0(%rsp)
movaps %xmm1, 0x20(%rsp)
rcpps %xmm2, %xmm8
mulps %xmm8, %xmm2
movaps 0x1d5597b(%rip), %xmm10 # 0x1eeca10
movaps %xmm10, %xmm6
subps %xmm2, %xmm6
mulps %xmm8, %xmm6
addps %xmm8, %xmm6
movaps 0x10(%rsp), %xmm2
mulps %xmm6, %xmm2
movaps %xmm2, 0x10(%rsp)
movaps 0x10(%rsp), %xmm2
movaps %xmm2, 0x270(%rsp)
mulps %xmm6, %xmm4
minps %xmm10, %xmm4
mulps %xmm9, %xmm6
minps %xmm10, %xmm6
movaps %xmm10, %xmm2
subps %xmm4, %xmm2
movaps %xmm10, %xmm8
subps %xmm6, %xmm8
blendvps %xmm0, %xmm2, %xmm4
movaps %xmm4, 0x250(%rsp)
blendvps %xmm0, %xmm8, %xmm6
movaps 0x10(%rsp), %xmm4
movaps %xmm6, 0x260(%rsp)
movaps %xmm14, 0x280(%rsp)
movaps %xmm3, 0x290(%rsp)
movaps %xmm7, 0x2a0(%rsp)
movaps 0x1d548d4(%rip), %xmm2 # 0x1eeb9f0
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm4, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm0
shufps $0x4e, %xmm3, %xmm0 # xmm0 = xmm0[2,3],xmm3[0,1]
minps %xmm3, %xmm0
cmpeqps %xmm2, %xmm0
andps %xmm1, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x197149
movaps %xmm0, %xmm1
movmskps %xmm1, %eax
bsfq %rax, %r12
movq %r15, %rdx
movl 0x40(%r13,%r12,4), %eax
movq 0x1e8(%rdx), %rcx
movq (%rcx,%rax,8), %r14
movl 0x90(%r9,%r11,4), %ecx
testl %ecx, 0x34(%r14)
je 0x197455
movq 0x1378(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x197193
cmpq $0x0, 0x40(%r14)
je 0x197a72
movaps %xmm15, 0xd0(%rsp)
movss 0x250(%rsp,%r12,4), %xmm0
movss 0x260(%rsp,%r12,4), %xmm1
movss 0x280(%rsp,%r12,4), %xmm2
movss 0x290(%rsp,%r12,4), %xmm3
movq 0x1378(%rsp), %rdx
movq 0x8(%rdx), %rcx
movd %eax, %xmm4
pshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movd 0x50(%r13,%r12,4), %xmm5
pshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movss 0x2a0(%rsp,%r12,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm2, 0x2c0(%rsp)
movaps %xmm3, 0x2d0(%rsp)
movaps %xmm6, 0x2e0(%rsp)
movaps %xmm0, 0x2f0(%rsp)
movaps %xmm1, 0x300(%rsp)
movdqa %xmm5, 0x310(%rsp)
movdqa %xmm4, 0x320(%rsp)
leaq 0x330(%rsp), %rax
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rax)
movdqa %xmm0, (%rax)
movd (%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x330(%rsp)
movd 0x4(%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x340(%rsp)
movss 0x80(%r9,%r11,4), %xmm0
movss %xmm0, 0xc0(%rsp)
movss 0x270(%rsp,%r12,4), %xmm0
movss %xmm0, 0x80(%r9,%r11,4)
movq 0xf0(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0x90(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x98(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0xa0(%rsp)
movq %r9, 0xa8(%rsp)
leaq 0x2c0(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x4, 0xb8(%rsp)
movq 0x40(%r14), %rax
testq %rax, %rax
je 0x197311
leaq 0x90(%rsp), %rdi
callq *%rax
movdqa 0x80(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1974a8
movq 0x1378(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x197350
testb $0x2, (%rcx)
jne 0x197346
testb $0x40, 0x3e(%r14)
je 0x197350
leaq 0x90(%rsp), %rdi
callq *%rax
movdqa 0x80(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d546aa(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d54aae(%rip), %xmm1 # 0x1eebe20
je 0x1974b8
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1974b8
andl $0x0, 0x20(%rsp,%r12,4)
movaps 0x20(%rsp), %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x197b24
movaps 0x1d5457e(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm4, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19749c
movaps %xmm1, %xmm0
movmskps %xmm0, %eax
bsfq %rax, %r12
jmp 0x197153
pcmpeqd 0x1d54560(%rip), %xmm1 # 0x1eeba10
pxor 0x1d54968(%rip), %xmm1 # 0x1eebe20
ptest 0x1d5565f(%rip), %xmm1 # 0x1eecb20
jne 0x1974df
movq 0x48(%rsp), %rax
movq 0x40(%rsp), %rcx
movd 0xc0(%rsp), %xmm0
movd %xmm0, 0x80(%rax,%rcx,4)
andl $0x0, 0x20(%rsp,%r12,4)
movq 0x48(%rsp), %r9
movq 0x40(%rsp), %r11
movss 0x80(%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x10(%rsp), %xmm4
movaps %xmm4, %xmm1
cmpleps %xmm0, %xmm1
andps 0x20(%rsp), %xmm1
movaps %xmm1, 0x20(%rsp)
movq %r15, %rdx
movaps 0xd0(%rsp), %xmm15
jmp 0x19745b
mulps 0x70(%rsp), %xmm0
movaps 0x60(%rsp), %xmm1
mulps %xmm12, %xmm1
addps %xmm0, %xmm1
movaps 0x50(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm1, %xmm0
xorps %xmm0, %xmm10
movss 0x30(%r9,%r11,4), %xmm7
movss 0x80(%r9,%r11,4), %xmm8
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm5, %xmm7
cmpltps %xmm10, %xmm7
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm5, %xmm8
movaps %xmm10, %xmm1
cmpleps %xmm8, %xmm1
andps %xmm7, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x196f8d
movaps %xmm2, %xmm9
movaps %xmm2, 0x1d0(%rsp)
movaps %xmm13, 0x1e0(%rsp)
movaps %xmm10, 0x1f0(%rsp)
movaps %xmm5, 0x200(%rsp)
movaps %xmm14, 0x210(%rsp)
movaps %xmm12, 0x220(%rsp)
movaps 0x70(%rsp), %xmm2
movaps %xmm2, 0x230(%rsp)
movaps %xmm1, 0x240(%rsp)
movaps 0x350(%rsp), %xmm0
movaps %xmm0, 0x2b0(%rsp)
movq 0x1378(%rsp), %rax
movq (%rax), %rbp
movaps %xmm1, 0x20(%rsp)
rcpps %xmm5, %xmm6
mulps %xmm6, %xmm5
movaps 0x1d55410(%rip), %xmm8 # 0x1eeca10
movaps %xmm8, %xmm7
subps %xmm5, %xmm7
mulps %xmm6, %xmm7
addps %xmm6, %xmm7
mulps %xmm7, %xmm10
movaps %xmm10, 0x270(%rsp)
mulps %xmm7, %xmm9
minps %xmm8, %xmm9
mulps %xmm13, %xmm7
minps %xmm8, %xmm7
movaps %xmm8, %xmm5
subps %xmm9, %xmm5
movaps %xmm8, %xmm6
subps %xmm7, %xmm6
blendvps %xmm0, %xmm5, %xmm9
movaps %xmm9, 0x250(%rsp)
blendvps %xmm0, %xmm6, %xmm7
movaps %xmm7, 0x260(%rsp)
movaps %xmm14, 0x280(%rsp)
movaps %xmm12, 0x290(%rsp)
movaps %xmm2, 0x2a0(%rsp)
movaps 0x1d5437a(%rip), %xmm2 # 0x1eeb9f0
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm10, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm0
shufps $0x4e, %xmm3, %xmm0 # xmm0 = xmm0[2,3],xmm3[0,1]
minps %xmm3, %xmm0
cmpeqps %xmm2, %xmm0
andps %xmm1, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1976a4
movaps %xmm0, %xmm1
movmskps %xmm1, %eax
bsfq %rax, %r15
movl 0x40(%r13,%r15,4), %eax
movq 0x1e8(%rbp), %rcx
movq (%rcx,%rax,8), %r14
movl 0x90(%r9,%r11,4), %ecx
testl %ecx, 0x34(%r14)
je 0x1979a7
movq 0x1378(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1976eb
cmpq $0x0, 0x40(%r14)
je 0x197b43
movaps %xmm10, 0x60(%rsp)
movss 0x250(%rsp,%r15,4), %xmm0
movss 0x260(%rsp,%r15,4), %xmm1
movss 0x280(%rsp,%r15,4), %xmm2
movss 0x290(%rsp,%r15,4), %xmm3
movq 0x1378(%rsp), %rdx
movq 0x8(%rdx), %rcx
movd %eax, %xmm4
pshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movd 0x50(%r13,%r15,4), %xmm5
pshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movss 0x2a0(%rsp,%r15,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm2, 0x2c0(%rsp)
movaps %xmm3, 0x2d0(%rsp)
movaps %xmm6, 0x2e0(%rsp)
movaps %xmm0, 0x2f0(%rsp)
movaps %xmm1, 0x300(%rsp)
movdqa %xmm5, 0x310(%rsp)
movdqa %xmm4, 0x320(%rsp)
leaq 0x330(%rsp), %rax
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rax)
movdqa %xmm0, (%rax)
movd (%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x330(%rsp)
movd 0x4(%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x340(%rsp)
movss 0x80(%r9,%r11,4), %xmm0
movss %xmm0, 0x50(%rsp)
movss 0x270(%rsp,%r15,4), %xmm0
movss %xmm0, 0x80(%r9,%r11,4)
movq 0xf0(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0x90(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x98(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0xa0(%rsp)
movq %r9, 0xa8(%rsp)
leaq 0x2c0(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x4, 0xb8(%rsp)
movq 0x40(%r14), %rax
testq %rax, %rax
je 0x197863
leaq 0x90(%rsp), %rdi
callq *%rax
movdqa 0x80(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1979fb
movq 0x1378(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1978a2
testb $0x2, (%rcx)
jne 0x197898
testb $0x40, 0x3e(%r14)
je 0x1978a2
leaq 0x90(%rsp), %rdi
callq *%rax
movdqa 0x80(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d54158(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d5455c(%rip), %xmm1 # 0x1eebe20
je 0x197a0b
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x197a0b
andl $0x0, 0x20(%rsp,%r15,4)
movaps 0x20(%rsp), %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x196f8d
movaps 0x1d5402c(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm10, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1979ef
movaps %xmm1, %xmm0
movmskps %xmm0, %eax
bsfq %rax, %r15
jmp 0x1976ab
pcmpeqd 0x1d5400d(%rip), %xmm1 # 0x1eeba10
pxor 0x1d54415(%rip), %xmm1 # 0x1eebe20
ptest 0x1d5510c(%rip), %xmm1 # 0x1eecb20
jne 0x197a2f
movq 0x48(%rsp), %rax
movq 0x40(%rsp), %rcx
movd 0x50(%rsp), %xmm0
movd %xmm0, 0x80(%rax,%rcx,4)
andl $0x0, 0x20(%rsp,%r15,4)
movq 0x48(%rsp), %r9
movq 0x40(%rsp), %r11
movss 0x80(%r9,%r11,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x60(%rsp), %xmm10
movaps %xmm10, %xmm1
cmpleps %xmm0, %xmm1
andps 0x20(%rsp), %xmm1
movaps %xmm1, 0x20(%rsp)
movq 0xf8(%rsp), %r12
jmp 0x1979ad
movss 0x250(%rsp,%r12,4), %xmm0
movss 0x260(%rsp,%r12,4), %xmm1
movss 0x270(%rsp,%r12,4), %xmm2
movss %xmm2, 0x80(%r9,%r11,4)
movss 0x280(%rsp,%r12,4), %xmm2
movss %xmm2, 0xc0(%r9,%r11,4)
movss 0x290(%rsp,%r12,4), %xmm2
movss %xmm2, 0xd0(%r9,%r11,4)
movss 0x2a0(%rsp,%r12,4), %xmm2
movss %xmm2, 0xe0(%r9,%r11,4)
movss %xmm0, 0xf0(%r9,%r11,4)
movss %xmm1, 0x100(%r9,%r11,4)
movl 0x50(%r13,%r12,4), %ecx
movl %ecx, 0x110(%r9,%r11,4)
movl %eax, 0x120(%r9,%r11,4)
movq 0x1378(%rsp), %rcx
movq 0x8(%rcx), %rax
movl (%rax), %eax
movl %eax, 0x130(%r9,%r11,4)
movq 0x8(%rcx), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r9,%r11,4)
movaps 0x70(%rsp), %xmm10
movaps 0x110(%rsp), %xmm11
movaps 0x60(%rsp), %xmm12
movaps 0x50(%rsp), %xmm3
jmp 0x196dc8
movss 0x250(%rsp,%r15,4), %xmm0
movss 0x260(%rsp,%r15,4), %xmm1
movss 0x270(%rsp,%r15,4), %xmm2
movss %xmm2, 0x80(%r9,%r11,4)
movss 0x280(%rsp,%r15,4), %xmm2
movss %xmm2, 0xc0(%r9,%r11,4)
movss 0x290(%rsp,%r15,4), %xmm2
movss %xmm2, 0xd0(%r9,%r11,4)
movss 0x2a0(%rsp,%r15,4), %xmm2
movss %xmm2, 0xe0(%r9,%r11,4)
movss %xmm0, 0xf0(%r9,%r11,4)
movss %xmm1, 0x100(%r9,%r11,4)
movl 0x50(%r13,%r15,4), %ecx
movl %ecx, 0x110(%r9,%r11,4)
movl %eax, 0x120(%r9,%r11,4)
movq 0x1378(%rsp), %rcx
movq 0x8(%rcx), %rax
movl (%rax), %eax
movl %eax, 0x130(%r9,%r11,4)
movq 0x8(%rcx), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r9,%r11,4)
jmp 0x196f8d
movq 0x8(%rsp), %rsi
jmp 0x1962f3
movss 0x80(%r9,%r11,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movq 0x8(%rsp), %rsi
movaps 0x390(%rsp), %xmm8
movaps 0x380(%rsp), %xmm6
movaps 0x370(%rsp), %xmm7
jmp 0x1962f3
addq $0x1338, %rsp # imm = 0x1338
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::QuadMiMBIntersectorKMoeller<4, 4, true>>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::QuadMIntersectorKMoellerTrumbore<4, 4, true>&, embree::RayK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xbb8, %rsp # imm = 0xBB8
movq 0xbf0(%rsp), %rax
leaq 0x418(%rsp), %rdi
movq %rdx, -0x8(%rdi)
movss (%rax,%rcx,4), %xmm5
movss 0x10(%rax,%rcx,4), %xmm6
movss 0x20(%rax,%rcx,4), %xmm7
movss 0x60(%rax,%rcx,4), %xmm8
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
movss 0x70(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x80(%rax,%rcx,4), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r8
movslq 0xa0(%rax,%rcx,4), %r10
movslq 0xb0(%rax,%rcx,4), %r11
movq %r8, %rbx
xorq $0x10, %rbx
movq %r10, %r14
xorq $0x10, %r14
movq %r11, %r15
xorq $0x10, %r15
movss 0xc0(%rax,%rcx,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
leaq 0x1fb827d(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, 0x3d0(%rsp)
movaps 0xf0(%rax), %xmm0
movaps %xmm0, 0x3c0(%rsp)
pushq $0x1
popq %rdx
shll %cl, %edx
movslq %edx, %rdx
shlq $0x4, %rdx
addq %rax, %rdx
movq %rdx, 0x128(%rsp)
leaq 0x410(%rsp), %r12
movq %r9, 0x38(%rsp)
movq %rcx, 0x30(%rsp)
movaps %xmm5, 0xe0(%rsp)
movaps %xmm6, 0xd0(%rsp)
movaps %xmm7, 0xc0(%rsp)
movaps %xmm8, 0xb0(%rsp)
movaps %xmm9, 0xa0(%rsp)
movaps %xmm10, 0x90(%rsp)
movq %r8, 0x158(%rsp)
movq %r10, 0x150(%rsp)
movq %r11, 0x148(%rsp)
movq %rbx, 0x140(%rsp)
movq %r14, 0x138(%rsp)
movq %r15, 0x130(%rsp)
movaps %xmm11, 0x80(%rsp)
movaps %xmm12, 0x70(%rsp)
movq %rdi, %r13
cmpq %r12, %rdi
je 0x199271
leaq -0x8(%r13), %rdi
movq -0x8(%r13), %rbp
testb $0x8, %bpl
jne 0x197eac
movq %rbp, %rax
andq $-0x10, %rax
movss 0x70(%r9,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rax,%r8), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%r8), %xmm2
subps %xmm5, %xmm2
mulps %xmm8, %xmm2
movaps %xmm11, %xmm1
movaps 0x80(%rax,%r10), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r10), %xmm3
maxps %xmm2, %xmm1
subps %xmm6, %xmm3
mulps %xmm9, %xmm3
movaps 0x80(%rax,%r11), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%r11), %xmm2
subps %xmm7, %xmm2
mulps %xmm10, %xmm2
maxps %xmm2, %xmm3
maxps %xmm3, %xmm1
movaps 0x80(%rax,%rbx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rbx), %xmm3
subps %xmm5, %xmm3
mulps %xmm8, %xmm3
movaps %xmm12, %xmm2
minps %xmm3, %xmm2
movaps 0x80(%rax,%r14), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r14), %xmm3
subps %xmm6, %xmm3
movaps 0x80(%rax,%r15), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%r15), %xmm4
mulps %xmm9, %xmm3
subps %xmm7, %xmm4
mulps %xmm10, %xmm4
minps %xmm4, %xmm3
minps %xmm3, %xmm2
movl %ebp, %edx
andl $0x7, %edx
cmpleps %xmm2, %xmm1
cmpl $0x6, %edx
je 0x197ef5
pslld $0x1f, %xmm1
movmskps %xmm1, %esi
testb $0x8, %bpl
jne 0x197ef1
testq %rsi, %rsi
je 0x197f10
andq $-0x10, %rbp
bsfq %rsi, %rax
leaq -0x1(%rsi), %rdx
movq (%rbp,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rsi, %rdx
jne 0x197f15
movq %rax, %rbp
xorl %eax, %eax
testl %eax, %eax
je 0x197dcc
jmp 0x197f63
pushq $0x6
jmp 0x197f12
movaps 0xe0(%rax), %xmm2
cmpleps %xmm0, %xmm2
cmpltps 0xf0(%rax), %xmm0
andps %xmm2, %xmm0
andps %xmm0, %xmm1
jmp 0x197ea4
pushq $0x4
popq %rax
jmp 0x197ee7
movq %rsi, 0x8(%rsp)
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rsi
leaq -0x1(%rdx), %rax
movq (%rbp,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
andq %rdx, %rax
je 0x197f59
movq %rsi, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rsi
leaq -0x1(%rax), %rdx
jmp 0x197f29
movq %rsi, %rbp
movq 0x8(%rsp), %rsi
jmp 0x197ee5
cmpl $0x6, %eax
jne 0x199215
movl %ebp, %edx
andl $0xf, %edx
xorl %eax, %eax
addq $-0x8, %rdx
movq %rdx, 0x210(%rsp)
setne %dl
je 0x199215
movq %rsi, 0x8(%rsp)
movq %r13, 0x28(%rsp)
movq %rdi, 0x160(%rsp)
andq $-0x10, %rbp
xorl %eax, %eax
movb %dl, 0x7(%rsp)
movq %rax, 0x218(%rsp)
imulq $0x60, %rax, %rax
movq 0xbf8(%rsp), %rdx
movq (%rdx), %rsi
movss 0x70(%r9,%rcx,4), %xmm15
movl 0x40(%rbp,%rax), %edx
movq %rsi, 0x168(%rsp)
movq 0x1e8(%rsi), %rcx
movq (%rcx,%rdx,8), %rdx
movss 0x28(%rdx), %xmm0
movss 0x2c(%rdx), %xmm1
movss 0x30(%rdx), %xmm2
subss %xmm1, %xmm15
subss %xmm1, %xmm2
divss %xmm2, %xmm15
mulss %xmm0, %xmm15
roundss $0x9, %xmm15, %xmm1
addss 0x1d589c2(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm0, %xmm0
maxss %xmm1, %xmm0
subss %xmm0, %xmm15
cvttss2si %xmm0, %esi
movslq %esi, %rdi
movq 0xe0(%rdx), %rsi
imulq $0x38, %rdi, %r10
movq (%rsi,%r10), %rdx
movq %rdx, 0x100(%rsp)
movl (%rbp,%rax), %r8d
movl 0x4(%rbp,%rax), %r9d
movups (%rdx,%r8,4), %xmm2
movl 0x10(%rbp,%rax), %r11d
movups (%rdx,%r11,4), %xmm1
movl 0x30(%rbp,%rax), %ebx
movq %rbx, 0x1d0(%rsp)
movups (%rdx,%rbx,4), %xmm14
movl 0x44(%rbp,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movq 0xe0(%rdx), %r13
movq (%r13,%r10), %rdx
movups (%rdx,%r9,4), %xmm0
movq %rdx, %rbx
movl 0x48(%rbp,%rax), %edx
movq (%rcx,%rdx,8), %r14
movl 0x4c(%rbp,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0x14(%rbp,%rax), %edx
movq %rdx, 0xf0(%rsp)
movups (%rbx,%rdx,4), %xmm5
movq %rbx, %rdx
movq %rbx, 0x1f0(%rsp)
incl %edi
movslq %edi, %rdi
imulq $0x38, %rdi, %r15
movq (%rsi,%r15), %rsi
movups (%rsi,%r8,4), %xmm4
movl 0x34(%rbp,%rax), %ebx
movups (%rdx,%rbx,4), %xmm7
movq 0xe0(%r14), %r14
movups (%rsi,%r11,4), %xmm13
movq (%r14,%r10), %r12
movq %r12, 0x1e0(%rsp)
movq (%r13,%r15), %r8
movups (%r8,%r9,4), %xmm6
movl 0x8(%rbp,%rax), %r11d
movups (%r12,%r11,4), %xmm8
movq (%r14,%r15), %rdi
movups (%rdi,%r11,4), %xmm9
movl 0x18(%rbp,%rax), %r14d
movups (%r12,%r14,4), %xmm10
movq 0xe0(%rcx), %rcx
movq (%rcx,%r10), %r10
movq (%rcx,%r15), %r11
movl 0x38(%rbp,%rax), %edx
movups (%r12,%rdx,4), %xmm11
movl 0xc(%rbp,%rax), %r15d
movups (%r10,%r15,4), %xmm12
movaps %xmm2, %xmm3
unpcklps %xmm8, %xmm3 # xmm3 = xmm3[0],xmm8[0],xmm3[1],xmm8[1]
unpckhps %xmm8, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
movaps %xmm0, %xmm8
unpcklps %xmm12, %xmm8 # xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
unpckhps %xmm12, %xmm0 # xmm0 = xmm0[2],xmm12[2],xmm0[3],xmm12[3]
movl 0x1c(%rbp,%rax), %r12d
movups (%r10,%r12,4), %xmm12
unpcklps %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
movaps %xmm2, 0x40(%rsp)
movaps %xmm3, %xmm0
unpcklps %xmm8, %xmm0 # xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
movaps %xmm0, 0x10(%rsp)
unpckhps %xmm8, %xmm3 # xmm3 = xmm3[2],xmm8[2],xmm3[3],xmm8[3]
movaps %xmm1, %xmm2
unpcklps %xmm10, %xmm2 # xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1]
unpckhps %xmm10, %xmm1 # xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
movaps %xmm5, %xmm8
unpcklps %xmm12, %xmm8 # xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
unpckhps %xmm12, %xmm5 # xmm5 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
movl 0x3c(%rbp,%rax), %r13d
movups (%r10,%r13,4), %xmm10
unpcklps %xmm5, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
movaps %xmm1, 0x110(%rsp)
movaps %xmm2, %xmm0
unpcklps %xmm8, %xmm0 # xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
movaps %xmm0, 0x60(%rsp)
unpckhps %xmm8, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
movaps %xmm14, %xmm1
unpcklps %xmm11, %xmm1 # xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
unpckhps %xmm11, %xmm14 # xmm14 = xmm14[2],xmm11[2],xmm14[3],xmm11[3]
movaps %xmm7, %xmm5
unpcklps %xmm10, %xmm5 # xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
unpckhps %xmm10, %xmm7 # xmm7 = xmm7[2],xmm10[2],xmm7[3],xmm10[3]
movups (%r11,%r15,4), %xmm10
unpcklps %xmm7, %xmm14 # xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1]
movaps %xmm14, 0x200(%rsp)
movaps %xmm1, %xmm14
unpcklps %xmm5, %xmm14 # xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
unpckhps %xmm5, %xmm1 # xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
movaps %xmm4, %xmm8
unpcklps %xmm9, %xmm8 # xmm8 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
unpckhps %xmm9, %xmm4 # xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
movaps %xmm6, %xmm5
unpcklps %xmm10, %xmm5 # xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
unpckhps %xmm10, %xmm6 # xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3]
movups (%rdi,%r14,4), %xmm7
unpcklps %xmm6, %xmm4 # xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
movaps %xmm8, %xmm11
unpcklps %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
unpckhps %xmm5, %xmm8 # xmm8 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
movaps %xmm13, %xmm6
unpcklps %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
unpckhps %xmm7, %xmm13 # xmm13 = xmm13[2],xmm7[2],xmm13[3],xmm7[3]
movq 0xf0(%rsp), %rcx
movups (%r8,%rcx,4), %xmm5
movups (%r11,%r12,4), %xmm7
movq 0x30(%rsp), %rcx
movq 0x38(%rsp), %r9
movaps %xmm5, %xmm9
unpcklps %xmm7, %xmm9 # xmm9 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
unpckhps %xmm7, %xmm5 # xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
unpcklps %xmm5, %xmm13 # xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1]
movaps %xmm13, 0xf0(%rsp)
movaps %xmm6, %xmm10
unpcklps %xmm9, %xmm10 # xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
unpckhps %xmm9, %xmm6 # xmm6 = xmm6[2],xmm9[2],xmm6[3],xmm9[3]
movq 0x1d0(%rsp), %r14
movups (%rsi,%r14,4), %xmm5
movups (%rdi,%rdx,4), %xmm9
movaps %xmm5, %xmm7
unpcklps %xmm9, %xmm7 # xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1]
unpckhps %xmm9, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
movups (%r8,%rbx,4), %xmm9
movups (%r11,%r13,4), %xmm12
movaps %xmm9, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
unpckhps %xmm12, %xmm9 # xmm9 = xmm9[2],xmm12[2],xmm9[3],xmm12[3]
unpcklps %xmm9, %xmm5 # xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
movaps %xmm7, %xmm9
unpcklps %xmm13, %xmm9 # xmm9 = xmm9[0],xmm13[0],xmm9[1],xmm13[1]
unpckhps %xmm13, %xmm7 # xmm7 = xmm7[2],xmm13[2],xmm7[3],xmm13[3]
movss 0x1d54486(%rip), %xmm0 # 0x1eec714
subss %xmm15, %xmm0
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm15, %xmm11
movaps 0x10(%rsp), %xmm12
mulps %xmm0, %xmm12
addps %xmm11, %xmm12
mulps %xmm15, %xmm8
mulps %xmm0, %xmm3
addps %xmm8, %xmm3
mulps %xmm15, %xmm4
movaps 0x40(%rsp), %xmm8
mulps %xmm0, %xmm8
addps %xmm4, %xmm8
movaps %xmm8, %xmm4
mulps %xmm15, %xmm10
movaps 0x60(%rsp), %xmm8
mulps %xmm0, %xmm8
addps %xmm10, %xmm8
movaps %xmm8, 0x60(%rsp)
mulps %xmm15, %xmm6
movaps 0xf0(%rsp), %xmm8
mulps %xmm15, %xmm8
mulps %xmm0, %xmm2
addps %xmm6, %xmm2
movaps 0x110(%rsp), %xmm6
mulps %xmm0, %xmm6
addps %xmm8, %xmm6
movaps %xmm6, %xmm8
mulps %xmm15, %xmm9
mulps %xmm15, %xmm7
movaps %xmm15, 0x1d0(%rsp)
mulps %xmm15, %xmm5
mulps %xmm0, %xmm14
addps %xmm9, %xmm14
mulps %xmm0, %xmm1
addps %xmm7, %xmm1
movaps %xmm0, 0x3e0(%rsp)
movaps 0x200(%rsp), %xmm6
mulps %xmm0, %xmm6
addps %xmm5, %xmm6
movaps %xmm6, %xmm5
movaps %xmm3, %xmm6
movaps %xmm2, 0xf0(%rsp)
subps %xmm2, %xmm6
movaps %xmm4, %xmm0
movaps %xmm4, %xmm9
movaps %xmm8, 0x110(%rsp)
subps %xmm8, %xmm9
movaps %xmm1, 0x400(%rsp)
movaps %xmm1, %xmm7
subps %xmm3, %xmm7
movaps %xmm5, 0x200(%rsp)
movaps %xmm5, %xmm8
subps %xmm4, %xmm8
movaps %xmm6, %xmm4
mulps %xmm8, %xmm4
movaps %xmm9, %xmm1
mulps %xmm7, %xmm1
subps %xmm4, %xmm1
movaps %xmm14, 0x3f0(%rsp)
movaps %xmm14, %xmm4
subps %xmm12, %xmm4
movss (%r9,%rcx,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movaps %xmm12, %xmm10
movaps %xmm12, %xmm15
subps %xmm5, %xmm15
movss 0x10(%r9,%rcx,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
subps %xmm5, %xmm3
movss 0x40(%r9,%rcx,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0x50(%r9,%rcx,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movaps %xmm3, %xmm5
mulps %xmm11, %xmm5
movaps %xmm15, %xmm13
mulps %xmm12, %xmm13
subps %xmm5, %xmm13
subps 0x60(%rsp), %xmm10
movaps %xmm10, %xmm2
mulps %xmm8, %xmm2
mulps %xmm13, %xmm8
mulps %xmm9, %xmm13
mulps %xmm4, %xmm9
subps %xmm9, %xmm2
movss 0x20(%r9,%rcx,4), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
subps %xmm9, %xmm0
movss 0x60(%r9,%rcx,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm15, %xmm9
movaps %xmm15, 0x10(%rsp)
mulps %xmm14, %xmm9
movaps %xmm0, %xmm15
mulps %xmm11, %xmm15
subps %xmm9, %xmm15
movaps %xmm10, %xmm9
mulps %xmm7, %xmm9
mulps %xmm15, %xmm7
mulps %xmm6, %xmm15
mulps %xmm4, %xmm6
subps %xmm9, %xmm6
movaps %xmm0, 0x40(%rsp)
mulps %xmm12, %xmm0
movaps %xmm3, 0x1c0(%rsp)
movaps %xmm3, %xmm9
mulps %xmm14, %xmm9
subps %xmm0, %xmm9
mulps %xmm6, %xmm14
movaps %xmm2, 0x1a0(%rsp)
mulps %xmm2, %xmm12
addps %xmm14, %xmm12
movaps %xmm1, 0x1b0(%rsp)
mulps %xmm1, %xmm11
addps %xmm12, %xmm11
addps %xmm8, %xmm7
mulps %xmm9, %xmm4
addps %xmm7, %xmm4
addps %xmm13, %xmm15
xorps %xmm13, %xmm13
movaps 0x1d5420c(%rip), %xmm0 # 0x1eec6d0
mulps %xmm10, %xmm9
addps %xmm15, %xmm9
movaps 0x1d541ec(%rip), %xmm15 # 0x1eec6c0
movaps %xmm11, %xmm7
andps %xmm0, %xmm7
xorps %xmm7, %xmm4
xorps %xmm7, %xmm9
movaps %xmm4, %xmm0
cmpnltps %xmm13, %xmm0
movaps %xmm9, %xmm12
cmpnltps %xmm13, %xmm12
andps %xmm0, %xmm12
movaps %xmm11, %xmm8
cmpneqps %xmm13, %xmm11
andps %xmm11, %xmm12
andps %xmm15, %xmm8
movaps %xmm4, %xmm10
addps %xmm9, %xmm10
cmpleps %xmm8, %xmm10
andps %xmm12, %xmm10
leaq (%rbp,%rax), %r12
movl 0x20(%rbp,%rax), %r15d
movl 0x24(%rbp,%rax), %edx
movl 0x28(%rbp,%rax), %ebx
movl 0x2c(%rbp,%rax), %eax
movmskps %xmm10, %r14d
movq 0x100(%rsp), %r13
movups (%r13,%r15,4), %xmm14
movups (%rsi,%r15,4), %xmm13
movq 0x1f0(%rsp), %r15
movups (%r15,%rdx,4), %xmm5
movups (%r8,%rdx,4), %xmm11
movq 0x1e0(%rsp), %rdx
movups (%rdx,%rbx,4), %xmm1
movups (%rdi,%rbx,4), %xmm3
movups (%r10,%rax,4), %xmm2
movups (%r11,%rax,4), %xmm12
testl %r14d, %r14d
jne 0x1987d7
movaps %xmm14, %xmm0
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
unpckhps %xmm1, %xmm14 # xmm14 = xmm14[2],xmm1[2],xmm14[3],xmm1[3]
movaps %xmm5, %xmm1
unpcklps %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
unpckhps %xmm2, %xmm5 # xmm5 = xmm5[2],xmm2[2],xmm5[3],xmm2[3]
unpcklps %xmm5, %xmm14 # xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
movaps %xmm0, %xmm7
unpcklps %xmm1, %xmm7 # xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
unpckhps %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movaps %xmm13, %xmm1
unpcklps %xmm3, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
unpckhps %xmm3, %xmm13 # xmm13 = xmm13[2],xmm3[2],xmm13[3],xmm3[3]
movaps %xmm11, %xmm2
unpcklps %xmm12, %xmm2 # xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
unpckhps %xmm12, %xmm11 # xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
unpcklps %xmm11, %xmm13 # xmm13 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
movaps %xmm1, %xmm3
unpcklps %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
unpckhps %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
movaps 0x1d0(%rsp), %xmm2
mulps %xmm2, %xmm3
mulps %xmm2, %xmm1
mulps %xmm2, %xmm13
movaps 0x3e0(%rsp), %xmm2
mulps %xmm2, %xmm7
addps %xmm3, %xmm7
mulps %xmm2, %xmm0
addps %xmm1, %xmm0
mulps %xmm2, %xmm14
addps %xmm13, %xmm14
movaps %xmm7, %xmm1
subps 0x3f0(%rsp), %xmm1
movaps %xmm1, %xmm5
movaps %xmm0, %xmm1
subps 0x400(%rsp), %xmm1
movaps %xmm1, %xmm3
movaps %xmm14, %xmm8
subps 0x200(%rsp), %xmm8
movaps 0x60(%rsp), %xmm15
subps %xmm7, %xmm15
movaps 0xf0(%rsp), %xmm4
subps %xmm0, %xmm4
movaps 0x110(%rsp), %xmm1
subps %xmm14, %xmm1
movaps %xmm1, %xmm2
mulps %xmm3, %xmm2
movaps %xmm3, %xmm10
movaps %xmm3, 0x40(%rsp)
movaps %xmm4, %xmm6
mulps %xmm8, %xmm6
subps %xmm2, %xmm6
movaps %xmm15, %xmm3
mulps %xmm8, %xmm3
movaps %xmm1, %xmm2
movaps %xmm5, %xmm9
movaps %xmm5, 0x10(%rsp)
mulps %xmm5, %xmm2
subps %xmm3, %xmm2
movaps %xmm4, %xmm5
mulps %xmm9, %xmm5
movaps %xmm15, %xmm3
mulps %xmm10, %xmm3
subps %xmm5, %xmm3
movss (%r9,%rcx,4), %xmm5
movss 0x10(%r9,%rcx,4), %xmm10
movss 0x20(%r9,%rcx,4), %xmm12
movss 0x40(%r9,%rcx,4), %xmm9
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x50(%r9,%rcx,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0x60(%r9,%rcx,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
subps %xmm5, %xmm7
subps %xmm10, %xmm0
subps %xmm12, %xmm14
movaps %xmm14, 0x100(%rsp)
movaps %xmm14, %xmm10
mulps %xmm11, %xmm10
movaps %xmm0, %xmm5
mulps %xmm13, %xmm5
subps %xmm10, %xmm5
movaps %xmm7, %xmm12
mulps %xmm13, %xmm12
movaps %xmm14, %xmm10
mulps %xmm9, %xmm10
subps %xmm12, %xmm10
movaps %xmm0, %xmm14
mulps %xmm9, %xmm14
movaps %xmm7, %xmm12
mulps %xmm11, %xmm12
subps %xmm14, %xmm12
movaps %xmm6, %xmm14
mulps %xmm3, %xmm13
mulps %xmm2, %xmm11
addps %xmm13, %xmm11
movaps 0x1d53fa6(%rip), %xmm13 # 0x1eec6d0
mulps %xmm6, %xmm9
addps %xmm11, %xmm9
mulps %xmm12, %xmm1
mulps %xmm10, %xmm4
addps %xmm1, %xmm4
mulps %xmm5, %xmm15
addps %xmm4, %xmm15
mulps %xmm8, %xmm12
mulps 0x40(%rsp), %xmm10
addps %xmm12, %xmm10
movaps %xmm9, %xmm1
andps %xmm13, %xmm1
xorps %xmm8, %xmm8
xorps %xmm1, %xmm15
mulps 0x10(%rsp), %xmm5
addps %xmm10, %xmm5
xorps %xmm1, %xmm5
movaps %xmm15, %xmm6
cmpnltps %xmm8, %xmm6
movaps %xmm5, %xmm10
cmpnltps %xmm8, %xmm10
andps %xmm6, %xmm10
movaps %xmm9, %xmm6
andps 0x1d53f30(%rip), %xmm6 # 0x1eec6c0
cmpneqps %xmm8, %xmm9
andps %xmm9, %xmm10
movaps %xmm15, %xmm8
addps %xmm5, %xmm8
cmpleps %xmm6, %xmm8
andps %xmm10, %xmm8
movmskps %xmm8, %eax
testl %eax, %eax
jne 0x198cd1
movq 0x218(%rsp), %rax
incq %rax
cmpq 0x210(%rsp), %rax
setb %dl
jne 0x197fa0
jmp 0x199187
movaps 0x40(%rsp), %xmm0
mulps %xmm6, %xmm0
movaps 0x1c0(%rsp), %xmm15
mulps 0x1a0(%rsp), %xmm15
addps %xmm0, %xmm15
movaps 0x10(%rsp), %xmm0
mulps 0x1b0(%rsp), %xmm0
addps %xmm15, %xmm0
xorps %xmm0, %xmm7
movss 0x30(%r9,%rcx,4), %xmm0
movss 0x80(%r9,%rcx,4), %xmm15
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm8, %xmm0
cmpltps %xmm7, %xmm0
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulps %xmm8, %xmm15
movaps %xmm15, 0x10(%rsp)
movaps %xmm7, %xmm15
cmpleps 0x10(%rsp), %xmm15
andps %xmm0, %xmm15
andps %xmm10, %xmm15
movmskps %xmm15, %eax
testl %eax, %eax
je 0x19857b
movaps %xmm1, 0x3a0(%rsp)
movaps %xmm2, 0x3b0(%rsp)
movaps %xmm5, 0x1c0(%rsp)
movaps %xmm3, 0x1e0(%rsp)
movaps %xmm12, 0x1f0(%rsp)
movaps %xmm11, 0x40(%rsp)
movq %r12, 0x10(%rsp)
movaps %xmm4, 0x220(%rsp)
movaps %xmm9, 0x230(%rsp)
movaps %xmm7, 0x240(%rsp)
movaps %xmm8, 0x250(%rsp)
movaps 0x1b0(%rsp), %xmm1
movaps %xmm1, 0x260(%rsp)
movaps 0x1a0(%rsp), %xmm5
movaps %xmm5, 0x270(%rsp)
movaps %xmm6, 0x280(%rsp)
movaps %xmm15, 0x290(%rsp)
movdqa 0x3d0(%rsp), %xmm0
movdqa %xmm0, 0x300(%rsp)
rcpps %xmm8, %xmm2
mulps %xmm2, %xmm8
movaps 0x1d54113(%rip), %xmm10 # 0x1eeca10
movaps %xmm10, %xmm3
subps %xmm8, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
mulps %xmm3, %xmm7
movaps %xmm7, 0x2c0(%rsp)
mulps %xmm3, %xmm4
minps %xmm10, %xmm4
mulps %xmm9, %xmm3
minps %xmm10, %xmm3
movaps %xmm10, %xmm2
subps %xmm4, %xmm2
movaps %xmm10, %xmm7
subps %xmm3, %xmm7
blendvps %xmm0, %xmm2, %xmm4
movaps %xmm4, 0x2a0(%rsp)
blendvps %xmm0, %xmm7, %xmm3
movaps %xmm3, 0x2b0(%rsp)
movaps %xmm1, 0x2d0(%rsp)
movaps %xmm5, 0x2e0(%rsp)
movaps %xmm6, 0x2f0(%rsp)
movzbl %al, %r14d
movaps 0xe0(%rsp), %xmm5
movaps 0xd0(%rsp), %xmm6
movaps 0xc0(%rsp), %xmm7
movaps 0xb0(%rsp), %xmm8
movaps 0xa0(%rsp), %xmm9
movaps 0x90(%rsp), %xmm10
movaps 0x80(%rsp), %xmm11
movaps 0x70(%rsp), %xmm12
leaq 0x410(%rsp), %r12
movq 0x28(%rsp), %r13
movq 0x8(%rsp), %rsi
movq 0x168(%rsp), %rdi
bsfq %r14, %r15
movq 0x10(%rsp), %rax
movl 0x40(%rax,%r15,4), %edx
movq 0x1e8(%rdi), %rax
movq (%rax,%rdx,8), %rbx
movl 0x90(%r9,%rcx,4), %eax
testl %eax, 0x34(%rbx)
je 0x198a09
movq 0xbf8(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x198a21
cmpq $0x0, 0x48(%rbx)
jne 0x198a21
xorl %eax, %eax
jmp 0x198a0f
btcq %r15, %r14
movb $0x1, %al
testb %al, %al
je 0x19921f
testq %r14, %r14
jne 0x1989c5
jmp 0x19914e
movaps %xmm13, 0x1b0(%rsp)
movaps %xmm14, 0x100(%rsp)
movss 0x80(%r9,%rcx,4), %xmm0
movss %xmm0, 0x1a0(%rsp)
movss 0x2c0(%rsp,%r15,4), %xmm0
movss 0x2a0(%rsp,%r15,4), %xmm1
movss 0x2b0(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%r9,%rcx,4)
movq 0xbf8(%rsp), %rax
movq 0x8(%rax), %rax
movd %edx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movq 0x10(%rsp), %rcx
movd 0x50(%rcx,%r15,4), %xmm3
pshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x2d0(%rsp,%r15,4), %xmm4
movss 0x2e0(%rsp,%r15,4), %xmm5
movss 0x2f0(%rsp,%r15,4), %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm4, 0x310(%rsp)
movaps %xmm5, 0x320(%rsp)
movaps %xmm6, 0x330(%rsp)
movaps %xmm1, 0x340(%rsp)
movaps %xmm2, 0x350(%rsp)
movdqa %xmm3, 0x360(%rsp)
movdqa %xmm0, 0x370(%rsp)
leaq 0x380(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x380(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x390(%rsp)
movq 0x128(%rsp), %rcx
movdqa (%rcx), %xmm0
movdqa %xmm0, 0x50(%rsp)
leaq 0x50(%rsp), %rcx
movq %rcx, 0x170(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x178(%rsp)
movq %rax, 0x180(%rsp)
movq %r9, 0x188(%rsp)
leaq 0x310(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x4, 0x198(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
je 0x198ba3
leaq 0x170(%rsp), %rdi
callq *%rax
movdqa 0x50(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x198c15
movq 0xbf8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x198bda
testb $0x2, (%rcx)
jne 0x198bd0
testb $0x40, 0x3e(%rbx)
je 0x198bda
leaq 0x170(%rsp), %rdi
callq *%rax
movdqa 0x50(%rsp), %xmm0
pcmpeqd 0x1d52e28(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d5322c(%rip), %xmm1 # 0x1eebe20
movq 0x188(%rsp), %rax
movaps 0x1d52dfd(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x198c25
pcmpeqd 0x1d52df3(%rip), %xmm1 # 0x1eeba10
pxor 0x1d531fb(%rip), %xmm1 # 0x1eebe20
pslld $0x1f, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
sete %al
jne 0x198c54
movq 0x38(%rsp), %rcx
movq 0x30(%rsp), %rdx
movd 0x1a0(%rsp), %xmm0
movd %xmm0, 0x80(%rcx,%rdx,4)
btcq %r15, %r14
movq 0x38(%rsp), %r9
movq 0x30(%rsp), %rcx
movaps 0xe0(%rsp), %xmm5
movaps 0xd0(%rsp), %xmm6
movaps 0xc0(%rsp), %xmm7
movaps 0xb0(%rsp), %xmm8
movaps 0xa0(%rsp), %xmm9
movaps 0x90(%rsp), %xmm10
movaps 0x80(%rsp), %xmm11
movaps 0x70(%rsp), %xmm12
leaq 0x410(%rsp), %r12
movq 0x28(%rsp), %r13
movq 0x8(%rsp), %rsi
movq 0x168(%rsp), %rdi
movaps 0x100(%rsp), %xmm14
movaps 0x1b0(%rsp), %xmm13
jmp 0x198a0f
movaps 0x100(%rsp), %xmm4
mulps %xmm3, %xmm4
mulps %xmm2, %xmm0
addps %xmm4, %xmm0
mulps %xmm14, %xmm7
addps %xmm0, %xmm7
xorps %xmm7, %xmm1
movss 0x30(%r9,%rcx,4), %xmm4
movss 0x80(%r9,%rcx,4), %xmm9
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm6, %xmm4
cmpltps %xmm1, %xmm4
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulps %xmm6, %xmm9
movaps %xmm1, %xmm0
cmpleps %xmm9, %xmm0
andps %xmm4, %xmm0
andps %xmm8, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1987b6
movq %r12, 0x10(%rsp)
movaps %xmm15, 0x220(%rsp)
movaps %xmm5, 0x230(%rsp)
movaps %xmm1, 0x240(%rsp)
movaps %xmm6, 0x250(%rsp)
movaps %xmm14, 0x260(%rsp)
movaps %xmm2, 0x270(%rsp)
movaps %xmm3, 0x280(%rsp)
movaps %xmm0, 0x290(%rsp)
movdqa 0x3c0(%rsp), %xmm0
movdqa %xmm0, 0x300(%rsp)
movq 0xbf8(%rsp), %rdx
movq (%rdx), %rdi
rcpps %xmm6, %xmm8
mulps %xmm8, %xmm6
movaps 0x1d53c71(%rip), %xmm9 # 0x1eeca10
movaps %xmm9, %xmm4
subps %xmm6, %xmm4
mulps %xmm8, %xmm4
addps %xmm8, %xmm4
mulps %xmm4, %xmm1
movaps %xmm1, 0x2c0(%rsp)
mulps %xmm4, %xmm15
minps %xmm9, %xmm15
mulps %xmm5, %xmm4
minps %xmm9, %xmm4
movaps %xmm9, %xmm5
subps %xmm15, %xmm5
movaps %xmm9, %xmm6
subps %xmm4, %xmm6
blendvps %xmm0, %xmm5, %xmm15
movaps %xmm15, 0x2a0(%rsp)
blendvps %xmm0, %xmm6, %xmm4
movaps %xmm4, 0x2b0(%rsp)
movaps %xmm14, 0x2d0(%rsp)
movaps %xmm2, 0x2e0(%rsp)
movaps %xmm3, 0x2f0(%rsp)
movzbl %al, %r14d
movaps 0xe0(%rsp), %xmm5
movaps 0xd0(%rsp), %xmm6
movaps 0xc0(%rsp), %xmm7
movaps 0xb0(%rsp), %xmm8
movaps 0xa0(%rsp), %xmm9
movaps 0x90(%rsp), %xmm10
movaps 0x80(%rsp), %xmm11
movaps 0x70(%rsp), %xmm12
leaq 0x410(%rsp), %r12
movq 0x28(%rsp), %r13
movq 0x8(%rsp), %rsi
bsfq %r14, %r15
movq 0x10(%rsp), %rax
movl 0x40(%rax,%r15,4), %edx
movq 0x1e8(%rdi), %rax
movq (%rax,%rdx,8), %rbx
movl 0x90(%r9,%rcx,4), %eax
testl %eax, 0x34(%rbx)
je 0x198ea8
movq 0xbf8(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x198ec0
cmpq $0x0, 0x48(%rbx)
jne 0x198ec0
xorl %eax, %eax
jmp 0x198eae
btcq %r15, %r14
movb $0x1, %al
testb %al, %al
je 0x19921f
testq %r14, %r14
jne 0x198e64
jmp 0x1987b6
movq %rdi, 0x60(%rsp)
movss 0x80(%r9,%rcx,4), %xmm0
movss %xmm0, 0x110(%rsp)
movss 0x2c0(%rsp,%r15,4), %xmm0
movss 0x2a0(%rsp,%r15,4), %xmm1
movss 0x2b0(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%r9,%rcx,4)
movq 0xbf8(%rsp), %rax
movq 0x8(%rax), %rax
movd %edx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movq 0x10(%rsp), %rcx
movd 0x50(%rcx,%r15,4), %xmm3
pshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x2d0(%rsp,%r15,4), %xmm4
movss 0x2e0(%rsp,%r15,4), %xmm5
movss 0x2f0(%rsp,%r15,4), %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm4, 0x310(%rsp)
movaps %xmm5, 0x320(%rsp)
movaps %xmm6, 0x330(%rsp)
movaps %xmm1, 0x340(%rsp)
movaps %xmm2, 0x350(%rsp)
movdqa %xmm3, 0x360(%rsp)
movdqa %xmm0, 0x370(%rsp)
leaq 0x380(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x380(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x390(%rsp)
movq 0x128(%rsp), %rcx
movdqa (%rcx), %xmm0
movdqa %xmm0, 0x50(%rsp)
leaq 0x50(%rsp), %rcx
movq %rcx, 0x170(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x178(%rsp)
movq %rax, 0x180(%rsp)
movq %r9, 0x188(%rsp)
leaq 0x310(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x4, 0x198(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
je 0x199035
leaq 0x170(%rsp), %rdi
callq *%rax
movdqa 0x50(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1990a7
movq 0xbf8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x19906c
testb $0x2, (%rcx)
jne 0x199062
testb $0x40, 0x3e(%rbx)
je 0x19906c
leaq 0x170(%rsp), %rdi
callq *%rax
movdqa 0x50(%rsp), %xmm0
pcmpeqd 0x1d52996(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d52d9a(%rip), %xmm1 # 0x1eebe20
movq 0x188(%rsp), %rax
movaps 0x1d5296b(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1990b7
pcmpeqd 0x1d52961(%rip), %xmm1 # 0x1eeba10
pxor 0x1d52d69(%rip), %xmm1 # 0x1eebe20
pslld $0x1f, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
sete %al
jne 0x1990e6
movq 0x38(%rsp), %rcx
movq 0x30(%rsp), %rdx
movd 0x110(%rsp), %xmm0
movd %xmm0, 0x80(%rcx,%rdx,4)
btcq %r15, %r14
movq 0x38(%rsp), %r9
movq 0x30(%rsp), %rcx
movaps 0xe0(%rsp), %xmm5
movaps 0xd0(%rsp), %xmm6
movaps 0xc0(%rsp), %xmm7
movaps 0xb0(%rsp), %xmm8
movaps 0xa0(%rsp), %xmm9
movaps 0x90(%rsp), %xmm10
movaps 0x80(%rsp), %xmm11
movaps 0x70(%rsp), %xmm12
leaq 0x410(%rsp), %r12
movq 0x28(%rsp), %r13
movq 0x8(%rsp), %rsi
movq 0x60(%rsp), %rdi
jmp 0x198eae
movq 0x10(%rsp), %r12
movaps 0x40(%rsp), %xmm11
movaps 0x1f0(%rsp), %xmm12
movaps 0x1e0(%rsp), %xmm3
movaps 0x1c0(%rsp), %xmm5
movaps 0x3b0(%rsp), %xmm2
movaps 0x3a0(%rsp), %xmm1
jmp 0x19857b
movaps 0xe0(%rsp), %xmm5
movaps 0xd0(%rsp), %xmm6
movaps 0xc0(%rsp), %xmm7
movaps 0xb0(%rsp), %xmm8
movq 0x160(%rsp), %rdi
movaps 0xa0(%rsp), %xmm9
movaps 0x90(%rsp), %xmm10
movq 0x158(%rsp), %r8
movq 0x150(%rsp), %r10
movq 0x148(%rsp), %r11
movq 0x140(%rsp), %rbx
movq 0x138(%rsp), %r14
movq 0x130(%rsp), %r15
movaps 0x80(%rsp), %xmm11
movaps 0x70(%rsp), %xmm12
leaq 0x410(%rsp), %r12
movq 0x28(%rsp), %r13
movq 0x8(%rsp), %rsi
xorl %eax, %eax
testb $0x3, %al
je 0x197db8
jmp 0x199271
xorl %eax, %eax
testb $0x1, 0x7(%rsp)
movq 0x160(%rsp), %rdi
movq 0x158(%rsp), %r8
movq 0x150(%rsp), %r10
movq 0x148(%rsp), %r11
movq 0x140(%rsp), %rbx
movq 0x138(%rsp), %r14
movq 0x130(%rsp), %r15
je 0x199215
movl $0xff800000, 0x80(%r9,%rcx,4) # imm = 0xFF800000
pushq $0x1
popq %rax
jmp 0x199215
cmpq %r12, %r13
setne %al
addq $0xbb8, %rsp # imm = 0xBB8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::QuadMiMBIntersectorKPluecker<4, 4, true>>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::QuadMIntersectorKPluecker<4, 4, true>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x13c8, %rsp # imm = 0x13C8
movq %r9, %r14
movq %rcx, %r12
movq 0x1408(%rsp), %r10
leaq 0x490(%rsp), %r11
movq %rdx, -0x10(%r11)
andl $0x0, -0x8(%r11)
movq 0x1400(%rsp), %rax
movss (%rax,%rcx,4), %xmm8
movss 0x10(%rax,%rcx,4), %xmm9
movss 0x20(%rax,%rcx,4), %xmm10
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm11
movss 0x70(%rax,%rcx,4), %xmm12
movss 0x80(%rax,%rcx,4), %xmm13
movss 0x1d86c0a(%rip), %xmm14 # 0x1f1ff10
movaps %xmm11, %xmm15
mulss %xmm14, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
movaps %xmm12, %xmm6
mulss %xmm14, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulss %xmm13, %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movss 0x1d86be1(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm0, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulss %xmm0, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %rbx
movslq 0xa0(%rax,%rcx,4), %r15
movslq 0xb0(%rax,%rcx,4), %r13
movq %rbx, %rbp
xorq $0x10, %rbp
movq %r15, %rcx
xorq $0x10, %rcx
movq %rcx, 0x2c8(%rsp)
movq %r13, %rcx
xorq $0x10, %rcx
movq %rcx, 0x2c0(%rsp)
movss 0xc0(%rax,%r12,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0xd0(%rax,%r12,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
leaq 0x1fb6bcf(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, 0x420(%rsp)
movaps 0xf0(%rax), %xmm0
movaps %xmm0, 0x410(%rsp)
pushq $0x1
popq %rdx
movl %r12d, %ecx
shll %cl, %edx
movslq %edx, %rcx
shlq $0x4, %rcx
addq %rax, %rcx
movq %rcx, 0x168(%rsp)
movq %r9, 0x2b0(%rsp)
movq %r12, 0x2a8(%rsp)
movaps %xmm8, 0x400(%rsp)
movaps %xmm9, 0x3f0(%rsp)
movaps %xmm10, 0x3e0(%rsp)
movaps %xmm11, 0x3d0(%rsp)
movaps %xmm12, 0x3c0(%rsp)
movaps %xmm13, 0x3b0(%rsp)
movaps %xmm14, 0x3a0(%rsp)
movaps %xmm15, 0x390(%rsp)
movaps %xmm6, 0x2f0(%rsp)
movq %rbx, 0x298(%rsp)
movq %r15, 0x290(%rsp)
movq %r13, 0x288(%rsp)
movq %rbp, 0x280(%rsp)
movaps %xmm7, 0x2e0(%rsp)
leaq 0x480(%rsp), %rax
cmpq %rax, %r11
je 0x19b1de
movss -0x8(%r11), %xmm0
addq $-0x10, %r11
ucomiss 0x80(%r14,%r12,4), %xmm0
ja 0x19946d
movq (%r11), %r9
testb $0x8, %r9b
jne 0x19958f
movq %r9, %rax
andq $-0x10, %rax
movss 0x70(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rax,%rbx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rbx), %xmm3
subps %xmm8, %xmm3
mulps %xmm15, %xmm3
movaps %xmm7, %xmm2
movaps 0x80(%rax,%r15), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%r15), %xmm4
maxps %xmm3, %xmm2
subps %xmm9, %xmm4
mulps %xmm6, %xmm4
movaps 0x80(%rax,%r13), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r13), %xmm3
subps %xmm10, %xmm3
mulps %xmm14, %xmm3
maxps %xmm3, %xmm4
maxps %xmm4, %xmm2
movaps 0x80(%rax,%rbp), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%rbp), %xmm4
subps %xmm8, %xmm4
movq 0x2c8(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm5
mulps %xmm0, %xmm5
addps 0x20(%rax,%rcx), %xmm5
mulps %xmm11, %xmm4
subps %xmm9, %xmm5
mulps %xmm12, %xmm5
movq 0x2c0(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rcx), %xmm3
subps %xmm10, %xmm3
mulps %xmm13, %xmm3
minps %xmm3, %xmm5
movaps %xmm1, %xmm3
minps %xmm4, %xmm3
minps %xmm5, %xmm3
movl %r9d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1995da
movaps %xmm2, %xmm0
cmpleps %xmm3, %xmm0
pslld $0x1f, %xmm0
movmskps %xmm0, %esi
movaps %xmm2, 0x170(%rsp)
testb $0x8, %r9b
jne 0x1995d6
testq %rsi, %rsi
je 0x1995fc
andq $-0x10, %r9
bsfq %rsi, %rcx
leaq -0x1(%rsi), %r8
xorl %eax, %eax
movq (%r9,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rsi, %r8
jne 0x199601
movq %rdx, %r9
testl %eax, %eax
je 0x199496
jmp 0x199800
pushq $0x6
jmp 0x1995fe
movaps %xmm2, %xmm4
cmpleps %xmm3, %xmm4
movaps 0xe0(%rax), %xmm3
cmpleps %xmm0, %xmm3
cmpltps 0xf0(%rax), %xmm0
andps %xmm3, %xmm0
andps %xmm4, %xmm0
jmp 0x19957f
pushq $0x4
popq %rax
jmp 0x1995c9
movq %rsi, 0x8(%rsp)
movl 0x170(%rsp,%rcx,4), %edi
movq %r9, %rsi
bsfq %r8, %r9
leaq -0x1(%r8), %rcx
movq %rsi, (%rsp)
movq (%rsi,%r9,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
movl 0x170(%rsp,%r9,4), %r9d
andq %r8, %rcx
jne 0x199671
leaq 0x10(%r11), %rcx
cmpl %r9d, %edi
jae 0x19965a
movq %rsi, (%r11)
movl %r9d, 0x8(%r11)
movq %rcx, %r11
movq %rdx, %r9
jmp 0x199667
movq %rdx, (%r11)
movl %edi, 0x8(%r11)
movq %rcx, %r11
movq %rsi, %r9
movq 0x8(%rsp), %rsi
jmp 0x1995c9
movq %rdx, %xmm2
movd %edi, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %rsi, %xmm4
movd %r9d, %xmm0
punpcklqdq %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0]
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdx
movq (%rsp), %r8
movq (%r8,%rsi,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movq %rdi, %xmm3
movd 0x170(%rsp,%rsi,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rcx, %rdx
jne 0x19972c
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
blendvps %xmm0, %xmm4, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm3, %xmm5
movaps %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movaps %xmm2, (%r11)
movaps %xmm3, 0x10(%r11)
movq %xmm4, %r9
addq $0x20, %r11
jmp 0x199667
bsfq %rdx, %rcx
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
movq %rdx, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
movd 0x170(%rsp,%rcx,4), %xmm7
blendvps %xmm0, %xmm4, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movaps 0x2e0(%rsp), %xmm7
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm4
blendvps %xmm0, %xmm3, %xmm4
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm4, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm5
movaps %xmm6, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm6, %xmm5
movaps 0x2f0(%rsp), %xmm6
movaps %xmm2, (%r11)
movaps %xmm5, 0x10(%r11)
movaps %xmm4, 0x20(%r11)
movq %xmm3, %r9
addq $0x30, %r11
jmp 0x199667
cmpl $0x6, %eax
jne 0x19946d
movq %r11, 0x2a0(%rsp)
movl %r9d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x2b8(%rsp)
je 0x19b14b
andq $-0x10, %r9
xorl %eax, %eax
movq %rsi, 0x8(%rsp)
movq %r9, (%rsp)
movq %rax, 0x2d8(%rsp)
imulq $0x60, %rax, %r8
movq (%r10), %rcx
movss 0x70(%r14,%r12,4), %xmm11
movl 0x40(%r9,%r8), %eax
movq %rcx, 0x2d0(%rsp)
movq 0x1e8(%rcx), %rsi
movq (%rsi,%rax,8), %rax
movss 0x28(%rax), %xmm1
movss 0x2c(%rax), %xmm2
movss 0x30(%rax), %xmm3
subss %xmm2, %xmm11
subss %xmm2, %xmm3
divss %xmm3, %xmm11
mulss %xmm1, %xmm11
roundss $0x9, %xmm11, %xmm2
addss 0x1d57135(%rip), %xmm1 # 0x1ef09cc
minss %xmm1, %xmm2
xorps %xmm6, %xmm6
maxss %xmm2, %xmm6
cvttss2si %xmm6, %ecx
movslq %ecx, %r14
movq 0xe0(%rax), %rbx
imulq $0x38, %r14, %r9
movq (%rbx,%r9), %rax
movq (%rsp), %rcx
movl (%rcx,%r8), %r10d
movq (%rsp), %rcx
movl 0x4(%rcx,%r8), %edx
movq %rdx, 0x250(%rsp)
movups (%rax,%r10,4), %xmm15
movq (%rsp), %rcx
movl 0x10(%rcx,%r8), %ecx
movq %rcx, 0xe0(%rsp)
movups (%rax,%rcx,4), %xmm14
movq (%rsp), %rcx
movl 0x20(%rcx,%r8), %ecx
movq %rcx, 0x70(%rsp)
movups (%rax,%rcx,4), %xmm13
movq (%rsp), %rcx
movl 0x30(%rcx,%r8), %ecx
movq %rcx, 0x100(%rsp)
movups (%rax,%rcx,4), %xmm0
movaps %xmm0, 0x30(%rsp)
movq (%rsp), %rax
movl 0x44(%rax,%r8), %eax
movq (%rsi,%rax,8), %rax
movq 0xe0(%rax), %rbp
movq (%rbp,%r9), %rax
movups (%rax,%rdx,4), %xmm8
movq (%rsp), %rcx
movl 0x14(%rcx,%r8), %ecx
movq %rcx, 0x260(%rsp)
movups (%rax,%rcx,4), %xmm2
movq (%rsp), %rcx
movl 0x24(%rcx,%r8), %ecx
movq %rcx, 0x60(%rsp)
movups (%rax,%rcx,4), %xmm4
movq (%rsp), %rcx
movl 0x34(%rcx,%r8), %ecx
movq %rcx, 0xa0(%rsp)
movups (%rax,%rcx,4), %xmm3
movq (%rsp), %rax
movl 0x48(%rax,%r8), %eax
movq (%rsi,%rax,8), %rax
movq 0xe0(%rax), %rdx
movq (%rdx,%r9), %rax
movq (%rsp), %rcx
movl 0x8(%rcx,%r8), %ecx
movups (%rax,%rcx,4), %xmm10
movq (%rsp), %rdi
movl 0x18(%rdi,%r8), %r13d
movups (%rax,%r13,4), %xmm9
movq (%rsp), %rdi
movl 0x28(%rdi,%r8), %r11d
movups (%rax,%r11,4), %xmm7
movq (%rsp), %rdi
movl 0x38(%rdi,%r8), %edi
movq %rdi, 0x270(%rsp)
movups (%rax,%rdi,4), %xmm5
movq (%rsp), %rax
movl 0x4c(%rax,%r8), %eax
movq (%rsi,%rax,8), %rax
subss %xmm6, %xmm11
movq 0xe0(%rax), %rdi
movq (%rdi,%r9), %rax
movq (%rsp), %rsi
movl 0xc(%rsi,%r8), %esi
movups (%rax,%rsi,4), %xmm6
movq (%rsp), %r9
movl 0x1c(%r9,%r8), %r12d
movups (%rax,%r12,4), %xmm1
movaps %xmm15, %xmm0
unpcklps %xmm10, %xmm0 # xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
unpckhps %xmm10, %xmm15 # xmm15 = xmm15[2],xmm10[2],xmm15[3],xmm10[3]
movaps %xmm8, %xmm10
unpcklps %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
unpckhps %xmm6, %xmm8 # xmm8 = xmm8[2],xmm6[2],xmm8[3],xmm6[3]
movq (%rsp), %r9
movl 0x2c(%r9,%r8), %r15d
movups (%rax,%r15,4), %xmm12
unpcklps %xmm8, %xmm15 # xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1]
movaps %xmm15, 0x50(%rsp)
movaps %xmm0, %xmm6
unpcklps %xmm10, %xmm6 # xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1]
movaps %xmm6, 0x10(%rsp)
unpckhps %xmm10, %xmm0 # xmm0 = xmm0[2],xmm10[2],xmm0[3],xmm10[3]
movaps %xmm0, 0xf0(%rsp)
movaps %xmm14, %xmm10
unpcklps %xmm9, %xmm10 # xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
unpckhps %xmm9, %xmm14 # xmm14 = xmm14[2],xmm9[2],xmm14[3],xmm9[3]
movaps %xmm2, %xmm8
unpcklps %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
unpckhps %xmm1, %xmm2 # xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
movq (%rsp), %r9
movl 0x3c(%r9,%r8), %r9d
movups (%rax,%r9,4), %xmm6
unpcklps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
movaps %xmm14, 0x140(%rsp)
movaps %xmm10, %xmm0
unpcklps %xmm8, %xmm0 # xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
movaps %xmm0, 0x120(%rsp)
unpckhps %xmm8, %xmm10 # xmm10 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
movaps %xmm10, 0x130(%rsp)
movaps %xmm13, %xmm1
unpcklps %xmm7, %xmm1 # xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
unpckhps %xmm7, %xmm13 # xmm13 = xmm13[2],xmm7[2],xmm13[3],xmm7[3]
movaps %xmm4, %xmm7
unpcklps %xmm12, %xmm7 # xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
unpckhps %xmm12, %xmm4 # xmm4 = xmm4[2],xmm12[2],xmm4[3],xmm12[3]
incl %r14d
movslq %r14d, %rax
imulq $0x38, %rax, %r14
movq (%rbx,%r14), %rbx
movups (%rbx,%r10,4), %xmm2
movq 0x1408(%rsp), %r10
unpcklps %xmm4, %xmm13 # xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
movaps %xmm13, 0x80(%rsp)
movaps %xmm1, %xmm0
unpcklps %xmm7, %xmm0 # xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
movaps %xmm0, 0x150(%rsp)
unpckhps %xmm7, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
movaps 0x30(%rsp), %xmm13
movaps %xmm13, %xmm7
unpcklps %xmm5, %xmm7 # xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
unpckhps %xmm5, %xmm13 # xmm13 = xmm13[2],xmm5[2],xmm13[3],xmm5[3]
movaps %xmm3, %xmm4
unpcklps %xmm6, %xmm4 # xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
unpckhps %xmm6, %xmm3 # xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
movq (%rdx,%r14), %rdx
movups (%rdx,%rcx,4), %xmm5
unpcklps %xmm3, %xmm13 # xmm13 = xmm13[0],xmm3[0],xmm13[1],xmm3[1]
movaps %xmm13, 0x30(%rsp)
movaps %xmm7, %xmm0
unpcklps %xmm4, %xmm0 # xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
movaps %xmm0, 0x110(%rsp)
unpckhps %xmm4, %xmm7 # xmm7 = xmm7[2],xmm4[2],xmm7[3],xmm4[3]
movaps %xmm7, 0x20(%rsp)
movq (%rbp,%r14), %rcx
movaps %xmm2, %xmm8
unpcklps %xmm5, %xmm8 # xmm8 = xmm8[0],xmm5[0],xmm8[1],xmm5[1]
unpckhps %xmm5, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
movq 0x250(%rsp), %rax
movups (%rcx,%rax,4), %xmm3
movq (%rdi,%r14), %rdi
movq 0x2b0(%rsp), %r14
movups (%rdi,%rsi,4), %xmm4
movaps %xmm3, %xmm5
unpcklps %xmm4, %xmm5 # xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
unpckhps %xmm4, %xmm3 # xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
movaps %xmm8, %xmm13
unpcklps %xmm5, %xmm13 # xmm13 = xmm13[0],xmm5[0],xmm13[1],xmm5[1]
unpckhps %xmm5, %xmm8 # xmm8 = xmm8[2],xmm5[2],xmm8[3],xmm5[3]
movq 0xe0(%rsp), %rax
movups (%rbx,%rax,4), %xmm4
movups (%rdx,%r13,4), %xmm3
movaps %xmm4, %xmm7
unpcklps %xmm3, %xmm7 # xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
unpckhps %xmm3, %xmm4 # xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
movq 0x260(%rsp), %rax
movups (%rcx,%rax,4), %xmm3
movups (%rdi,%r12,4), %xmm5
movq 0x2a8(%rsp), %r12
movaps %xmm3, %xmm6
unpcklps %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
unpckhps %xmm5, %xmm3 # xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
unpcklps %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
movaps %xmm7, %xmm12
unpcklps %xmm6, %xmm12 # xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
unpckhps %xmm6, %xmm7 # xmm7 = xmm7[2],xmm6[2],xmm7[3],xmm6[3]
movq 0x70(%rsp), %rax
movups (%rbx,%rax,4), %xmm3
movups (%rdx,%r11,4), %xmm5
movaps %xmm3, %xmm6
unpcklps %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
unpckhps %xmm5, %xmm3 # xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
movq 0x60(%rsp), %rax
movups (%rcx,%rax,4), %xmm5
movups (%rdi,%r15,4), %xmm9
movaps %xmm5, %xmm10
unpcklps %xmm9, %xmm10 # xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
unpckhps %xmm9, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
unpcklps %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
movaps %xmm6, %xmm0
unpcklps %xmm10, %xmm0 # xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1]
unpckhps %xmm10, %xmm6 # xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3]
movq 0x100(%rsp), %rax
movups (%rbx,%rax,4), %xmm9
movq 0x270(%rsp), %rax
movups (%rdx,%rax,4), %xmm10
movaps %xmm9, %xmm5
unpcklps %xmm10, %xmm5 # xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
unpckhps %xmm10, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
movq 0xa0(%rsp), %rax
movups (%rcx,%rax,4), %xmm10
movups (%rdi,%r9,4), %xmm14
movq (%rsp), %r9
movaps %xmm10, %xmm15
unpcklps %xmm14, %xmm15 # xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
unpckhps %xmm14, %xmm10 # xmm10 = xmm10[2],xmm14[2],xmm10[3],xmm14[3]
unpcklps %xmm10, %xmm9 # xmm9 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
movaps %xmm5, %xmm10
unpcklps %xmm15, %xmm10 # xmm10 = xmm10[0],xmm15[0],xmm10[1],xmm15[1]
unpckhps %xmm15, %xmm5 # xmm5 = xmm5[2],xmm15[2],xmm5[3],xmm15[3]
movss 0x1d52abc(%rip), %xmm14 # 0x1eec714
subss %xmm11, %xmm14
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm11, %xmm13
movaps 0x10(%rsp), %xmm15
mulps %xmm14, %xmm15
addps %xmm13, %xmm15
movaps %xmm15, 0x10(%rsp)
mulps %xmm11, %xmm8
movaps 0xf0(%rsp), %xmm15
mulps %xmm14, %xmm15
addps %xmm8, %xmm15
mulps %xmm11, %xmm2
movaps 0x50(%rsp), %xmm13
mulps %xmm14, %xmm13
addps %xmm2, %xmm13
mulps %xmm11, %xmm12
movaps 0x120(%rsp), %xmm8
mulps %xmm14, %xmm8
addps %xmm12, %xmm8
mulps %xmm11, %xmm7
movaps 0x130(%rsp), %xmm2
mulps %xmm14, %xmm2
addps %xmm7, %xmm2
movaps %xmm2, %xmm7
mulps %xmm11, %xmm4
movaps 0x140(%rsp), %xmm12
mulps %xmm14, %xmm12
addps %xmm4, %xmm12
mulps %xmm11, %xmm0
movaps 0x150(%rsp), %xmm2
mulps %xmm14, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x150(%rsp)
mulps %xmm11, %xmm6
mulps %xmm14, %xmm1
addps %xmm6, %xmm1
movaps %xmm1, 0x100(%rsp)
mulps %xmm11, %xmm3
movaps 0x80(%rsp), %xmm1
mulps %xmm14, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x80(%rsp)
mulps %xmm11, %xmm10
mulps %xmm11, %xmm5
mulps %xmm9, %xmm11
movaps 0x110(%rsp), %xmm4
mulps %xmm14, %xmm4
addps %xmm10, %xmm4
movaps 0x20(%rsp), %xmm3
mulps %xmm14, %xmm3
addps %xmm5, %xmm3
movaps %xmm3, %xmm5
mulps 0x30(%rsp), %xmm14
addps %xmm11, %xmm14
movss (%r14,%r12,4), %xmm1
movss 0x10(%r14,%r12,4), %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps 0x10(%rsp), %xmm0
subps %xmm1, %xmm0
subps %xmm2, %xmm15
movaps %xmm8, 0x120(%rsp)
subps %xmm1, %xmm8
movaps %xmm8, 0x70(%rsp)
movaps %xmm4, 0x110(%rsp)
subps %xmm1, %xmm4
movaps %xmm4, %xmm1
movaps %xmm7, 0x130(%rsp)
movaps %xmm7, %xmm6
subps %xmm2, %xmm6
movaps %xmm3, 0x20(%rsp)
subps %xmm2, %xmm5
movss 0x20(%r14,%r12,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
subps %xmm3, %xmm13
movaps %xmm12, 0x140(%rsp)
movaps %xmm12, %xmm4
subps %xmm3, %xmm4
movaps %xmm14, 0x30(%rsp)
movaps %xmm14, %xmm2
subps %xmm3, %xmm2
movaps %xmm1, %xmm9
movaps %xmm1, 0x60(%rsp)
subps %xmm0, %xmm9
movaps %xmm5, %xmm11
subps %xmm15, %xmm11
movaps %xmm2, %xmm10
subps %xmm13, %xmm10
movaps %xmm5, %xmm3
addps %xmm15, %xmm3
movaps %xmm15, %xmm14
movaps %xmm2, %xmm7
addps %xmm13, %xmm7
movaps %xmm9, %xmm8
mulps %xmm3, %xmm8
mulps %xmm10, %xmm3
movaps %xmm11, %xmm12
mulps %xmm7, %xmm12
subps %xmm3, %xmm12
movaps %xmm1, %xmm3
movaps %xmm0, %xmm15
addps %xmm0, %xmm3
movaps %xmm9, 0xe0(%rsp)
mulps %xmm9, %xmm7
movaps %xmm3, %xmm9
movaps %xmm10, 0x270(%rsp)
mulps %xmm10, %xmm9
subps %xmm7, %xmm9
movaps %xmm11, 0x260(%rsp)
mulps %xmm11, %xmm3
subps %xmm3, %xmm8
movss 0x50(%r14,%r12,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0x60(%r14,%r12,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm11, %xmm8
mulps %xmm7, %xmm9
addps %xmm8, %xmm9
movss 0x40(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xa0(%rsp)
mulps %xmm0, %xmm12
addps %xmm9, %xmm12
movaps %xmm15, %xmm9
movaps 0x70(%rsp), %xmm1
subps %xmm1, %xmm9
movaps %xmm14, %xmm8
subps %xmm6, %xmm8
movaps %xmm13, %xmm0
subps %xmm4, %xmm0
movaps %xmm14, 0xf0(%rsp)
movaps %xmm14, %xmm10
addps %xmm6, %xmm10
movaps %xmm13, 0x50(%rsp)
addps %xmm4, %xmm13
movaps %xmm9, %xmm14
mulps %xmm10, %xmm14
mulps %xmm0, %xmm10
movaps %xmm8, %xmm3
mulps %xmm13, %xmm3
subps %xmm10, %xmm3
movaps %xmm15, 0x10(%rsp)
movaps %xmm15, %xmm10
addps %xmm1, %xmm10
mulps %xmm9, %xmm13
movaps %xmm10, %xmm15
movaps %xmm0, 0x250(%rsp)
mulps %xmm0, %xmm15
subps %xmm13, %xmm15
mulps %xmm8, %xmm10
subps %xmm10, %xmm14
mulps %xmm11, %xmm14
mulps %xmm7, %xmm15
addps %xmm14, %xmm15
mulps 0xa0(%rsp), %xmm3
addps %xmm15, %xmm3
movaps %xmm1, %xmm0
movaps %xmm1, %xmm14
movaps 0x60(%rsp), %xmm1
subps %xmm1, %xmm14
addps %xmm0, %xmm1
movaps %xmm1, %xmm0
movaps %xmm6, %xmm15
subps %xmm5, %xmm15
addps %xmm6, %xmm5
movaps %xmm4, %xmm13
subps %xmm2, %xmm13
addps %xmm4, %xmm2
movaps %xmm14, %xmm6
mulps %xmm5, %xmm6
mulps %xmm13, %xmm5
movaps %xmm15, %xmm4
mulps %xmm2, %xmm4
subps %xmm5, %xmm4
mulps %xmm14, %xmm2
mulps %xmm13, %xmm1
subps %xmm2, %xmm1
mulps %xmm15, %xmm0
subps %xmm0, %xmm6
movaps %xmm11, 0x60(%rsp)
mulps %xmm11, %xmm6
movaps 0xa0(%rsp), %xmm11
movaps %xmm7, 0x70(%rsp)
mulps %xmm7, %xmm1
addps %xmm6, %xmm1
mulps %xmm11, %xmm4
addps %xmm1, %xmm4
movaps %xmm12, %xmm1
addps %xmm3, %xmm1
addps %xmm4, %xmm1
movaps %xmm12, %xmm0
minps %xmm3, %xmm0
minps %xmm4, %xmm0
movaps %xmm12, 0x430(%rsp)
movaps %xmm12, %xmm6
movaps %xmm3, 0x450(%rsp)
maxps %xmm3, %xmm6
maxps %xmm4, %xmm6
movaps %xmm1, 0x440(%rsp)
movaps %xmm1, %xmm4
andps 0x1d526c8(%rip), %xmm4 # 0x1eec6c0
movaps %xmm4, %xmm1
mulps 0x1d57d6e(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d526c3(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
leaq (%r9,%r8), %r15
movmskps %xmm6, %eax
testl %eax, %eax
je 0x19a732
movaps %xmm9, %xmm7
movaps %xmm8, %xmm0
movaps 0x270(%rsp), %xmm9
mulps %xmm9, %xmm0
movaps %xmm7, %xmm1
movaps 0x260(%rsp), %xmm12
mulps %xmm12, %xmm1
movaps %xmm15, 0x470(%rsp)
movaps 0x250(%rsp), %xmm5
mulps %xmm5, %xmm15
movaps %xmm14, %xmm10
mulps %xmm8, %xmm10
mulps %xmm5, %xmm12
subps %xmm0, %xmm12
movaps %xmm13, %xmm3
movaps %xmm13, 0x460(%rsp)
movaps 0xe0(%rsp), %xmm13
movaps %xmm13, %xmm2
mulps %xmm8, %xmm13
mulps %xmm3, %xmm8
subps %xmm15, %xmm8
movaps 0x1d52626(%rip), %xmm3 # 0x1eec6c0
andps %xmm3, %xmm0
andps %xmm3, %xmm15
cmpltps %xmm15, %xmm0
blendvps %xmm0, %xmm12, %xmm8
movaps %xmm7, %xmm0
mulps 0x460(%rsp), %xmm0
mulps %xmm5, %xmm2
mulps %xmm5, %xmm14
mulps %xmm7, %xmm9
subps %xmm2, %xmm9
subps %xmm0, %xmm14
andps %xmm3, %xmm2
andps %xmm3, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm9, %xmm14
mulps 0x470(%rsp), %xmm7
subps %xmm1, %xmm13
subps %xmm10, %xmm7
andps %xmm3, %xmm1
andps %xmm3, %xmm10
cmpltps %xmm10, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm13, %xmm7
movaps 0x60(%rsp), %xmm1
mulps %xmm7, %xmm1
movaps 0x70(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm1, %xmm0
mulps %xmm8, %xmm11
addps %xmm0, %xmm11
addps %xmm11, %xmm11
movaps 0x50(%rsp), %xmm0
mulps %xmm7, %xmm0
movaps 0xf0(%rsp), %xmm1
mulps %xmm14, %xmm1
addps %xmm0, %xmm1
movaps 0x10(%rsp), %xmm3
mulps %xmm8, %xmm3
addps %xmm1, %xmm3
rcpps %xmm11, %xmm0
movaps %xmm11, %xmm1
mulps %xmm0, %xmm1
movaps 0x1d528b8(%rip), %xmm10 # 0x1eeca10
subps %xmm1, %xmm10
addps %xmm3, %xmm3
mulps %xmm0, %xmm10
addps %xmm0, %xmm10
mulps %xmm3, %xmm10
movss 0x80(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm10, %xmm1
cmpleps %xmm0, %xmm1
movss 0x30(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
cmpleps %xmm10, %xmm0
andps %xmm0, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19a732
cmpneqps 0x1d51865(%rip), %xmm11 # 0x1eeba10
andps %xmm11, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19a732
movaps 0x430(%rsp), %xmm5
movaps %xmm5, 0x170(%rsp)
movaps 0x450(%rsp), %xmm9
movaps %xmm9, 0x180(%rsp)
movaps 0x440(%rsp), %xmm11
movaps %xmm11, 0x190(%rsp)
movaps %xmm8, 0x1a0(%rsp)
movaps %xmm14, 0x1b0(%rsp)
movaps %xmm7, 0x1c0(%rsp)
movaps %xmm1, 0x1d0(%rsp)
movaps %xmm10, 0x200(%rsp)
movaps 0x420(%rsp), %xmm0
movaps %xmm0, 0x240(%rsp)
movaps %xmm1, 0x40(%rsp)
rcpps %xmm11, %xmm3
mulps %xmm3, %xmm11
movaps 0x1d527d3(%rip), %xmm6 # 0x1eeca10
movaps %xmm6, %xmm2
subps %xmm11, %xmm2
mulps %xmm3, %xmm2
addps %xmm3, %xmm2
cmpnltps 0x1d57aee(%rip), %xmm4 # 0x1ef1d40
andps %xmm2, %xmm4
mulps %xmm4, %xmm5
minps %xmm6, %xmm5
mulps %xmm9, %xmm4
minps %xmm6, %xmm4
movaps %xmm6, %xmm2
subps %xmm5, %xmm2
movaps %xmm6, %xmm3
subps %xmm4, %xmm3
blendvps %xmm0, %xmm2, %xmm5
movaps %xmm5, 0x1e0(%rsp)
blendvps %xmm0, %xmm3, %xmm4
movaps %xmm4, 0x1f0(%rsp)
movaps %xmm8, 0x210(%rsp)
movaps %xmm14, 0x220(%rsp)
movaps %xmm7, 0x230(%rsp)
movaps 0x1d51747(%rip), %xmm2 # 0x1eeb9f0
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm10, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm0
shufps $0x4e, %xmm3, %xmm0 # xmm0 = xmm0[2,3],xmm3[0,1]
minps %xmm3, %xmm0
cmpeqps %xmm2, %xmm0
andps %xmm1, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x19a2d7
movaps %xmm0, %xmm1
movmskps %xmm1, %eax
bsfq %rax, %rbp
movq 0x2d0(%rsp), %r13
movl 0x40(%r15,%rbp,4), %eax
movq 0x1e8(%r13), %rcx
movq (%rcx,%rax,8), %rbx
movl 0x90(%r14,%r12,4), %ecx
testl %ecx, 0x34(%rbx)
je 0x19a5e6
movq 0x10(%r10), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x19a31d
cmpq $0x0, 0x40(%rbx)
je 0x19a68e
movss 0x1e0(%rsp,%rbp,4), %xmm0
movss 0x1f0(%rsp,%rbp,4), %xmm1
movss 0x210(%rsp,%rbp,4), %xmm2
movss 0x220(%rsp,%rbp,4), %xmm3
movq 0x8(%r10), %rcx
movd %eax, %xmm4
pshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movd 0x50(%r15,%rbp,4), %xmm5
pshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movss 0x230(%rsp,%rbp,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm2, 0x300(%rsp)
movaps %xmm3, 0x310(%rsp)
movaps %xmm6, 0x320(%rsp)
movaps %xmm0, 0x330(%rsp)
movaps %xmm1, 0x340(%rsp)
movdqa %xmm5, 0x350(%rsp)
movdqa %xmm4, 0x360(%rsp)
leaq 0x370(%rsp), %rax
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rax)
movdqa %xmm0, (%rax)
movd (%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x370(%rsp)
movd 0x4(%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x380(%rsp)
movss 0x80(%r14,%r12,4), %xmm0
movss %xmm0, 0x50(%rsp)
movss 0x200(%rsp,%rbp,4), %xmm0
movss %xmm0, 0x80(%r14,%r12,4)
movq 0x168(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x300(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
movaps %xmm10, 0x10(%rsp)
je 0x19a499
leaq 0xb0(%rsp), %rdi
callq *%rax
movaps 0x10(%rsp), %xmm10
movq (%rsp), %r9
movq 0x1408(%rsp), %r10
movdqa 0x90(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x19a639
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x19a4e1
testb $0x2, (%rcx)
jne 0x19a4c5
testb $0x40, 0x3e(%rbx)
je 0x19a4e1
leaq 0xb0(%rsp), %rdi
callq *%rax
movaps 0x10(%rsp), %xmm10
movq (%rsp), %r9
movq 0x1408(%rsp), %r10
movdqa 0x90(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d51519(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d5191d(%rip), %xmm1 # 0x1eebe20
je 0x19a649
movq 0xc8(%rsp), %rax
movq 0xd0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x19a649
andl $0x0, 0x40(%rsp,%rbp,4)
movaps 0x40(%rsp), %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x19a732
movaps 0x1d513ee(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm10, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19a62d
movaps %xmm1, %xmm0
movmskps %xmm0, %eax
bsfq %rax, %rbp
jmp 0x19a2e6
pcmpeqd 0x1d513cf(%rip), %xmm1 # 0x1eeba10
pxor 0x1d517d7(%rip), %xmm1 # 0x1eebe20
ptest 0x1d524ce(%rip), %xmm1 # 0x1eecb20
jne 0x19a664
movd 0x50(%rsp), %xmm0
movd %xmm0, 0x80(%r14,%r12,4)
andl $0x0, 0x40(%rsp,%rbp,4)
movss 0x80(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm10, %xmm1
cmpleps %xmm0, %xmm1
andps 0x40(%rsp), %xmm1
movaps %xmm1, 0x40(%rsp)
jmp 0x19a5eb
movss 0x1e0(%rsp,%rbp,4), %xmm0
movss 0x1f0(%rsp,%rbp,4), %xmm1
movss 0x200(%rsp,%rbp,4), %xmm2
movss %xmm2, 0x80(%r14,%r12,4)
movss 0x210(%rsp,%rbp,4), %xmm2
movss %xmm2, 0xc0(%r14,%r12,4)
movss 0x220(%rsp,%rbp,4), %xmm2
movss %xmm2, 0xd0(%r14,%r12,4)
movss 0x230(%rsp,%rbp,4), %xmm2
movss %xmm2, 0xe0(%r14,%r12,4)
movss %xmm0, 0xf0(%r14,%r12,4)
movss %xmm1, 0x100(%r14,%r12,4)
movl 0x50(%r15,%rbp,4), %ecx
movl %ecx, 0x110(%r14,%r12,4)
movl %eax, 0x120(%r14,%r12,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r14,%r12,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r14,%r12,4)
movss (%r14,%r12,4), %xmm1
movss 0x10(%r14,%r12,4), %xmm2
movss 0x20(%r14,%r12,4), %xmm0
movss 0x40(%r14,%r12,4), %xmm3
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm3, 0x10(%rsp)
movss 0x50(%r14,%r12,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps %xmm11, 0x50(%rsp)
movaps 0x150(%rsp), %xmm9
subps %xmm1, %xmm9
movaps 0x100(%rsp), %xmm3
subps %xmm2, %xmm3
movaps 0x80(%rsp), %xmm7
subps %xmm0, %xmm7
movaps 0x110(%rsp), %xmm14
subps %xmm1, %xmm14
movaps 0x20(%rsp), %xmm4
subps %xmm2, %xmm4
movaps %xmm4, 0x20(%rsp)
movaps 0x30(%rsp), %xmm4
subps %xmm0, %xmm4
movaps %xmm4, 0x30(%rsp)
movaps 0x120(%rsp), %xmm13
subps %xmm1, %xmm13
movaps 0x130(%rsp), %xmm12
subps %xmm2, %xmm12
movaps 0x140(%rsp), %xmm10
subps %xmm0, %xmm10
movaps %xmm13, %xmm5
subps %xmm9, %xmm5
movaps %xmm12, %xmm8
subps %xmm3, %xmm8
movaps %xmm10, %xmm6
subps %xmm7, %xmm6
movaps %xmm13, %xmm0
addps %xmm9, %xmm0
movaps %xmm12, %xmm1
addps %xmm3, %xmm1
movaps %xmm10, %xmm2
addps %xmm7, %xmm2
movaps %xmm5, %xmm4
mulps %xmm1, %xmm4
mulps %xmm6, %xmm1
movaps %xmm8, %xmm15
mulps %xmm2, %xmm15
subps %xmm1, %xmm15
movaps %xmm5, 0x140(%rsp)
mulps %xmm5, %xmm2
movaps %xmm0, %xmm1
movaps %xmm6, 0x130(%rsp)
mulps %xmm6, %xmm1
subps %xmm2, %xmm1
movss 0x60(%r14,%r12,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movaps %xmm8, 0x120(%rsp)
mulps %xmm8, %xmm0
subps %xmm0, %xmm4
movaps %xmm9, %xmm0
subps %xmm14, %xmm0
movaps %xmm0, %xmm6
mulps %xmm5, %xmm4
mulps %xmm11, %xmm1
addps %xmm4, %xmm1
movaps %xmm3, %xmm8
movaps 0x20(%rsp), %xmm4
subps %xmm4, %xmm8
mulps 0x10(%rsp), %xmm15
addps %xmm1, %xmm15
movaps %xmm7, %xmm11
movaps 0x30(%rsp), %xmm2
subps %xmm2, %xmm11
movaps %xmm3, 0x100(%rsp)
movaps %xmm3, %xmm0
addps %xmm4, %xmm0
movaps %xmm7, 0x80(%rsp)
addps %xmm2, %xmm7
movaps %xmm6, %xmm3
movaps %xmm6, %xmm2
mulps %xmm0, %xmm2
mulps %xmm11, %xmm0
movaps %xmm8, %xmm6
mulps %xmm7, %xmm6
subps %xmm0, %xmm6
movaps %xmm9, 0x150(%rsp)
movaps %xmm9, %xmm0
addps %xmm14, %xmm0
mulps %xmm3, %xmm7
movaps %xmm3, %xmm9
movaps %xmm0, %xmm4
movaps %xmm11, 0xf0(%rsp)
mulps %xmm11, %xmm4
subps %xmm7, %xmm4
mulps %xmm8, %xmm0
subps %xmm0, %xmm2
mulps %xmm5, %xmm2
movaps 0x50(%rsp), %xmm7
mulps %xmm7, %xmm4
addps %xmm2, %xmm4
movaps 0x10(%rsp), %xmm3
mulps %xmm3, %xmm6
addps %xmm4, %xmm6
movaps %xmm14, %xmm11
subps %xmm13, %xmm11
addps %xmm14, %xmm13
movaps 0x20(%rsp), %xmm0
movaps %xmm0, %xmm4
subps %xmm12, %xmm4
addps %xmm0, %xmm12
movaps 0x30(%rsp), %xmm0
movaps %xmm0, %xmm14
subps %xmm10, %xmm14
addps %xmm0, %xmm10
movaps %xmm11, %xmm1
mulps %xmm12, %xmm1
mulps %xmm14, %xmm12
movaps %xmm4, %xmm0
mulps %xmm10, %xmm0
subps %xmm12, %xmm0
mulps %xmm11, %xmm10
movaps %xmm13, %xmm2
movaps %xmm14, 0x20(%rsp)
mulps %xmm14, %xmm2
subps %xmm10, %xmm2
movaps %xmm4, %xmm12
mulps %xmm4, %xmm13
subps %xmm13, %xmm1
movaps %xmm5, 0x110(%rsp)
mulps %xmm5, %xmm1
mulps %xmm7, %xmm2
addps %xmm1, %xmm2
mulps %xmm3, %xmm0
addps %xmm2, %xmm0
movaps %xmm15, %xmm14
addps %xmm6, %xmm14
addps %xmm0, %xmm14
movaps %xmm15, %xmm1
minps %xmm6, %xmm1
minps %xmm0, %xmm1
movaps %xmm15, 0xe0(%rsp)
movaps %xmm15, %xmm4
movaps %xmm6, 0x70(%rsp)
maxps %xmm6, %xmm4
maxps %xmm0, %xmm4
movaps %xmm14, 0x60(%rsp)
andps 0x1d51cf5(%rip), %xmm14 # 0x1eec6c0
movaps %xmm14, %xmm0
mulps 0x1d5739a(%rip), %xmm0 # 0x1ef1d70
cmpleps %xmm0, %xmm4
xorps 0x1d51cef(%rip), %xmm0 # 0x1eec6d0
cmpnltps %xmm0, %xmm1
orps %xmm1, %xmm4
movmskps %xmm4, %eax
testl %eax, %eax
movq 0x8(%rsp), %rsi
je 0x19b132
movaps %xmm9, %xmm6
movaps %xmm8, %xmm0
movaps 0x130(%rsp), %xmm13
mulps %xmm13, %xmm0
movaps %xmm9, %xmm1
movaps %xmm8, %xmm5
movaps %xmm11, 0x30(%rsp)
movaps 0x120(%rsp), %xmm11
mulps %xmm11, %xmm1
movaps %xmm12, 0xa0(%rsp)
movaps 0xf0(%rsp), %xmm15
mulps %xmm15, %xmm12
movaps 0x150(%rsp), %xmm3
movaps 0x100(%rsp), %xmm10
movaps 0x30(%rsp), %xmm9
mulps %xmm8, %xmm9
mulps %xmm15, %xmm11
subps %xmm0, %xmm11
movaps 0x140(%rsp), %xmm8
movaps %xmm8, %xmm2
mulps %xmm5, %xmm8
mulps 0x20(%rsp), %xmm5
subps %xmm12, %xmm5
movaps 0x1d51c3e(%rip), %xmm7 # 0x1eec6c0
andps %xmm7, %xmm0
andps %xmm7, %xmm12
cmpltps %xmm12, %xmm0
movaps 0x10(%rsp), %xmm12
blendvps %xmm0, %xmm11, %xmm5
movaps 0x30(%rsp), %xmm11
movaps %xmm6, %xmm0
mulps 0x20(%rsp), %xmm0
mulps %xmm15, %xmm2
mulps %xmm15, %xmm11
mulps %xmm6, %xmm13
subps %xmm2, %xmm13
subps %xmm0, %xmm11
andps %xmm7, %xmm2
andps %xmm7, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm13, %xmm11
mulps 0xa0(%rsp), %xmm6
subps %xmm1, %xmm8
subps %xmm9, %xmm6
andps %xmm7, %xmm1
andps %xmm7, %xmm9
cmpltps %xmm9, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm8, %xmm6
movaps 0x110(%rsp), %xmm1
mulps %xmm6, %xmm1
movaps 0x50(%rsp), %xmm0
mulps %xmm11, %xmm0
addps %xmm1, %xmm0
mulps %xmm5, %xmm12
addps %xmm0, %xmm12
addps %xmm12, %xmm12
movaps 0x80(%rsp), %xmm0
mulps %xmm6, %xmm0
mulps %xmm11, %xmm10
addps %xmm0, %xmm10
mulps %xmm5, %xmm3
addps %xmm10, %xmm3
rcpps %xmm12, %xmm0
movaps %xmm12, %xmm1
mulps %xmm0, %xmm1
movaps 0x1d51ecd(%rip), %xmm7 # 0x1eeca10
subps %xmm1, %xmm7
addps %xmm3, %xmm3
mulps %xmm0, %xmm7
addps %xmm0, %xmm7
mulps %xmm3, %xmm7
movss 0x80(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm7, %xmm1
cmpleps %xmm0, %xmm1
movss 0x30(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
cmpleps %xmm7, %xmm0
andps %xmm0, %xmm1
andps %xmm4, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19b132
cmpneqps 0x1d50e80(%rip), %xmm12 # 0x1eeba10
andps %xmm12, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19b132
movaps 0xe0(%rsp), %xmm10
movaps %xmm10, 0x170(%rsp)
movaps %xmm6, %xmm8
movaps 0x70(%rsp), %xmm6
movaps %xmm6, 0x180(%rsp)
movaps 0x60(%rsp), %xmm9
movaps %xmm9, 0x190(%rsp)
movaps %xmm5, 0x1a0(%rsp)
movaps %xmm11, 0x1b0(%rsp)
movaps %xmm8, 0x1c0(%rsp)
movaps %xmm1, 0x1d0(%rsp)
movaps %xmm7, 0x200(%rsp)
movaps 0x410(%rsp), %xmm0
movaps %xmm0, 0x240(%rsp)
movq (%r10), %rbp
movaps %xmm1, 0x40(%rsp)
rcpps %xmm9, %xmm3
mulps %xmm3, %xmm9
movaps 0x1d51dee(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm2
subps %xmm9, %xmm2
mulps %xmm3, %xmm2
addps %xmm3, %xmm2
cmpnltps 0x1d57108(%rip), %xmm14 # 0x1ef1d40
andps %xmm2, %xmm14
mulps %xmm14, %xmm10
minps %xmm4, %xmm10
mulps %xmm6, %xmm14
minps %xmm4, %xmm14
movaps %xmm4, %xmm2
subps %xmm10, %xmm2
movaps %xmm4, %xmm3
subps %xmm14, %xmm3
blendvps %xmm0, %xmm2, %xmm10
movaps %xmm10, 0x1e0(%rsp)
blendvps %xmm0, %xmm3, %xmm14
movaps %xmm14, 0x1f0(%rsp)
movaps %xmm5, 0x210(%rsp)
movaps %xmm11, 0x220(%rsp)
movaps %xmm8, 0x230(%rsp)
movaps 0x1d50d57(%rip), %xmm2 # 0x1eeb9f0
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm7, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm0
shufps $0x4e, %xmm3, %xmm0 # xmm0 = xmm0[2,3],xmm3[0,1]
minps %xmm3, %xmm0
cmpeqps %xmm2, %xmm0
andps %xmm1, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x19acc6
movaps %xmm0, %xmm1
movmskps %xmm1, %eax
bsfq %rax, %r13
movl 0x40(%r15,%r13,4), %eax
movq 0x1e8(%rbp), %rcx
movq (%rcx,%rax,8), %rbx
movl 0x90(%r14,%r12,4), %ecx
testl %ecx, 0x34(%rbx)
je 0x19afdd
movq 0x10(%r10), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x19ad04
cmpq $0x0, 0x40(%rbx)
je 0x19b088
movss 0x1e0(%rsp,%r13,4), %xmm0
movss 0x1f0(%rsp,%r13,4), %xmm1
movss 0x210(%rsp,%r13,4), %xmm2
movss 0x220(%rsp,%r13,4), %xmm3
movq 0x8(%r10), %rcx
movd %eax, %xmm4
pshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movd 0x50(%r15,%r13,4), %xmm5
pshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movss 0x230(%rsp,%r13,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm2, 0x300(%rsp)
movaps %xmm3, 0x310(%rsp)
movaps %xmm6, 0x320(%rsp)
movaps %xmm0, 0x330(%rsp)
movaps %xmm1, 0x340(%rsp)
movdqa %xmm5, 0x350(%rsp)
movdqa %xmm4, 0x360(%rsp)
leaq 0x370(%rsp), %rax
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rax)
movdqa %xmm0, (%rax)
movd (%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x370(%rsp)
movd 0x4(%rcx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x380(%rsp)
movss 0x80(%r14,%r12,4), %xmm0
movss %xmm0, 0x80(%rsp)
movss 0x200(%rsp,%r13,4), %xmm0
movss %xmm0, 0x80(%r14,%r12,4)
movq 0x168(%rsp), %rax
movdqa (%rax), %xmm0
movdqa %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x300(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
movaps %xmm7, 0x20(%rsp)
je 0x19ae8c
leaq 0xb0(%rsp), %rdi
callq *%rax
movaps 0x20(%rsp), %xmm7
movq (%rsp), %r9
movq 0x8(%rsp), %rsi
movq 0x1408(%rsp), %r10
movdqa 0x90(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x19b030
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x19aed8
testb $0x2, (%rcx)
jne 0x19aeb8
testb $0x40, 0x3e(%rbx)
je 0x19aed8
leaq 0xb0(%rsp), %rdi
callq *%rax
movaps 0x20(%rsp), %xmm7
movq (%rsp), %r9
movq 0x8(%rsp), %rsi
movq 0x1408(%rsp), %r10
movdqa 0x90(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d50b22(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d50f26(%rip), %xmm1 # 0x1eebe20
je 0x19b040
movq 0xc8(%rsp), %rax
movq 0xd0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x19b040
andl $0x0, 0x40(%rsp,%r13,4)
movaps 0x40(%rsp), %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x19b132
movaps 0x1d509f6(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm7, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19b024
movaps %xmm1, %xmm0
movmskps %xmm0, %eax
bsfq %rax, %r13
jmp 0x19accd
pcmpeqd 0x1d509d8(%rip), %xmm1 # 0x1eeba10
pxor 0x1d50de0(%rip), %xmm1 # 0x1eebe20
ptest 0x1d51ad7(%rip), %xmm1 # 0x1eecb20
jne 0x19b05e
movd 0x80(%rsp), %xmm0
movd %xmm0, 0x80(%r14,%r12,4)
andl $0x0, 0x40(%rsp,%r13,4)
movss 0x80(%r14,%r12,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm7, %xmm1
cmpleps %xmm0, %xmm1
andps 0x40(%rsp), %xmm1
movaps %xmm1, 0x40(%rsp)
jmp 0x19afe3
movss 0x1e0(%rsp,%r13,4), %xmm0
movss 0x1f0(%rsp,%r13,4), %xmm1
movss 0x200(%rsp,%r13,4), %xmm2
movss %xmm2, 0x80(%r14,%r12,4)
movss 0x210(%rsp,%r13,4), %xmm2
movss %xmm2, 0xc0(%r14,%r12,4)
movss 0x220(%rsp,%r13,4), %xmm2
movss %xmm2, 0xd0(%r14,%r12,4)
movss 0x230(%rsp,%r13,4), %xmm2
movss %xmm2, 0xe0(%r14,%r12,4)
movss %xmm0, 0xf0(%r14,%r12,4)
movss %xmm1, 0x100(%r14,%r12,4)
movl 0x50(%r15,%r13,4), %ecx
movl %ecx, 0x110(%r14,%r12,4)
movl %eax, 0x120(%r14,%r12,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r14,%r12,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r14,%r12,4)
movq 0x2d8(%rsp), %rax
incq %rax
cmpq 0x2b8(%rsp), %rax
jne 0x199838
movss 0x80(%r14,%r12,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movq 0x2a0(%rsp), %r11
movaps 0x400(%rsp), %xmm8
movaps 0x3f0(%rsp), %xmm9
movaps 0x3e0(%rsp), %xmm10
movaps 0x3d0(%rsp), %xmm11
movaps 0x3c0(%rsp), %xmm12
movaps 0x3b0(%rsp), %xmm13
movaps 0x3a0(%rsp), %xmm14
movaps 0x390(%rsp), %xmm15
movaps 0x2f0(%rsp), %xmm6
movq 0x298(%rsp), %rbx
movq 0x290(%rsp), %r15
movq 0x288(%rsp), %r13
movq 0x280(%rsp), %rbp
movaps 0x2e0(%rsp), %xmm7
jmp 0x19946d
addq $0x13c8, %rsp # imm = 0x13C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::QuadMiMBIntersectorKPluecker<4, 4, true>>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::QuadMIntersectorKPluecker<4, 4, true>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
/* Single-ray occlusion (any-hit / shadow) query: traverses the BVH for lane k of a
 * K-wide ray packet using a scalar (1-wide over the packet, N-wide over BVH children)
 * traversal. Returns true as soon as ANY occluding primitive is found — no nearest-hit
 * search is performed. On a hit, ray.tfar[k] is set to neg_inf to mark the lane occluded. */
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,                              // packet lane being traversed
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,       // precomputed K-wide traversal ray data
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
/* extract lane k of the K-wide packet into a single-ray traversal structure */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
/* NOTE: 'pop' labels the loop body; the inner traversal jumps back here via goto */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty -> traversal finished, ray not occluded
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
/* intersect() returning false means 'cur' is a leaf, not a node -> leave the
 * down-traversal and handle it below (the node stat is rolled back) */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
/* any-hit ordering: child visit order need not be front-to-back */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf; // mark lane k occluded and terminate early
return true;
}
/* the primitive intersector may request deferred traversal of a lazily-built subtree */
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false; // stack exhausted without finding an occluder
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xca8, %rsp # imm = 0xCA8
movq %rcx, %r8
movq 0xce8(%rsp), %rsi
movq 0xce0(%rsp), %rax
leaq 0x508(%rsp), %rdi
movq %rdx, -0x8(%rdi)
movss (%rax,%rcx,4), %xmm7
movss 0x10(%rax,%rcx,4), %xmm8
movss 0x20(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm10
movss 0x70(%rax,%rcx,4), %xmm11
movss 0x80(%rax,%rcx,4), %xmm12
movss 0x1d84cae(%rip), %xmm13 # 0x1f1ff10
movaps %xmm10, %xmm14
mulss %xmm13, %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm11, %xmm15
mulss %xmm13, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulss %xmm12, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movss 0x1d84c84(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulss %xmm0, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm0, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r10
movslq 0xa0(%rax,%rcx,4), %r11
movslq 0xb0(%rax,%rcx,4), %r15
movq %r10, %r13
xorq $0x10, %r13
movq %r11, %rbp
xorq $0x10, %rbp
movq %r15, %rcx
xorq $0x10, %rcx
movq %rcx, 0x278(%rsp)
movss 0xc0(%rax,%r8,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movss 0xd0(%rax,%r8,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
leaq 0x1fb4c7a(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, 0x480(%rsp)
movaps 0xf0(%rax), %xmm0
movaps %xmm0, 0x3f0(%rsp)
pushq $0x1
popq %rdx
movl %r8d, %ecx
shll %cl, %edx
movslq %edx, %rcx
shlq $0x4, %rcx
addq %rax, %rcx
movq %rcx, 0x1d8(%rsp)
movq %r9, 0x38(%rsp)
movq %r8, 0x30(%rsp)
movaps %xmm7, 0xe0(%rsp)
movaps %xmm8, 0xd0(%rsp)
movaps %xmm9, 0xc0(%rsp)
movaps %xmm10, 0xb0(%rsp)
movaps %xmm11, 0xa0(%rsp)
movaps %xmm12, 0x90(%rsp)
movaps %xmm13, 0x80(%rsp)
movaps %xmm14, 0x70(%rsp)
movaps %xmm15, 0x60(%rsp)
movq %r10, 0x200(%rsp)
movq %r11, 0x1f8(%rsp)
movq %r15, 0x1f0(%rsp)
movq %r13, 0x1e8(%rsp)
movq %rbp, 0x1e0(%rsp)
movaps %xmm5, 0x100(%rsp)
movaps %xmm6, 0xf0(%rsp)
movq %rdi, %rcx
leaq 0x500(%rsp), %rax
cmpq %rax, %rdi
je 0x19cdee
leaq -0x8(%rcx), %rdi
movq %rcx, 0x48(%rsp)
movq -0x8(%rcx), %r12
testb $0x8, %r12b
jne 0x19b4d0
movq %r12, %rax
andq $-0x10, %rax
movss 0x70(%r9,%r8,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rax,%r10), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%r10), %xmm2
subps %xmm7, %xmm2
mulps %xmm14, %xmm2
movaps %xmm5, %xmm1
movaps 0x80(%rax,%r11), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r11), %xmm3
maxps %xmm2, %xmm1
subps %xmm8, %xmm3
mulps %xmm15, %xmm3
movaps 0x80(%rax,%r15), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%r15), %xmm2
subps %xmm9, %xmm2
mulps %xmm13, %xmm2
maxps %xmm2, %xmm3
maxps %xmm3, %xmm1
movaps 0x80(%rax,%r13), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r13), %xmm3
subps %xmm7, %xmm3
mulps %xmm10, %xmm3
movaps %xmm6, %xmm2
minps %xmm3, %xmm2
movaps 0x80(%rax,%rbp), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rbp), %xmm3
subps %xmm8, %xmm3
movq 0x278(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%rcx), %xmm4
mulps %xmm11, %xmm3
subps %xmm9, %xmm4
mulps %xmm12, %xmm4
minps %xmm4, %xmm3
minps %xmm3, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpleps %xmm2, %xmm1
cmpl $0x6, %ecx
je 0x19b51a
pslld $0x1f, %xmm1
movmskps %xmm1, %ebx
testb $0x8, %r12b
jne 0x19b516
testq %rbx, %rbx
je 0x19b535
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdx
jne 0x19b53b
movq %rax, %r12
testl %r14d, %r14d
je 0x19b3e7
jmp 0x19b57e
pushq $0x6
jmp 0x19b537
movaps 0xe0(%rax), %xmm2
cmpleps %xmm0, %xmm2
cmpltps 0xf0(%rax), %xmm0
andps %xmm2, %xmm0
andps %xmm0, %xmm1
jmp 0x19b4c8
pushq $0x4
popq %r14
jmp 0x19b50b
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x19b579
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x19b54a
movq %rcx, %r12
jmp 0x19b50b
cmpl $0x6, %r14d
jne 0x19cd91
movl %r12d, %eax
andl $0xf, %eax
xorl %r14d, %r14d
addq $-0x8, %rax
setne %cl
je 0x19cd91
movq %rdi, 0x208(%rsp)
andq $-0x10, %r12
xorl %edx, %edx
movq %rax, 0x268(%rsp)
movb %cl, 0xf(%rsp)
movq %rdx, 0x270(%rsp)
imulq $0x60, %rdx, %rdx
movq (%rsi), %rcx
movss 0x70(%r9,%r8,4), %xmm12
movl 0x40(%r12,%rdx), %eax
movq %rcx, 0x1a0(%rsp)
movq 0x1e8(%rcx), %rbp
movq (%rbp,%rax,8), %rax
movss 0x28(%rax), %xmm0
movss 0x2c(%rax), %xmm1
movss 0x30(%rax), %xmm2
subss %xmm1, %xmm12
subss %xmm1, %xmm2
divss %xmm2, %xmm12
mulss %xmm0, %xmm12
roundss $0x9, %xmm12, %xmm1
addss 0x1d553b4(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm0, %xmm0
maxss %xmm1, %xmm0
subss %xmm0, %xmm12
cvttss2si %xmm0, %esi
movslq %esi, %rdi
movq 0xe0(%rax), %rsi
imulq $0x38, %rdi, %r10
movq (%rsi,%r10), %rax
movq %rax, 0x140(%rsp)
movl (%r12,%rdx), %r8d
movl 0x4(%r12,%rdx), %r13d
movups (%rax,%r8,4), %xmm14
movl 0x10(%r12,%rdx), %r11d
movups (%rax,%r11,4), %xmm11
movl 0x30(%r12,%rdx), %ecx
movq %rcx, 0x120(%rsp)
movups (%rax,%rcx,4), %xmm0
movl 0x44(%r12,%rdx), %eax
movq (%rbp,%rax,8), %rax
movq 0xe0(%rax), %r9
movq (%r9,%r10), %rcx
movups (%rcx,%r13,4), %xmm2
movl 0x48(%r12,%rdx), %eax
movq (%rbp,%rax,8), %rax
movq %rax, 0x10(%rsp)
movl 0x4c(%r12,%rdx), %eax
movq (%rbp,%rax,8), %rbp
movl 0x14(%r12,%rdx), %eax
movq %rax, 0x110(%rsp)
movups (%rcx,%rax,4), %xmm3
movq %rcx, 0x190(%rsp)
incl %edi
movslq %edi, %rax
imulq $0x38, %rax, %rax
movq (%rsi,%rax), %rdi
movups (%rdi,%r8,4), %xmm1
movl 0x34(%r12,%rdx), %r15d
movups (%rcx,%r15,4), %xmm5
movq 0x10(%rsp), %rcx
movq 0xe0(%rcx), %rsi
movups (%rdi,%r11,4), %xmm4
movaps %xmm4, 0x20(%rsp)
movq %rdi, %r8
movq %rdi, 0x150(%rsp)
movq (%rsi,%r10), %rcx
movq (%r9,%rax), %r11
movups (%r11,%r13,4), %xmm4
movl 0x8(%r12,%rdx), %edi
movups (%rcx,%rdi,4), %xmm6
movq (%rsi,%rax), %r9
movups (%r9,%rdi,4), %xmm7
movl 0x18(%r12,%rdx), %r13d
movups (%rcx,%r13,4), %xmm8
movq %rcx, %rdi
movq %rcx, 0x180(%rsp)
movq 0xe0(%rbp), %rcx
movq (%rcx,%r10), %rsi
movq (%rcx,%rax), %r10
movl 0x38(%r12,%rdx), %ebp
movups (%rdi,%rbp,4), %xmm9
movl 0xc(%r12,%rdx), %eax
movups (%rsi,%rax,4), %xmm10
movaps %xmm14, %xmm13
unpcklps %xmm6, %xmm13 # xmm13 = xmm13[0],xmm6[0],xmm13[1],xmm6[1]
unpckhps %xmm6, %xmm14 # xmm14 = xmm14[2],xmm6[2],xmm14[3],xmm6[3]
movaps %xmm2, %xmm6
unpcklps %xmm10, %xmm6 # xmm6 = xmm6[0],xmm10[0],xmm6[1],xmm10[1]
unpckhps %xmm10, %xmm2 # xmm2 = xmm2[2],xmm10[2],xmm2[3],xmm10[3]
movl 0x1c(%r12,%rdx), %ecx
movups (%rsi,%rcx,4), %xmm10
movq %rsi, %rdi
movq %rsi, 0x170(%rsp)
unpcklps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
movaps %xmm14, 0x1c0(%rsp)
movaps %xmm13, %xmm2
unpcklps %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
movaps %xmm2, 0x10(%rsp)
unpckhps %xmm6, %xmm13 # xmm13 = xmm13[2],xmm6[2],xmm13[3],xmm6[3]
movaps %xmm11, %xmm2
movaps %xmm11, %xmm15
unpcklps %xmm8, %xmm15 # xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1]
unpckhps %xmm8, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
movaps %xmm2, %xmm8
movaps %xmm3, %xmm2
unpcklps %xmm10, %xmm2 # xmm2 = xmm2[0],xmm10[0],xmm2[1],xmm10[1]
unpckhps %xmm10, %xmm3 # xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
movl 0x3c(%r12,%rdx), %esi
movups (%rdi,%rsi,4), %xmm6
unpcklps %xmm3, %xmm8 # xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
movaps %xmm8, 0x1b0(%rsp)
movaps %xmm15, %xmm14
unpcklps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0],xmm14[1],xmm2[1]
unpckhps %xmm2, %xmm15 # xmm15 = xmm15[2],xmm2[2],xmm15[3],xmm2[3]
movaps %xmm0, %xmm11
unpcklps %xmm9, %xmm11 # xmm11 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
unpckhps %xmm9, %xmm0 # xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
movaps %xmm5, %xmm2
unpcklps %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
unpckhps %xmm6, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
movups (%r10,%rax,4), %xmm3
unpcklps %xmm5, %xmm0 # xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
movaps %xmm0, 0x50(%rsp)
movaps %xmm11, %xmm0
unpcklps %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
unpckhps %xmm2, %xmm11 # xmm11 = xmm11[2],xmm2[2],xmm11[3],xmm2[3]
movaps %xmm1, %xmm6
unpcklps %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
unpckhps %xmm7, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
movaps %xmm4, %xmm2
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
unpckhps %xmm3, %xmm4 # xmm4 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
movq %r9, 0x160(%rsp)
movups (%r9,%r13,4), %xmm5
unpcklps %xmm4, %xmm1 # xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
movaps %xmm6, %xmm8
unpcklps %xmm2, %xmm8 # xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
unpckhps %xmm2, %xmm6 # xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
movaps 0x20(%rsp), %xmm7
movaps %xmm7, %xmm3
unpcklps %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
unpckhps %xmm5, %xmm7 # xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
movq 0x110(%rsp), %rax
movups (%r11,%rax,4), %xmm2
movups (%r10,%rcx,4), %xmm4
movaps %xmm2, %xmm5
unpcklps %xmm4, %xmm5 # xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
unpckhps %xmm4, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
unpcklps %xmm2, %xmm7 # xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1]
movaps %xmm7, 0x20(%rsp)
movaps %xmm3, %xmm7
unpcklps %xmm5, %xmm7 # xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
unpckhps %xmm5, %xmm3 # xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
movq 0x120(%rsp), %rax
movups (%r8,%rax,4), %xmm2
movups (%r9,%rbp,4), %xmm5
movaps %xmm2, %xmm4
unpcklps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
unpckhps %xmm5, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
movups (%r11,%r15,4), %xmm5
movq 0x30(%rsp), %r8
movq 0x38(%rsp), %r9
movups (%r10,%rsi,4), %xmm9
movaps %xmm5, %xmm10
unpcklps %xmm9, %xmm10 # xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
unpckhps %xmm9, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
unpcklps %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
movaps %xmm4, %xmm5
unpcklps %xmm10, %xmm5 # xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
unpckhps %xmm10, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
movss 0x1d50e55(%rip), %xmm9 # 0x1eec714
subss %xmm12, %xmm9
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulps %xmm12, %xmm8
movaps 0x10(%rsp), %xmm10
mulps %xmm9, %xmm10
addps %xmm8, %xmm10
movaps %xmm10, %xmm8
mulps %xmm12, %xmm6
mulps %xmm9, %xmm13
addps %xmm6, %xmm13
mulps %xmm12, %xmm1
movaps 0x1c0(%rsp), %xmm10
mulps %xmm9, %xmm10
addps %xmm1, %xmm10
mulps %xmm12, %xmm7
mulps %xmm9, %xmm14
addps %xmm7, %xmm14
mulps %xmm12, %xmm3
movaps 0x20(%rsp), %xmm1
mulps %xmm12, %xmm1
mulps %xmm9, %xmm15
addps %xmm3, %xmm15
movaps 0x1b0(%rsp), %xmm6
mulps %xmm9, %xmm6
addps %xmm1, %xmm6
mulps %xmm12, %xmm5
mulps %xmm12, %xmm4
movaps %xmm12, 0x470(%rsp)
mulps %xmm12, %xmm2
mulps %xmm9, %xmm0
addps %xmm5, %xmm0
mulps %xmm9, %xmm11
addps %xmm4, %xmm11
movaps %xmm9, 0x420(%rsp)
movaps 0x50(%rsp), %xmm12
mulps %xmm9, %xmm12
addps %xmm2, %xmm12
movss (%r9,%r8,4), %xmm1
movss 0x10(%r9,%r8,4), %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm8, %xmm3
subps %xmm1, %xmm3
subps %xmm2, %xmm13
movaps %xmm14, 0x460(%rsp)
subps %xmm1, %xmm14
movaps %xmm14, 0x120(%rsp)
movaps %xmm15, 0x450(%rsp)
subps %xmm2, %xmm15
movaps %xmm0, 0x430(%rsp)
movaps %xmm0, %xmm9
subps %xmm1, %xmm9
movaps %xmm11, 0x440(%rsp)
movaps %xmm11, %xmm1
subps %xmm2, %xmm1
movss 0x20(%r9,%r8,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
subps %xmm4, %xmm10
movaps %xmm6, 0x1b0(%rsp)
subps %xmm4, %xmm6
movaps %xmm12, 0x50(%rsp)
movaps %xmm12, %xmm2
subps %xmm4, %xmm2
movaps %xmm9, %xmm8
subps %xmm3, %xmm8
movaps %xmm1, %xmm14
subps %xmm13, %xmm14
movaps %xmm2, %xmm11
subps %xmm10, %xmm11
movaps %xmm1, %xmm4
addps %xmm13, %xmm4
movaps %xmm2, %xmm5
addps %xmm10, %xmm5
movaps %xmm8, %xmm7
mulps %xmm4, %xmm7
mulps %xmm11, %xmm4
movaps %xmm14, %xmm12
mulps %xmm5, %xmm12
subps %xmm4, %xmm12
movaps %xmm9, %xmm4
addps %xmm3, %xmm4
movaps %xmm3, 0x10(%rsp)
movaps %xmm8, 0x4f0(%rsp)
mulps %xmm8, %xmm5
movaps %xmm4, %xmm8
movaps %xmm11, 0x4e0(%rsp)
mulps %xmm11, %xmm8
subps %xmm5, %xmm8
movaps %xmm14, 0x4d0(%rsp)
mulps %xmm14, %xmm4
subps %xmm4, %xmm7
movss 0x50(%r9,%r8,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x20(%rsp)
movss 0x60(%r9,%r8,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm11, %xmm7
mulps %xmm0, %xmm8
addps %xmm7, %xmm8
movss 0x40(%r9,%r8,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm0, %xmm12
addps %xmm8, %xmm12
movaps %xmm3, %xmm8
movaps 0x120(%rsp), %xmm3
subps %xmm3, %xmm8
movaps %xmm13, %xmm4
movaps %xmm15, 0x110(%rsp)
subps %xmm15, %xmm13
movaps %xmm10, %xmm7
subps %xmm6, %xmm7
movaps %xmm4, 0x250(%rsp)
movaps %xmm4, %xmm5
addps %xmm15, %xmm5
movaps %xmm10, 0x1c0(%rsp)
addps %xmm6, %xmm10
movaps %xmm8, %xmm14
mulps %xmm5, %xmm14
mulps %xmm7, %xmm5
movaps %xmm13, %xmm4
mulps %xmm10, %xmm4
subps %xmm5, %xmm4
movaps 0x10(%rsp), %xmm5
addps %xmm3, %xmm5
mulps %xmm8, %xmm10
movaps %xmm5, %xmm15
movaps %xmm7, 0x4c0(%rsp)
mulps %xmm7, %xmm15
movaps %xmm0, %xmm7
subps %xmm10, %xmm15
mulps %xmm13, %xmm5
subps %xmm5, %xmm14
mulps %xmm11, %xmm14
movaps 0x20(%rsp), %xmm0
mulps %xmm0, %xmm15
addps %xmm14, %xmm15
mulps %xmm7, %xmm4
addps %xmm15, %xmm4
movaps %xmm3, %xmm5
subps %xmm9, %xmm5
addps %xmm3, %xmm9
movaps 0x110(%rsp), %xmm3
movaps %xmm3, %xmm15
subps %xmm1, %xmm15
addps %xmm3, %xmm1
movaps %xmm6, %xmm14
subps %xmm2, %xmm14
addps %xmm6, %xmm2
movaps %xmm5, %xmm6
mulps %xmm1, %xmm6
mulps %xmm14, %xmm1
movaps %xmm15, %xmm3
mulps %xmm2, %xmm3
subps %xmm1, %xmm3
mulps %xmm5, %xmm2
movaps %xmm9, %xmm1
mulps %xmm14, %xmm1
subps %xmm2, %xmm1
mulps %xmm15, %xmm9
subps %xmm9, %xmm6
movaps %xmm11, 0x120(%rsp)
mulps %xmm11, %xmm6
mulps %xmm0, %xmm1
addps %xmm6, %xmm1
mulps %xmm7, %xmm3
addps %xmm1, %xmm3
movaps %xmm12, %xmm1
addps %xmm4, %xmm1
addps %xmm3, %xmm1
movaps %xmm12, %xmm0
minps %xmm4, %xmm0
minps %xmm3, %xmm0
movaps %xmm12, %xmm6
movaps %xmm4, 0x4a0(%rsp)
maxps %xmm4, %xmm6
maxps %xmm3, %xmm6
movaps %xmm1, 0x490(%rsp)
movaps %xmm1, %xmm3
andps 0x1d50acf(%rip), %xmm3 # 0x1eec6c0
movaps %xmm3, %xmm1
mulps 0x1d56175(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d50aca(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
leaq (%r12,%rdx), %r13
movl 0x20(%r12,%rdx), %eax
movl 0x24(%r12,%rdx), %ecx
movl 0x28(%r12,%rdx), %esi
movl 0x2c(%r12,%rdx), %edx
movmskps %xmm6, %edi
movq 0x140(%rsp), %rbp
movups (%rbp,%rax,4), %xmm0
movaps %xmm0, 0x240(%rsp)
movq 0x150(%rsp), %r15
movups (%r15,%rax,4), %xmm10
movq 0x190(%rsp), %rax
movups (%rax,%rcx,4), %xmm0
movaps %xmm0, 0x410(%rsp)
movups (%r11,%rcx,4), %xmm0
movaps %xmm0, 0x150(%rsp)
movq 0x180(%rsp), %rax
movups (%rax,%rsi,4), %xmm0
movaps %xmm0, 0x400(%rsp)
movq 0x160(%rsp), %rax
movups (%rax,%rsi,4), %xmm0
movaps %xmm0, 0x160(%rsp)
movq 0xce8(%rsp), %rsi
movq 0x170(%rsp), %rax
movups (%rax,%rdx,4), %xmm0
movaps %xmm0, 0x170(%rsp)
movups (%r10,%rdx,4), %xmm0
movaps %xmm0, 0x180(%rsp)
movb 0xf(%rsp), %dl
testl %edi, %edi
movq %r13, %rdi
je 0x19c338
movaps %xmm13, %xmm11
movaps %xmm13, %xmm0
movaps %xmm14, %xmm4
movaps %xmm14, 0x110(%rsp)
movaps 0x4e0(%rsp), %xmm14
mulps %xmm14, %xmm0
movaps %xmm8, %xmm1
movaps 0x4d0(%rsp), %xmm13
mulps %xmm13, %xmm1
movaps %xmm15, 0x4b0(%rsp)
movaps %xmm7, 0x190(%rsp)
movaps 0x4c0(%rsp), %xmm7
mulps %xmm7, %xmm15
movaps %xmm10, 0x140(%rsp)
movaps %xmm5, %xmm10
mulps %xmm11, %xmm10
mulps %xmm7, %xmm13
subps %xmm0, %xmm13
movaps 0x4f0(%rsp), %xmm9
movaps %xmm9, %xmm2
mulps %xmm11, %xmm9
mulps %xmm4, %xmm11
subps %xmm15, %xmm11
movaps 0x1d5096c(%rip), %xmm4 # 0x1eec6c0
andps %xmm4, %xmm0
andps %xmm4, %xmm15
cmpltps %xmm15, %xmm0
blendvps %xmm0, %xmm13, %xmm11
movaps %xmm8, %xmm0
mulps 0x110(%rsp), %xmm0
mulps %xmm7, %xmm2
mulps %xmm7, %xmm5
movaps 0x190(%rsp), %xmm7
mulps %xmm8, %xmm14
subps %xmm2, %xmm14
subps %xmm0, %xmm5
andps %xmm4, %xmm2
andps %xmm4, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm14, %xmm5
mulps 0x4b0(%rsp), %xmm8
subps %xmm1, %xmm9
subps %xmm10, %xmm8
andps %xmm4, %xmm1
andps %xmm4, %xmm10
cmpltps %xmm10, %xmm1
movaps 0x140(%rsp), %xmm10
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm9, %xmm8
movaps 0x120(%rsp), %xmm1
mulps %xmm8, %xmm1
movaps 0x20(%rsp), %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm7
addps %xmm0, %xmm7
addps %xmm7, %xmm7
movaps 0x1c0(%rsp), %xmm0
mulps %xmm8, %xmm0
movaps 0x250(%rsp), %xmm1
mulps %xmm5, %xmm1
addps %xmm0, %xmm1
movaps 0x10(%rsp), %xmm4
mulps %xmm11, %xmm4
addps %xmm1, %xmm4
rcpps %xmm7, %xmm1
movaps %xmm7, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d50bec(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm4, %xmm4
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm4, %xmm0
movss 0x80(%r9,%r8,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r8,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19c338
cmpneqps 0x1d4fba0(%rip), %xmm7 # 0x1eeba10
andps %xmm7, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19c338
movaps %xmm12, 0x280(%rsp)
movaps 0x4a0(%rsp), %xmm9
movaps %xmm9, 0x290(%rsp)
movaps 0x490(%rsp), %xmm6
movaps %xmm6, 0x2a0(%rsp)
movaps %xmm11, 0x2b0(%rsp)
movaps %xmm5, 0x2c0(%rsp)
movaps %xmm8, 0x2d0(%rsp)
movaps %xmm1, 0x2e0(%rsp)
movaps %xmm0, 0x310(%rsp)
movdqa 0x480(%rsp), %xmm0
movdqa %xmm0, 0x350(%rsp)
rcpps %xmm6, %xmm2
mulps %xmm2, %xmm6
movaps 0x1d50b1e(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm1
subps %xmm6, %xmm1
mulps %xmm2, %xmm1
addps %xmm2, %xmm1
cmpnltps 0x1d55e3a(%rip), %xmm3 # 0x1ef1d40
andps %xmm1, %xmm3
mulps %xmm3, %xmm12
minps %xmm4, %xmm12
mulps %xmm9, %xmm3
minps %xmm4, %xmm3
movaps %xmm4, %xmm1
subps %xmm12, %xmm1
movaps %xmm4, %xmm2
subps %xmm3, %xmm2
blendvps %xmm0, %xmm1, %xmm12
movaps %xmm12, 0x2f0(%rsp)
blendvps %xmm0, %xmm2, %xmm3
movaps %xmm3, 0x300(%rsp)
movaps %xmm11, 0x320(%rsp)
movaps %xmm5, 0x330(%rsp)
movaps %xmm8, 0x340(%rsp)
movzbl %al, %ebp
movaps 0xe0(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
movaps 0x70(%rsp), %xmm14
movaps 0x60(%rsp), %xmm15
movaps 0x100(%rsp), %xmm5
movaps 0xf0(%rsp), %xmm6
movq 0x1a0(%rsp), %r10
bsfq %rbp, %r15
movl 0x40(%rdi,%r15,4), %ecx
movq 0x1e8(%r10), %rax
movq (%rax,%rcx,8), %r13
movl 0x90(%r9,%r8,4), %eax
testl %eax, 0x34(%r13)
je 0x19bff8
movq 0x10(%rsi), %rax
cmpq $0x0, 0x10(%rax)
jne 0x19c010
cmpq $0x0, 0x48(%r13)
jne 0x19c010
xorl %eax, %eax
jmp 0x19bffe
btcq %r15, %rbp
movb $0x1, %al
testb %al, %al
je 0x19cda2
testq %rbp, %rbp
jne 0x19bfc0
jmp 0x19c32f
movss 0x80(%r9,%r8,4), %xmm0
movss %xmm0, 0x10(%rsp)
movss 0x310(%rsp,%r15,4), %xmm0
movss 0x2f0(%rsp,%r15,4), %xmm1
movss 0x300(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%r9,%r8,4)
movq 0x8(%rsi), %rax
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movd 0x50(%rdi,%r15,4), %xmm3
pshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x320(%rsp,%r15,4), %xmm4
movss 0x330(%rsp,%r15,4), %xmm5
movss 0x340(%rsp,%r15,4), %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm4, 0x360(%rsp)
movaps %xmm5, 0x370(%rsp)
movaps %xmm6, 0x380(%rsp)
movaps %xmm1, 0x390(%rsp)
movaps %xmm2, 0x3a0(%rsp)
movdqa %xmm3, 0x3b0(%rsp)
movdqa %xmm0, 0x3c0(%rsp)
leaq 0x3d0(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x3d0(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x3e0(%rsp)
movq 0x1d8(%rsp), %rcx
movdqa (%rcx), %xmm0
movdqa %xmm0, 0x130(%rsp)
leaq 0x130(%rsp), %rcx
movq %rcx, 0x210(%rsp)
movq 0x18(%r13), %rcx
movq %rcx, 0x218(%rsp)
movq %rax, 0x220(%rsp)
movq %r9, 0x228(%rsp)
leaq 0x360(%rsp), %rax
movq %rax, 0x230(%rsp)
movl $0x4, 0x238(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
je 0x19c1e8
movq %rdi, 0x40(%rsp)
leaq 0x210(%rsp), %rdi
callq *%rax
movq 0x1a0(%rsp), %r10
movq 0x40(%rsp), %rdi
movb 0xf(%rsp), %dl
movaps 0x60(%rsp), %xmm15
movaps 0x70(%rsp), %xmm14
movaps 0x80(%rsp), %xmm13
movaps 0x90(%rsp), %xmm12
movaps 0xa0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm7
movq 0xce8(%rsp), %rsi
movq 0x30(%rsp), %r8
movq 0x38(%rsp), %r9
movdqa 0x130(%rsp), %xmm1
ptest %xmm1, %xmm1
movaps 0x100(%rsp), %xmm5
movaps 0xf0(%rsp), %xmm6
je 0x19c2f3
movq 0x10(%rsi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x19c2b5
testb $0x2, (%rcx)
jne 0x19c22d
testb $0x40, 0x3e(%r13)
je 0x19c2b5
movq %rdi, %r13
leaq 0x210(%rsp), %rdi
callq *%rax
movq 0x1a0(%rsp), %r10
movq %r13, %rdi
movb 0xf(%rsp), %dl
movaps 0xf0(%rsp), %xmm6
movaps 0x100(%rsp), %xmm5
movaps 0x60(%rsp), %xmm15
movaps 0x70(%rsp), %xmm14
movaps 0x80(%rsp), %xmm13
movaps 0x90(%rsp), %xmm12
movaps 0xa0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm7
movq 0xce8(%rsp), %rsi
movq 0x30(%rsp), %r8
movq 0x38(%rsp), %r9
movdqa 0x130(%rsp), %xmm0
pcmpeqd 0x1d4f74a(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d4fb4e(%rip), %xmm1 # 0x1eebe20
movq 0x228(%rsp), %rax
movaps 0x1d4f71f(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x19c303
pcmpeqd 0x1d4f715(%rip), %xmm1 # 0x1eeba10
pxor 0x1d4fb1d(%rip), %xmm1 # 0x1eebe20
pslld $0x1f, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
sete %al
jne 0x19bffe
movd 0x10(%rsp), %xmm0
movd %xmm0, 0x80(%r9,%r8,4)
btcq %r15, %rbp
jmp 0x19bffe
movaps 0x140(%rsp), %xmm10
movaps 0x240(%rsp), %xmm4
movaps %xmm4, %xmm3
movaps 0x400(%rsp), %xmm0
unpcklps %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
unpckhps %xmm0, %xmm4 # xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
movaps 0x410(%rsp), %xmm1
movaps %xmm1, %xmm0
movaps 0x170(%rsp), %xmm2
unpcklps %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
unpckhps %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
unpcklps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
movaps %xmm3, %xmm8
unpcklps %xmm0, %xmm8 # xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
unpckhps %xmm0, %xmm3 # xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
movaps %xmm10, %xmm0
movaps 0x160(%rsp), %xmm1
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
unpckhps %xmm1, %xmm10 # xmm10 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
movaps 0x150(%rsp), %xmm2
movaps %xmm2, %xmm1
movaps 0x180(%rsp), %xmm5
unpcklps %xmm5, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
unpckhps %xmm5, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
unpcklps %xmm2, %xmm10 # xmm10 = xmm10[0],xmm2[0],xmm10[1],xmm2[1]
movaps %xmm0, %xmm2
unpcklps %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
unpckhps %xmm1, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movaps 0x470(%rsp), %xmm1
mulps %xmm1, %xmm2
mulps %xmm1, %xmm0
mulps %xmm1, %xmm10
movaps 0x420(%rsp), %xmm1
mulps %xmm1, %xmm8
addps %xmm2, %xmm8
mulps %xmm1, %xmm3
addps %xmm0, %xmm3
mulps %xmm1, %xmm4
addps %xmm10, %xmm4
movss (%r9,%r8,4), %xmm1
movss 0x10(%r9,%r8,4), %xmm2
movss 0x20(%r9,%r8,4), %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm1, %xmm8
subps %xmm2, %xmm3
subps %xmm0, %xmm4
movaps 0x430(%rsp), %xmm5
subps %xmm1, %xmm5
movaps %xmm5, %xmm9
movaps 0x440(%rsp), %xmm12
subps %xmm2, %xmm12
movaps 0x50(%rsp), %xmm5
subps %xmm0, %xmm5
movaps %xmm5, 0x50(%rsp)
movaps 0x460(%rsp), %xmm15
subps %xmm1, %xmm15
movaps 0x450(%rsp), %xmm14
subps %xmm2, %xmm14
movaps 0x1b0(%rsp), %xmm13
subps %xmm0, %xmm13
movaps %xmm15, %xmm6
subps %xmm8, %xmm6
movaps %xmm14, %xmm10
subps %xmm3, %xmm10
movaps %xmm13, %xmm7
subps %xmm4, %xmm7
movaps %xmm15, %xmm0
addps %xmm8, %xmm0
movaps %xmm14, %xmm1
addps %xmm3, %xmm1
movaps %xmm13, %xmm2
addps %xmm4, %xmm2
movaps %xmm6, %xmm5
mulps %xmm1, %xmm5
mulps %xmm7, %xmm1
movaps %xmm10, %xmm11
mulps %xmm2, %xmm11
subps %xmm1, %xmm11
movaps %xmm6, 0x1b0(%rsp)
mulps %xmm6, %xmm2
movaps %xmm0, %xmm1
movaps %xmm7, 0x1a0(%rsp)
mulps %xmm7, %xmm1
subps %xmm2, %xmm1
movss 0x50(%r9,%r8,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm2, 0x10(%rsp)
movaps %xmm10, 0x140(%rsp)
mulps %xmm10, %xmm0
subps %xmm0, %xmm5
movss 0x60(%r9,%r8,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm6, %xmm5
mulps %xmm2, %xmm1
addps %xmm5, %xmm1
movss 0x40(%r9,%r8,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x20(%rsp)
mulps %xmm0, %xmm11
addps %xmm1, %xmm11
movaps %xmm8, %xmm0
movaps %xmm9, %xmm5
subps %xmm9, %xmm0
movaps %xmm0, %xmm7
movaps %xmm3, %xmm9
movaps %xmm12, %xmm10
subps %xmm12, %xmm9
movaps %xmm4, %xmm12
movaps 0x50(%rsp), %xmm2
subps %xmm2, %xmm12
movaps %xmm3, 0x190(%rsp)
movaps %xmm3, %xmm0
addps %xmm10, %xmm0
movaps %xmm4, 0x240(%rsp)
movaps %xmm4, %xmm1
addps %xmm2, %xmm1
movaps %xmm7, %xmm3
movaps %xmm7, 0x1c0(%rsp)
movaps %xmm7, %xmm2
mulps %xmm0, %xmm2
mulps %xmm12, %xmm0
movaps %xmm9, %xmm7
mulps %xmm1, %xmm7
subps %xmm0, %xmm7
movaps %xmm8, 0x180(%rsp)
addps %xmm5, %xmm8
movaps %xmm5, %xmm4
mulps %xmm3, %xmm1
movaps %xmm8, %xmm5
movaps %xmm12, 0x170(%rsp)
mulps %xmm12, %xmm5
subps %xmm1, %xmm5
movaps %xmm9, 0x160(%rsp)
mulps %xmm9, %xmm8
subps %xmm8, %xmm2
mulps %xmm6, %xmm2
movaps 0x10(%rsp), %xmm3
mulps %xmm3, %xmm5
addps %xmm2, %xmm5
movaps 0x20(%rsp), %xmm8
mulps %xmm8, %xmm7
addps %xmm5, %xmm7
movaps %xmm4, %xmm0
subps %xmm15, %xmm0
addps %xmm4, %xmm15
movaps %xmm0, %xmm2
movaps %xmm10, %xmm9
subps %xmm14, %xmm9
addps %xmm10, %xmm14
movaps 0x50(%rsp), %xmm0
movaps %xmm0, %xmm12
subps %xmm13, %xmm12
addps %xmm0, %xmm13
movaps %xmm2, %xmm1
mulps %xmm14, %xmm1
mulps %xmm12, %xmm14
movaps %xmm9, %xmm0
mulps %xmm13, %xmm0
subps %xmm14, %xmm0
movaps %xmm2, %xmm4
mulps %xmm2, %xmm13
movaps %xmm15, %xmm2
mulps %xmm12, %xmm2
subps %xmm13, %xmm2
mulps %xmm9, %xmm15
subps %xmm15, %xmm1
movaps %xmm6, 0x50(%rsp)
mulps %xmm6, %xmm1
mulps %xmm3, %xmm2
addps %xmm1, %xmm2
mulps %xmm8, %xmm0
addps %xmm2, %xmm0
movaps %xmm11, %xmm15
addps %xmm7, %xmm15
addps %xmm0, %xmm15
movaps %xmm11, %xmm1
minps %xmm7, %xmm1
minps %xmm0, %xmm1
movaps %xmm11, 0x110(%rsp)
movaps %xmm11, %xmm5
movaps %xmm7, 0x120(%rsp)
maxps %xmm7, %xmm5
movaps 0x1c0(%rsp), %xmm7
maxps %xmm0, %xmm5
movaps %xmm15, 0x250(%rsp)
andps 0x1d50049(%rip), %xmm15 # 0x1eec6c0
movaps %xmm15, %xmm0
mulps 0x1d556ee(%rip), %xmm0 # 0x1ef1d70
cmpleps %xmm0, %xmm5
xorps 0x1d50043(%rip), %xmm0 # 0x1eec6d0
cmpnltps %xmm0, %xmm1
orps %xmm1, %xmm5
movmskps %xmm5, %eax
testl %eax, %eax
je 0x19cce8
movaps 0x160(%rsp), %xmm6
movaps %xmm6, %xmm0
movaps 0x1a0(%rsp), %xmm3
mulps %xmm3, %xmm0
movaps %xmm7, %xmm1
movaps 0x140(%rsp), %xmm11
mulps %xmm11, %xmm1
movaps %xmm9, %xmm14
movaps 0x170(%rsp), %xmm10
mulps %xmm10, %xmm14
movaps %xmm9, 0x150(%rsp)
movaps %xmm4, %xmm9
mulps %xmm6, %xmm4
mulps %xmm10, %xmm11
subps %xmm0, %xmm11
movaps 0x1b0(%rsp), %xmm13
movaps %xmm13, %xmm2
mulps %xmm6, %xmm13
mulps %xmm12, %xmm6
subps %xmm14, %xmm6
movaps 0x1d4ffb1(%rip), %xmm8 # 0x1eec6c0
andps %xmm8, %xmm0
andps %xmm8, %xmm14
cmpltps %xmm14, %xmm0
blendvps %xmm0, %xmm11, %xmm6
movaps %xmm7, %xmm0
mulps %xmm12, %xmm0
mulps %xmm10, %xmm2
mulps %xmm10, %xmm9
movaps 0x20(%rsp), %xmm10
mulps %xmm7, %xmm3
subps %xmm2, %xmm3
subps %xmm0, %xmm9
andps %xmm8, %xmm2
andps %xmm8, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm3, %xmm9
mulps 0x150(%rsp), %xmm7
subps %xmm1, %xmm13
subps %xmm4, %xmm7
andps %xmm8, %xmm1
andps %xmm8, %xmm4
cmpltps %xmm4, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm13, %xmm7
movaps 0x50(%rsp), %xmm1
mulps %xmm7, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm9, %xmm0
addps %xmm1, %xmm0
mulps %xmm6, %xmm10
addps %xmm0, %xmm10
addps %xmm10, %xmm10
movaps 0x240(%rsp), %xmm0
mulps %xmm7, %xmm0
movaps 0x190(%rsp), %xmm1
mulps %xmm9, %xmm1
addps %xmm0, %xmm1
movaps 0x180(%rsp), %xmm0
mulps %xmm6, %xmm0
addps %xmm1, %xmm0
movaps %xmm0, %xmm3
rcpps %xmm10, %xmm1
movaps %xmm10, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d50239(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movss 0x80(%r9,%r8,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r8,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm5, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19cce8
cmpneqps 0x1d4f1ec(%rip), %xmm10 # 0x1eeba10
andps %xmm10, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x19cce8
movaps 0x110(%rsp), %xmm5
movaps %xmm5, 0x280(%rsp)
movaps 0x120(%rsp), %xmm4
movaps %xmm4, 0x290(%rsp)
movaps 0x250(%rsp), %xmm8
movaps %xmm8, 0x2a0(%rsp)
movaps %xmm6, 0x2b0(%rsp)
movaps %xmm9, 0x2c0(%rsp)
movaps %xmm7, 0x2d0(%rsp)
movaps %xmm1, 0x2e0(%rsp)
movaps %xmm0, 0x310(%rsp)
movdqa 0x3f0(%rsp), %xmm0
movdqa %xmm0, 0x350(%rsp)
movq (%rsi), %r10
rcpps %xmm8, %xmm2
mulps %xmm2, %xmm8
movaps 0x1d5015e(%rip), %xmm3 # 0x1eeca10
movaps %xmm3, %xmm1
subps %xmm8, %xmm1
mulps %xmm2, %xmm1
addps %xmm2, %xmm1
cmpnltps 0x1d55478(%rip), %xmm15 # 0x1ef1d40
andps %xmm1, %xmm15
mulps %xmm15, %xmm5
minps %xmm3, %xmm5
mulps %xmm4, %xmm15
minps %xmm3, %xmm15
movaps %xmm3, %xmm1
subps %xmm5, %xmm1
movaps %xmm3, %xmm2
subps %xmm15, %xmm2
blendvps %xmm0, %xmm1, %xmm5
movaps %xmm5, 0x2f0(%rsp)
blendvps %xmm0, %xmm2, %xmm15
movaps %xmm15, 0x300(%rsp)
movaps %xmm6, 0x320(%rsp)
movaps %xmm9, 0x330(%rsp)
movaps %xmm7, 0x340(%rsp)
movzbl %al, %ebp
movaps 0xe0(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
movaps 0x70(%rsp), %xmm14
movaps 0x60(%rsp), %xmm15
movaps 0x100(%rsp), %xmm5
movaps 0xf0(%rsp), %xmm6
bsfq %rbp, %r15
movl 0x40(%rdi,%r15,4), %ecx
movq 0x1e8(%r10), %rax
movq (%rax,%rcx,8), %r13
movl 0x90(%r9,%r8,4), %eax
testl %eax, 0x34(%r13)
je 0x19c9b2
movq 0x10(%rsi), %rax
cmpq $0x0, 0x10(%rax)
jne 0x19c9ca
cmpq $0x0, 0x48(%r13)
jne 0x19c9ca
xorl %eax, %eax
jmp 0x19c9b8
btcq %r15, %rbp
movb $0x1, %al
testb %al, %al
je 0x19cda2
testq %rbp, %rbp
jne 0x19c97a
jmp 0x19cce8
movss 0x80(%r9,%r8,4), %xmm0
movss %xmm0, 0x10(%rsp)
movss 0x310(%rsp,%r15,4), %xmm0
movss 0x2f0(%rsp,%r15,4), %xmm1
movss 0x300(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%r9,%r8,4)
movq 0x8(%rsi), %rax
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movd 0x50(%rdi,%r15,4), %xmm3
pshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x320(%rsp,%r15,4), %xmm4
movss 0x330(%rsp,%r15,4), %xmm5
movss 0x340(%rsp,%r15,4), %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm4, 0x360(%rsp)
movaps %xmm5, 0x370(%rsp)
movaps %xmm6, 0x380(%rsp)
movaps %xmm1, 0x390(%rsp)
movaps %xmm2, 0x3a0(%rsp)
movdqa %xmm3, 0x3b0(%rsp)
movdqa %xmm0, 0x3c0(%rsp)
leaq 0x3d0(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x3d0(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x3e0(%rsp)
movq 0x1d8(%rsp), %rcx
movdqa (%rcx), %xmm0
movdqa %xmm0, 0x130(%rsp)
leaq 0x130(%rsp), %rcx
movq %rcx, 0x210(%rsp)
movq 0x18(%r13), %rcx
movq %rcx, 0x218(%rsp)
movq %rax, 0x220(%rsp)
movq %r9, 0x228(%rsp)
leaq 0x360(%rsp), %rax
movq %rax, 0x230(%rsp)
movl $0x4, 0x238(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %rdi, 0x40(%rsp)
je 0x19cba4
leaq 0x210(%rsp), %rdi
movq %r10, 0x20(%rsp)
callq *%rax
movq 0x20(%rsp), %r10
movq 0x40(%rsp), %rdi
movb 0xf(%rsp), %dl
movaps 0x60(%rsp), %xmm15
movaps 0x70(%rsp), %xmm14
movaps 0x80(%rsp), %xmm13
movaps 0x90(%rsp), %xmm12
movaps 0xa0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm7
movq 0xce8(%rsp), %rsi
movq 0x30(%rsp), %r8
movq 0x38(%rsp), %r9
movdqa 0x130(%rsp), %xmm1
ptest %xmm1, %xmm1
movaps 0x100(%rsp), %xmm5
movaps 0xf0(%rsp), %xmm6
je 0x19ccac
movq 0x10(%rsi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x19cc6e
testb $0x2, (%rcx)
jne 0x19cbe9
testb $0x40, 0x3e(%r13)
je 0x19cc6e
leaq 0x210(%rsp), %rdi
movq %r10, %r13
callq *%rax
movq %r13, %r10
movq 0x40(%rsp), %rdi
movb 0xf(%rsp), %dl
movaps 0xf0(%rsp), %xmm6
movaps 0x100(%rsp), %xmm5
movaps 0x60(%rsp), %xmm15
movaps 0x70(%rsp), %xmm14
movaps 0x80(%rsp), %xmm13
movaps 0x90(%rsp), %xmm12
movaps 0xa0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm7
movq 0xce8(%rsp), %rsi
movq 0x30(%rsp), %r8
movq 0x38(%rsp), %r9
movdqa 0x130(%rsp), %xmm0
pcmpeqd 0x1d4ed91(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d4f195(%rip), %xmm1 # 0x1eebe20
movq 0x228(%rsp), %rax
movaps 0x1d4ed66(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x19ccbc
pcmpeqd 0x1d4ed5c(%rip), %xmm1 # 0x1eeba10
pxor 0x1d4f164(%rip), %xmm1 # 0x1eebe20
pslld $0x1f, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
sete %al
jne 0x19c9b8
movd 0x10(%rsp), %xmm0
movd %xmm0, 0x80(%r9,%r8,4)
btcq %r15, %rbp
jmp 0x19c9b8
movq 0x270(%rsp), %rdx
incq %rdx
movq 0x268(%rsp), %rax
cmpq %rax, %rdx
setb %cl
jne 0x19b5b4
movaps 0xe0(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movq 0x208(%rsp), %rdi
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
movaps 0x70(%rsp), %xmm14
movaps 0x60(%rsp), %xmm15
movq 0x200(%rsp), %r10
movq 0x1f8(%rsp), %r11
movq 0x1f0(%rsp), %r15
movq 0x1e8(%rsp), %r13
movq 0x1e0(%rsp), %rbp
movaps 0x100(%rsp), %xmm5
movaps 0xf0(%rsp), %xmm6
movq 0x48(%rsp), %rcx
testb $0x3, %r14b
je 0x19b3c6
jmp 0x19cdee
testb $0x1, %dl
movq 0x208(%rsp), %rdi
movq 0x200(%rsp), %r10
movq 0x1f8(%rsp), %r11
movq 0x1f0(%rsp), %r15
movq 0x1e8(%rsp), %r13
movq 0x1e0(%rsp), %rbp
movq 0x48(%rsp), %rcx
je 0x19cd96
movl $0xff800000, 0x80(%r9,%r8,4) # imm = 0xFF800000
pushq $0x1
popq %r14
jmp 0x19cd96
leaq 0x500(%rsp), %rax
cmpq %rax, %rcx
setne %al
addq $0xca8, %rsp # imm = 0xCA8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 257, false, embree::sse42::VirtualCurveIntersectorK<4>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x898, %rsp # imm = 0x898
movq %r9, 0x8(%rsp)
movq %r8, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
movq 0x8d0(%rsp), %rax
leaq 0xf8(%rsp), %rdi
movq %rdx, -0x8(%rdi)
movss (%rax,%rcx,4), %xmm14
movss 0x10(%rax,%rcx,4), %xmm15
movss 0x20(%rax,%rcx,4), %xmm12
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xc0(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xb0(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xa0(%rsp)
movss 0x60(%rax,%rcx,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movss 0x70(%rax,%rcx,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movss 0x80(%rax,%rcx,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r9
movslq 0xa0(%rax,%rcx,4), %r15
movslq 0xb0(%rax,%rcx,4), %rbp
movq %r9, %r13
xorq $0x10, %r13
movq %r15, %r14
xorq $0x10, %r14
movq %rbp, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movq %rcx, (%rsp)
movss 0xd0(%rax,%rcx,4), %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
leaq 0xf0(%rsp), %r10
movaps %xmm12, 0x90(%rsp)
movaps %xmm4, 0x80(%rsp)
movaps %xmm5, 0x70(%rsp)
movaps %xmm6, 0x60(%rsp)
movaps %xmm7, 0x50(%rsp)
movaps %xmm8, 0x40(%rsp)
movaps %xmm14, 0xe0(%rsp)
movaps %xmm15, 0xd0(%rsp)
movq %r9, 0x20(%rsp)
movq %rdi, %r11
cmpq %r10, %rdi
je 0x19dacc
leaq -0x8(%r11), %rdi
movq -0x8(%r11), %r8
movq %r8, %rax
andq $0xf, %rax
jne 0x19d785
movaps 0x20(%r8,%r9), %xmm0
subps %xmm14, %xmm0
mulps %xmm4, %xmm0
movaps 0x20(%r8,%r15), %xmm1
subps %xmm15, %xmm1
mulps %xmm5, %xmm1
pmaxsd %xmm0, %xmm1
movaps 0x20(%r8,%rbp), %xmm0
subps %xmm12, %xmm0
mulps %xmm6, %xmm0
movaps 0x20(%r8,%r13), %xmm2
subps %xmm14, %xmm2
mulps %xmm4, %xmm2
movaps 0x20(%r8,%r14), %xmm3
subps %xmm15, %xmm3
mulps %xmm5, %xmm3
pminsd %xmm2, %xmm3
movaps 0x20(%r8,%r12), %xmm2
subps %xmm12, %xmm2
mulps %xmm6, %xmm2
pmaxsd %xmm7, %xmm0
pmaxsd %xmm1, %xmm0
pminsd %xmm8, %xmm2
pminsd %xmm3, %xmm2
pcmpgtd %xmm2, %xmm0
movmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %ebx
movb $0x1, %al
testb %al, %al
je 0x19d78e
testq %rbx, %rbx
je 0x19d792
andq $-0x10, %r8
bsfq %rbx, %rax
leaq -0x1(%rbx), %rsi
xorl %ecx, %ecx
movq (%r8,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rsi
jne 0x19d797
movq %rax, %r8
testl %ecx, %ecx
je 0x19d6b8
jmp 0x19da15
cmpl $0x2, %eax
je 0x19d7da
xorl %eax, %eax
jmp 0x19d740
pushq $0x6
jmp 0x19d794
pushq $0x4
popq %rcx
jmp 0x19d778
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rdx
leaq -0x1(%rsi), %rax
movq (%r8,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rsi, %rax
je 0x19d7d5
movq %rdx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rdx
leaq -0x1(%rax), %rsi
jmp 0x19d7a6
movq %rdx, %r8
jmp 0x19d778
movq %r8, %rax
andq $-0x10, %rax
movaps 0x80(%rax), %xmm9
movaps 0xa0(%rsp), %xmm10
movaps %xmm10, %xmm0
mulps %xmm9, %xmm0
movaps 0x90(%rax), %xmm8
movaps %xmm10, %xmm4
mulps %xmm8, %xmm4
movaps 0xa0(%rax), %xmm6
mulps %xmm6, %xmm10
movaps 0x20(%rax), %xmm3
movaps 0x30(%rax), %xmm2
movaps 0x40(%rax), %xmm1
movaps 0x50(%rax), %xmm7
movaps 0xb0(%rsp), %xmm13
movaps %xmm13, %xmm11
mulps %xmm7, %xmm11
addps %xmm0, %xmm11
movaps 0x60(%rax), %xmm5
movaps %xmm13, %xmm0
mulps %xmm5, %xmm0
addps %xmm4, %xmm0
movaps 0x70(%rax), %xmm4
movaps %xmm4, 0x10(%rsp)
mulps %xmm4, %xmm13
addps %xmm10, %xmm13
movaps 0xc0(%rsp), %xmm12
movaps %xmm12, %xmm10
mulps %xmm3, %xmm10
addps %xmm11, %xmm10
movaps %xmm12, %xmm11
mulps %xmm2, %xmm11
addps %xmm0, %xmm11
mulps %xmm1, %xmm12
addps %xmm13, %xmm12
movaps %xmm10, %xmm0
movaps 0x1d4ee32(%rip), %xmm13 # 0x1eec6c0
andps %xmm13, %xmm0
movaps 0x1d544a7(%rip), %xmm4 # 0x1ef1d40
cmpltps %xmm4, %xmm0
blendvps %xmm0, %xmm4, %xmm10
movaps %xmm11, %xmm0
andps %xmm13, %xmm0
cmpltps %xmm4, %xmm0
blendvps %xmm0, %xmm4, %xmm11
movaps %xmm12, %xmm0
andps %xmm13, %xmm0
cmpltps %xmm4, %xmm0
blendvps %xmm0, %xmm4, %xmm12
rcpps %xmm10, %xmm13
mulps %xmm13, %xmm10
movaps 0x1d4f13a(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm0
subps %xmm10, %xmm0
mulps %xmm13, %xmm0
addps %xmm13, %xmm0
rcpps %xmm11, %xmm13
mulps %xmm13, %xmm11
movaps %xmm4, %xmm10
subps %xmm11, %xmm10
mulps %xmm13, %xmm10
addps %xmm13, %xmm10
rcpps %xmm12, %xmm13
mulps %xmm13, %xmm12
movaps %xmm4, %xmm11
subps %xmm12, %xmm11
movaps 0x90(%rsp), %xmm12
mulps %xmm13, %xmm11
addps %xmm13, %xmm11
mulps %xmm12, %xmm9
addps 0xb0(%rax), %xmm9
mulps %xmm15, %xmm7
addps %xmm9, %xmm7
mulps %xmm12, %xmm8
addps 0xc0(%rax), %xmm8
mulps %xmm15, %xmm5
addps %xmm8, %xmm5
movaps 0x40(%rsp), %xmm8
mulps %xmm12, %xmm6
addps 0xd0(%rax), %xmm6
movaps 0x10(%rsp), %xmm4
mulps %xmm15, %xmm4
addps %xmm6, %xmm4
movaps %xmm4, %xmm9
mulps %xmm14, %xmm3
addps %xmm7, %xmm3
movaps 0x50(%rsp), %xmm7
movaps %xmm0, %xmm6
movaps 0x1d4ed53(%rip), %xmm4 # 0x1eec6d0
xorps %xmm4, %xmm6
mulps %xmm6, %xmm3
movaps 0x60(%rsp), %xmm6
mulps %xmm14, %xmm2
addps %xmm5, %xmm2
movaps %xmm10, %xmm5
xorps %xmm4, %xmm5
mulps %xmm5, %xmm2
movaps %xmm11, %xmm5
xorps %xmm4, %xmm5
mulps %xmm14, %xmm1
addps %xmm9, %xmm1
mulps %xmm5, %xmm1
addps %xmm3, %xmm0
addps %xmm2, %xmm10
addps %xmm1, %xmm11
movaps %xmm2, %xmm4
pminsd %xmm10, %xmm4
movaps %xmm1, %xmm5
pminsd %xmm11, %xmm5
maxps %xmm5, %xmm4
movaps %xmm3, %xmm5
pminsd %xmm0, %xmm5
pmaxsd %xmm3, %xmm0
pmaxsd %xmm2, %xmm10
pmaxsd %xmm1, %xmm11
minps %xmm11, %xmm10
movaps %xmm7, %xmm1
maxps %xmm5, %xmm1
movaps 0x70(%rsp), %xmm5
maxps %xmm4, %xmm1
movaps 0x80(%rsp), %xmm4
movaps %xmm8, %xmm2
minps %xmm0, %xmm2
minps %xmm10, %xmm2
cmpleps %xmm2, %xmm1
movmskps %xmm1, %ebx
jmp 0x19d73e
cmpl $0x6, %ecx
jne 0x19dac3
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x28(%rsp), %rcx
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq %rdi, 0x10(%rsp)
movq 0x30(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq (%rsp), %rdx
movq 0x8d8(%rsp), %rcx
movq %r11, 0x38(%rsp)
callq *0x18(%r9,%rax)
movq 0x38(%rsp), %r11
leaq 0xf0(%rsp), %r10
movaps 0x40(%rsp), %xmm8
movaps 0x50(%rsp), %xmm7
movq 0x20(%rsp), %r9
movaps 0x60(%rsp), %xmm6
movaps 0x70(%rsp), %xmm5
movaps 0x80(%rsp), %xmm4
movq 0x10(%rsp), %rdi
movaps 0x90(%rsp), %xmm12
movaps 0xd0(%rsp), %xmm15
movaps 0xe0(%rsp), %xmm14
xorl %ecx, %ecx
testb %al, %al
je 0x19dac3
movq 0x8(%rsp), %rax
movq (%rsp), %rcx
movl $0xff800000, 0x80(%rax,%rcx,4) # imm = 0xFF800000
pushq $0x1
popq %rcx
testb $0x3, %cl
je 0x19d6a4
cmpq %r10, %r11
setne %al
addq $0x898, %rsp # imm = 0x898
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16781328, false, embree::sse42::VirtualCurveIntersectorK<4>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10a8, %rsp # imm = 0x10A8
movq %r9, %rsi
movq %r8, 0x60(%rsp)
movq %rcx, %r10
movq %rdi, 0x58(%rsp)
leaq 0x170(%rsp), %rdi
movq %rdx, -0x10(%rdi)
andl $0x0, -0x8(%rdi)
movq 0x10e0(%rsp), %rax
movss (%rax,%rcx,4), %xmm11
movss 0x10(%rax,%rcx,4), %xmm12
movss 0x20(%rax,%rcx,4), %xmm13
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xf0(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xe0(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xd0(%rsp)
movss 0x60(%rax,%rcx,4), %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
movss 0x70(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x80(%rax,%rcx,4), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r11
movslq 0xa0(%rax,%rcx,4), %rbx
movslq 0xb0(%rax,%rcx,4), %r15
movq %r11, %r13
xorq $0x10, %r13
movq %rbx, %r14
xorq $0x10, %r14
movq %r15, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movq %rcx, 0x8(%rsp)
movaps %xmm11, 0xc0(%rsp)
movaps %xmm12, 0x30(%rsp)
movaps %xmm13, 0x40(%rsp)
movaps %xmm8, 0xb0(%rsp)
movaps %xmm9, 0xa0(%rsp)
movaps %xmm10, 0x90(%rsp)
movq %r11, (%rsp)
movaps %xmm15, 0x80(%rsp)
movq %r9, 0x68(%rsp)
leaq 0x160(%rsp), %rax
cmpq %rax, %rdi
je 0x19e3a0
movss -0x8(%rdi), %xmm0
addq $-0x10, %rdi
ucomiss 0x80(%rsi,%r10,4), %xmm0
ja 0x19dc2d
movq (%rdi), %r8
testb $0x8, %r8b
jne 0x19dd51
movss 0x70(%rsi,%r10,4), %xmm2
movl %r8d, %ecx
andl $0x7, %ecx
movq %r8, %rax
andq $-0x10, %rax
cmpq $0x3, %rcx
je 0x19dda7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps 0x80(%rax,%r11), %xmm0
mulps %xmm2, %xmm0
addps 0x20(%rax,%r11), %xmm0
subps %xmm11, %xmm0
movaps 0x80(%rax,%rbx), %xmm3
mulps %xmm2, %xmm3
addps 0x20(%rax,%rbx), %xmm3
mulps %xmm8, %xmm0
subps %xmm12, %xmm3
mulps %xmm9, %xmm3
movaps 0x80(%rax,%r15), %xmm4
mulps %xmm2, %xmm4
addps 0x20(%rax,%r15), %xmm4
subps %xmm13, %xmm4
mulps %xmm10, %xmm4
maxps %xmm4, %xmm3
movaps 0x80(%rax,%r13), %xmm5
mulps %xmm2, %xmm5
addps 0x20(%rax,%r13), %xmm5
subps %xmm11, %xmm5
mulps %xmm8, %xmm5
movaps 0x80(%rax,%r14), %xmm6
mulps %xmm2, %xmm6
addps 0x20(%rax,%r14), %xmm6
subps %xmm12, %xmm6
mulps %xmm9, %xmm6
movaps 0x80(%rax,%r12), %xmm4
mulps %xmm2, %xmm4
addps 0x20(%rax,%r12), %xmm4
subps %xmm13, %xmm4
mulps %xmm10, %xmm4
minps %xmm4, %xmm6
movaps %xmm15, %xmm4
maxps %xmm0, %xmm4
maxps %xmm3, %xmm4
movaps %xmm1, %xmm0
minps %xmm5, %xmm0
minps %xmm6, %xmm0
cmpl $0x6, %ecx
je 0x19e0fa
movaps %xmm4, %xmm2
cmpleps %xmm0, %xmm2
pslld $0x1f, %xmm2
movmskps %xmm2, %ebp
movaps %xmm4, 0x20(%rsp)
testb $0x8, %r8b
jne 0x19dda0
testq %rbp, %rbp
je 0x19e0f2
andq $-0x10, %r8
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r9
xorl %eax, %eax
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rbp, %r9
jne 0x19e11f
movq %rdx, %r8
testl %eax, %eax
je 0x19dc55
jmp 0x19e30a
pushq $0x6
jmp 0x19e0f4
movaps 0x20(%rax), %xmm11
movaps %xmm11, 0x130(%rsp)
movaps 0x30(%rax), %xmm9
movaps %xmm9, 0x120(%rsp)
movaps 0x50(%rax), %xmm8
movaps %xmm8, 0x100(%rsp)
movaps 0x60(%rax), %xmm15
movaps %xmm15, 0x110(%rsp)
movaps 0x70(%rax), %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0x80(%rax), %xmm13
movaps 0x90(%rax), %xmm12
movaps 0xa0(%rax), %xmm0
movaps %xmm0, 0x70(%rsp)
movss 0x1d4e908(%rip), %xmm7 # 0x1eec714
subss %xmm2, %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm7, %xmm10
movaps 0xe0(%rax), %xmm0
mulps %xmm2, %xmm0
movaps %xmm0, %xmm4
movaps 0xf0(%rax), %xmm0
mulps %xmm2, %xmm0
movaps %xmm0, %xmm3
movaps 0x100(%rax), %xmm0
mulps %xmm2, %xmm0
mulss 0x1d4dbe0(%rip), %xmm7 # 0x1eeba24
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
addps %xmm7, %xmm4
movaps %xmm4, 0x140(%rsp)
addps %xmm7, %xmm3
movaps %xmm3, 0x150(%rsp)
addps %xmm0, %xmm7
movaps 0xd0(%rsp), %xmm14
movaps %xmm14, %xmm0
mulps %xmm13, %xmm0
movaps %xmm14, %xmm3
mulps %xmm12, %xmm3
movaps 0xe0(%rsp), %xmm6
movaps %xmm6, %xmm5
mulps %xmm8, %xmm5
addps %xmm0, %xmm5
movaps %xmm6, %xmm0
mulps %xmm15, %xmm0
addps %xmm3, %xmm0
movaps 0xf0(%rsp), %xmm3
movaps %xmm3, %xmm4
mulps %xmm11, %xmm4
addps %xmm5, %xmm4
movaps %xmm3, %xmm5
mulps %xmm9, %xmm5
addps %xmm0, %xmm5
movaps %xmm4, %xmm0
movaps 0x1d4e803(%rip), %xmm11 # 0x1eec6c0
andps %xmm11, %xmm0
movaps 0x1d53e77(%rip), %xmm8 # 0x1ef1d40
cmpltps %xmm8, %xmm0
blendvps %xmm0, %xmm8, %xmm4
movaps %xmm5, %xmm0
andps %xmm11, %xmm0
cmpltps %xmm8, %xmm0
blendvps %xmm0, %xmm8, %xmm5
movaps %xmm14, %xmm0
mulps 0x70(%rsp), %xmm0
mulps 0x10(%rsp), %xmm6
addps %xmm0, %xmm6
movaps 0x40(%rax), %xmm15
mulps %xmm15, %xmm3
addps %xmm6, %xmm3
movaps %xmm3, %xmm0
andps %xmm11, %xmm0
cmpltps %xmm8, %xmm0
blendvps %xmm0, %xmm8, %xmm3
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movaps 0x110(%rax), %xmm6
mulps %xmm2, %xmm6
movaps 0x120(%rax), %xmm14
mulps %xmm2, %xmm14
mulps 0x130(%rax), %xmm2
addps %xmm10, %xmm6
addps %xmm10, %xmm14
addps %xmm10, %xmm2
rcpps %xmm4, %xmm0
mulps %xmm0, %xmm4
movaps 0x1d4eabf(%rip), %xmm11 # 0x1eeca10
movaps %xmm11, %xmm10
subps %xmm4, %xmm10
mulps %xmm0, %xmm10
addps %xmm0, %xmm10
rcpps %xmm5, %xmm0
mulps %xmm0, %xmm5
movaps %xmm11, %xmm4
subps %xmm5, %xmm4
mulps %xmm0, %xmm4
addps %xmm0, %xmm4
rcpps %xmm3, %xmm5
mulps %xmm5, %xmm3
movaps %xmm11, %xmm0
subps %xmm3, %xmm0
mulps %xmm5, %xmm0
addps %xmm5, %xmm0
mulps 0x40(%rsp), %xmm13
addps 0xb0(%rax), %xmm13
movaps 0x100(%rsp), %xmm9
mulps 0x30(%rsp), %xmm9
addps %xmm13, %xmm9
movaps %xmm9, %xmm8
movaps 0x40(%rsp), %xmm13
mulps %xmm13, %xmm12
addps 0xc0(%rax), %xmm12
movaps 0x110(%rsp), %xmm9
mulps 0x30(%rsp), %xmm9
addps %xmm12, %xmm9
movaps %xmm9, %xmm5
movaps 0x30(%rsp), %xmm12
movaps 0x70(%rsp), %xmm3
mulps %xmm13, %xmm3
addps 0xd0(%rax), %xmm3
movaps %xmm3, %xmm9
movaps 0x10(%rsp), %xmm3
mulps %xmm12, %xmm3
addps %xmm9, %xmm3
movaps %xmm3, %xmm9
movaps 0xc0(%rsp), %xmm11
movaps 0x130(%rsp), %xmm3
mulps %xmm11, %xmm3
addps %xmm8, %xmm3
movaps %xmm3, %xmm8
movaps 0x120(%rsp), %xmm3
mulps %xmm11, %xmm3
addps %xmm5, %xmm3
movaps %xmm3, %xmm5
mulps %xmm11, %xmm15
addps %xmm9, %xmm15
movaps 0x140(%rsp), %xmm9
subps %xmm8, %xmm9
subps %xmm8, %xmm6
movaps 0x150(%rsp), %xmm3
subps %xmm5, %xmm3
subps %xmm5, %xmm14
subps %xmm15, %xmm7
subps %xmm15, %xmm2
movaps 0x80(%rsp), %xmm15
mulps %xmm10, %xmm9
mulps %xmm10, %xmm6
movaps 0x90(%rsp), %xmm10
mulps %xmm4, %xmm3
mulps %xmm0, %xmm7
mulps %xmm4, %xmm14
mulps %xmm0, %xmm2
movaps %xmm3, %xmm0
movaps %xmm3, %xmm4
pminsd %xmm14, %xmm0
movaps %xmm7, %xmm3
pminsd %xmm2, %xmm3
maxps %xmm3, %xmm0
movaps %xmm9, %xmm3
pminsd %xmm6, %xmm3
pmaxsd %xmm9, %xmm6
movaps 0xa0(%rsp), %xmm9
pmaxsd %xmm4, %xmm14
movaps 0xb0(%rsp), %xmm8
pmaxsd %xmm7, %xmm2
minps %xmm2, %xmm14
movaps %xmm15, %xmm2
maxps %xmm3, %xmm2
maxps %xmm0, %xmm2
movaps %xmm1, %xmm0
minps %xmm6, %xmm0
minps %xmm14, %xmm0
movaps %xmm2, 0x20(%rsp)
cmpleps %xmm0, %xmm2
movmskps %xmm2, %ebp
jmp 0x19dd51
pushq $0x4
popq %rax
jmp 0x19dd93
movaps %xmm4, %xmm3
cmpleps %xmm0, %xmm3
movaps 0xe0(%rax), %xmm0
cmpleps %xmm2, %xmm0
cmpltps 0xf0(%rax), %xmm2
andps %xmm0, %xmm2
andps %xmm3, %xmm2
jmp 0x19dd44
movq %rdi, 0x10(%rsp)
movl 0x20(%rsp,%rcx,4), %edi
bsfq %r9, %r10
leaq -0x1(%r9), %rcx
movq (%r8,%r10,8), %r11
prefetcht0 (%r11)
prefetcht0 0x40(%r11)
prefetcht0 0x80(%r11)
prefetcht0 0xc0(%r11)
movl 0x20(%rsp,%r10,4), %r10d
andq %r9, %rcx
jne 0x19e18f
movq 0x10(%rsp), %r8
leaq 0x10(%r8), %rcx
cmpl %r10d, %edi
jae 0x19e174
movq %r11, (%r8)
movl %r10d, 0x8(%r8)
movq %rcx, %rdi
movq %rdx, %r8
jmp 0x19e181
movq %rdx, (%r8)
movl %edi, 0x8(%r8)
movq %rcx, %rdi
movq %r11, %r8
movq 0x8(%rsp), %r10
movq (%rsp), %r11
jmp 0x19dd93
movq %rdx, %xmm2
movd %edi, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %r11, %xmm4
movd %r10d, %xmm0
punpcklqdq %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0]
bsfq %rcx, %r9
leaq -0x1(%rcx), %rdx
movq (%r8,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movq %rdi, %xmm3
movd 0x20(%rsp,%r9,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rcx, %rdx
jne 0x19e247
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
blendvps %xmm0, %xmm4, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm3, %xmm5
movaps %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movq 0x10(%rsp), %rdi
movaps %xmm2, (%rdi)
movaps %xmm3, 0x10(%rdi)
movq %xmm4, %r8
addq $0x20, %rdi
jmp 0x19e181
bsfq %rdx, %rcx
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
movq %rdx, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
movd 0x20(%rsp,%rcx,4), %xmm7
blendvps %xmm0, %xmm4, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm4
blendvps %xmm0, %xmm3, %xmm4
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm4, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm5
movaps %xmm6, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm6, %xmm5
movq 0x10(%rsp), %rdi
movaps %xmm2, (%rdi)
movaps %xmm5, 0x10(%rdi)
movaps %xmm4, 0x20(%rdi)
movq %xmm3, %r8
addq $0x30, %rdi
jmp 0x19e181
cmpl $0x6, %eax
jne 0x19dc2d
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x58(%rsp), %rcx
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq %rdi, 0x10(%rsp)
movq 0x60(%rsp), %rdi
movq %r10, %rdx
movq 0x10e8(%rsp), %rcx
callq *0x10(%r9,%rax)
movaps 0x80(%rsp), %xmm15
movq (%rsp), %r11
movaps 0x90(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm8
movaps 0x40(%rsp), %xmm13
movaps 0x30(%rsp), %xmm12
movaps 0xc0(%rsp), %xmm11
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %r10
movq 0x68(%rsp), %rsi
movss 0x80(%rsi,%r10,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
jmp 0x19dc2d
addq $0x10a8, %rsp # imm = 0x10A8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16781328, false, embree::sse42::VirtualCurveIntersectorK<4>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x8e8, %rsp # imm = 0x8E8
movq %r9, %rsi
movq %r8, 0x48(%rsp)
movq %rcx, %r10
movq %rdi, 0x38(%rsp)
movq 0x920(%rsp), %rax
leaq 0x148(%rsp), %r9
movq %rdx, -0x8(%r9)
movss (%rax,%rcx,4), %xmm6
movss 0x10(%rax,%rcx,4), %xmm12
movss 0x20(%rax,%rcx,4), %xmm7
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x130(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x120(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x110(%rsp)
movss 0x60(%rax,%rcx,4), %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
movss 0x70(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x80(%rax,%rcx,4), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r11
movslq 0xa0(%rax,%rcx,4), %rbx
movslq 0xb0(%rax,%rcx,4), %r15
movq %r11, %r13
xorq $0x10, %r13
movq %rbx, %r14
xorq $0x10, %r14
movq %r15, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
leaq 0x140(%rsp), %rdx
movaps %xmm6, 0xb0(%rsp)
movaps %xmm12, 0x20(%rsp)
movaps %xmm7, (%rsp)
movaps %xmm8, 0xa0(%rsp)
movaps %xmm9, 0x90(%rsp)
movaps %xmm10, 0x80(%rsp)
movaps %xmm11, 0x70(%rsp)
movaps %xmm13, 0x60(%rsp)
movq %rsi, 0x50(%rsp)
movq %rcx, 0x40(%rsp)
movq %r11, 0x30(%rsp)
movq %r9, %rax
cmpq %rdx, %r9
je 0x19ea89
leaq -0x8(%rax), %r9
movq %rax, 0x58(%rsp)
movq -0x8(%rax), %r8
testb $0x8, %r8b
jne 0x19e608
movss 0x70(%rsi,%r10,4), %xmm1
movl %r8d, %ecx
andl $0x7, %ecx
movq %r8, %rax
andq $-0x10, %rax
cmpq $0x3, %rcx
je 0x19e65e
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps 0x80(%rax,%r11), %xmm0
mulps %xmm1, %xmm0
addps 0x20(%rax,%r11), %xmm0
subps %xmm6, %xmm0
movaps 0x80(%rax,%rbx), %xmm2
mulps %xmm1, %xmm2
addps 0x20(%rax,%rbx), %xmm2
mulps %xmm8, %xmm0
subps %xmm12, %xmm2
mulps %xmm9, %xmm2
movaps 0x80(%rax,%r15), %xmm3
mulps %xmm1, %xmm3
addps 0x20(%rax,%r15), %xmm3
subps %xmm7, %xmm3
mulps %xmm10, %xmm3
maxps %xmm3, %xmm2
movaps 0x80(%rax,%r13), %xmm3
mulps %xmm1, %xmm3
addps 0x20(%rax,%r13), %xmm3
subps %xmm6, %xmm3
mulps %xmm8, %xmm3
movaps 0x80(%rax,%r14), %xmm5
mulps %xmm1, %xmm5
addps 0x20(%rax,%r14), %xmm5
subps %xmm12, %xmm5
mulps %xmm9, %xmm5
movaps 0x80(%rax,%r12), %xmm4
mulps %xmm1, %xmm4
addps 0x20(%rax,%r12), %xmm4
subps %xmm7, %xmm4
mulps %xmm10, %xmm4
minps %xmm4, %xmm5
movaps %xmm11, %xmm4
maxps %xmm0, %xmm4
maxps %xmm2, %xmm4
movaps %xmm13, %xmm0
minps %xmm3, %xmm0
minps %xmm5, %xmm0
cmpleps %xmm0, %xmm4
cmpl $0x6, %ecx
je 0x19e96e
pslld $0x1f, %xmm4
movmskps %xmm4, %ebp
testb $0x8, %r8b
jne 0x19e657
testq %rbp, %rbp
je 0x19e966
andq $-0x10, %r8
bsfq %rbp, %rax
leaq -0x1(%rbp), %rdi
xorl %ecx, %ecx
movq (%r8,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbp, %rdi
jne 0x19e98c
movq %rax, %r8
testl %ecx, %ecx
je 0x19e517
jmp 0x19e9da
pushq $0x6
jmp 0x19e968
movaps 0x50(%rax), %xmm13
movaps %xmm13, 0x10(%rsp)
movaps 0x60(%rax), %xmm12
movaps %xmm12, 0xc0(%rsp)
movaps 0x70(%rax), %xmm4
movaps %xmm4, 0xd0(%rsp)
movaps 0x80(%rax), %xmm10
movaps 0x90(%rax), %xmm6
movaps %xmm6, 0xf0(%rsp)
movaps 0xa0(%rax), %xmm5
movaps %xmm5, 0xe0(%rsp)
movss 0x1d4e063(%rip), %xmm7 # 0x1eec714
subss %xmm1, %xmm7
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm7, %xmm0
shufps $0x0, %xmm7, %xmm0 # xmm0 = xmm0[0,0],xmm7[0,0]
movaps 0xe0(%rax), %xmm11
mulps %xmm1, %xmm11
movaps 0xf0(%rax), %xmm2
mulps %xmm1, %xmm2
movaps %xmm2, %xmm3
movaps 0x100(%rax), %xmm2
mulps %xmm1, %xmm2
mulss 0x1d4d339(%rip), %xmm7 # 0x1eeba24
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
addps %xmm7, %xmm11
addps %xmm7, %xmm3
movaps %xmm3, 0x100(%rsp)
addps %xmm2, %xmm7
movaps 0x110(%rax), %xmm9
mulps %xmm1, %xmm9
movaps 0x120(%rax), %xmm8
mulps %xmm1, %xmm8
mulps 0x130(%rax), %xmm1
addps %xmm0, %xmm9
addps %xmm0, %xmm8
addps %xmm0, %xmm1
movaps 0x110(%rsp), %xmm3
movaps %xmm3, %xmm0
mulps %xmm10, %xmm0
movaps %xmm3, %xmm2
mulps %xmm6, %xmm2
mulps %xmm5, %xmm3
movaps 0x120(%rsp), %xmm6
movaps %xmm6, %xmm5
mulps %xmm13, %xmm5
addps %xmm0, %xmm5
movaps %xmm6, %xmm0
mulps %xmm12, %xmm0
addps %xmm2, %xmm0
mulps %xmm4, %xmm6
addps %xmm3, %xmm6
movaps 0x20(%rax), %xmm13
movaps 0x130(%rsp), %xmm3
movaps %xmm3, %xmm4
mulps %xmm13, %xmm4
addps %xmm5, %xmm4
movaps 0x30(%rax), %xmm14
movaps %xmm3, %xmm2
mulps %xmm14, %xmm2
addps %xmm0, %xmm2
movaps 0x40(%rax), %xmm15
mulps %xmm15, %xmm3
addps %xmm6, %xmm3
movaps 0xb0(%rsp), %xmm6
movaps %xmm4, %xmm0
movaps 0x1d4df17(%rip), %xmm5 # 0x1eec6c0
andps %xmm5, %xmm0
movaps 0x1d5358c(%rip), %xmm12 # 0x1ef1d40
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm12, %xmm4
movaps %xmm2, %xmm0
andps %xmm5, %xmm0
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm12, %xmm2
movaps %xmm3, %xmm0
andps %xmm5, %xmm0
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm12, %xmm3
rcpps %xmm4, %xmm0
mulps %xmm0, %xmm4
movaps 0x1d4e221(%rip), %xmm12 # 0x1eeca10
movaps %xmm12, %xmm5
subps %xmm4, %xmm5
mulps %xmm0, %xmm5
addps %xmm0, %xmm5
rcpps %xmm2, %xmm0
mulps %xmm0, %xmm2
movaps %xmm12, %xmm4
subps %xmm2, %xmm4
mulps %xmm0, %xmm4
addps %xmm0, %xmm4
rcpps %xmm3, %xmm2
mulps %xmm2, %xmm3
movaps %xmm12, %xmm0
subps %xmm3, %xmm0
mulps %xmm2, %xmm0
addps %xmm2, %xmm0
mulps (%rsp), %xmm10
addps 0xb0(%rax), %xmm10
movaps 0x10(%rsp), %xmm2
mulps 0x20(%rsp), %xmm2
addps %xmm10, %xmm2
movaps %xmm2, 0x10(%rsp)
movaps 0x20(%rsp), %xmm12
movaps 0xf0(%rsp), %xmm10
mulps (%rsp), %xmm10
addps 0xc0(%rax), %xmm10
movaps 0xe0(%rsp), %xmm3
mulps (%rsp), %xmm3
addps 0xd0(%rax), %xmm3
movaps 0xc0(%rsp), %xmm2
mulps %xmm12, %xmm2
addps %xmm10, %xmm2
movaps %xmm2, %xmm10
movaps 0xd0(%rsp), %xmm2
mulps %xmm12, %xmm2
addps %xmm3, %xmm2
mulps %xmm6, %xmm13
addps 0x10(%rsp), %xmm13
mulps %xmm6, %xmm14
addps %xmm10, %xmm14
mulps %xmm6, %xmm15
addps %xmm2, %xmm15
subps %xmm13, %xmm11
subps %xmm13, %xmm9
movaps 0x60(%rsp), %xmm13
movaps 0x100(%rsp), %xmm2
subps %xmm14, %xmm2
subps %xmm14, %xmm8
subps %xmm15, %xmm7
subps %xmm15, %xmm1
mulps %xmm5, %xmm11
mulps %xmm5, %xmm9
mulps %xmm4, %xmm2
mulps %xmm0, %xmm7
mulps %xmm4, %xmm8
mulps %xmm0, %xmm1
movaps %xmm2, %xmm0
movaps %xmm2, %xmm3
pminsd %xmm8, %xmm0
movaps %xmm7, %xmm2
pminsd %xmm1, %xmm2
maxps %xmm2, %xmm0
movaps %xmm11, %xmm2
pminsd %xmm9, %xmm2
pmaxsd %xmm11, %xmm9
movaps 0x70(%rsp), %xmm11
pmaxsd %xmm3, %xmm8
movaps 0x80(%rsp), %xmm10
pmaxsd %xmm7, %xmm1
movaps (%rsp), %xmm7
minps %xmm1, %xmm8
movaps %xmm11, %xmm1
maxps %xmm2, %xmm1
maxps %xmm0, %xmm1
movaps %xmm13, %xmm0
minps %xmm9, %xmm0
movaps 0x90(%rsp), %xmm9
minps %xmm8, %xmm0
movaps 0xa0(%rsp), %xmm8
cmpleps %xmm0, %xmm1
movmskps %xmm1, %ebp
jmp 0x19e608
pushq $0x4
popq %rcx
jmp 0x19e64a
movaps 0xe0(%rax), %xmm0
cmpleps %xmm1, %xmm0
cmpltps 0xf0(%rax), %xmm1
andps %xmm0, %xmm1
andps %xmm1, %xmm4
jmp 0x19e600
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %rdx
leaq -0x1(%rdi), %rax
movq (%r8,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rdi, %rax
je 0x19e9ca
movq %rdx, (%r9)
addq $0x8, %r9
bsfq %rax, %rdx
leaq -0x1(%rax), %rdi
jmp 0x19e99b
movq %rdx, %r8
leaq 0x140(%rsp), %rdx
jmp 0x19e64a
cmpl $0x6, %ecx
jne 0x19ea7b
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x38(%rsp), %rcx
movq %r9, 0x10(%rsp)
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq 0x48(%rsp), %rdi
movq %r10, %rdx
movq 0x928(%rsp), %rcx
callq *0x18(%r9,%rax)
leaq 0x140(%rsp), %rdx
movaps 0x60(%rsp), %xmm13
movaps 0x70(%rsp), %xmm11
movq 0x30(%rsp), %r11
movaps 0x80(%rsp), %xmm10
movaps 0x90(%rsp), %xmm9
movaps 0xa0(%rsp), %xmm8
movq 0x10(%rsp), %r9
movaps (%rsp), %xmm7
movaps 0x20(%rsp), %xmm12
movaps 0xb0(%rsp), %xmm6
movq 0x40(%rsp), %r10
movq 0x50(%rsp), %rsi
xorl %ecx, %ecx
testb %al, %al
je 0x19ea7b
movl $0xff800000, 0x80(%rsi,%r10,4) # imm = 0xFF800000
pushq $0x1
popq %rcx
testb $0x3, %cl
movq 0x58(%rsp), %rax
je 0x19e4fe
cmpq %rdx, %rax
setne %al
addq $0x8e8, %rsp # imm = 0x8E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 257, true, embree::sse42::VirtualCurveIntersectorK<4>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1078, %rsp # imm = 0x1078
movq %r9, %rsi
movq %r8, 0x78(%rsp)
movq %rcx, %r15
movq %rdi, 0x70(%rsp)
movq 0x10b0(%rsp), %rax
leaq 0x140(%rsp), %rbp
movq %rdx, -0x10(%rbp)
andl $0x0, -0x8(%rbp)
movss (%rax,%rcx,4), %xmm15
movss 0x10(%rax,%rcx,4), %xmm13
movss 0x20(%rax,%rcx,4), %xmm14
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x110(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x100(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xf0(%rsp)
movss 0x60(%rax,%rcx,4), %xmm8
movss 0x70(%rax,%rcx,4), %xmm9
movss 0x80(%rax,%rcx,4), %xmm10
movss 0x1d813bb(%rip), %xmm11 # 0x1f1ff10
movaps %xmm8, %xmm12
mulss %xmm11, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movaps %xmm9, %xmm6
mulss %xmm11, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulss %xmm10, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0x1d81392(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulss %xmm0, %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulss %xmm0, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r13
movslq 0xa0(%rax,%rcx,4), %rdi
movslq 0xb0(%rax,%rcx,4), %r10
movq %r13, %r11
xorq $0x10, %r11
movq %rdi, %r14
xorq $0x10, %r14
movq %r10, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm13, 0xe0(%rsp)
movaps %xmm14, 0xd0(%rsp)
movq %rdi, 0x18(%rsp)
movq %r10, 0x10(%rsp)
movq %r11, 0x8(%rsp)
movaps %xmm7, 0x40(%rsp)
movaps %xmm15, 0x120(%rsp)
movaps %xmm8, 0xc0(%rsp)
movaps %xmm9, 0xb0(%rsp)
movaps %xmm10, 0xa0(%rsp)
movaps %xmm11, 0x90(%rsp)
movaps %xmm12, 0x80(%rsp)
movaps %xmm6, 0x50(%rsp)
movq %r14, 0x68(%rsp)
leaq 0x130(%rsp), %rax
cmpq %rax, %rbp
je 0x19f272
movss -0x8(%rbp), %xmm0
addq $-0x10, %rbp
ucomiss 0x80(%rsi,%r15,4), %xmm0
ja 0x19ec4d
movq (%rbp), %r8
movq %r8, %rax
andq $0xf, %rax
jne 0x19ed3b
movaps 0x20(%r8,%r13), %xmm0
subps %xmm15, %xmm0
mulps %xmm12, %xmm0
movaps 0x20(%r8,%rdi), %xmm2
subps %xmm13, %xmm2
mulps %xmm6, %xmm2
maxps %xmm2, %xmm0
movaps 0x20(%r8,%r10), %xmm2
subps %xmm14, %xmm2
mulps %xmm11, %xmm2
movaps 0x20(%r8,%r11), %xmm3
subps %xmm15, %xmm3
mulps %xmm8, %xmm3
movaps 0x20(%r8,%r14), %xmm4
subps %xmm13, %xmm4
mulps %xmm9, %xmm4
minps %xmm4, %xmm3
movaps 0x20(%r8,%r12), %xmm4
subps %xmm14, %xmm4
mulps %xmm10, %xmm4
maxps %xmm7, %xmm2
maxps %xmm2, %xmm0
minps %xmm1, %xmm4
minps %xmm4, %xmm3
movaps %xmm0, 0x20(%rsp)
cmpleps %xmm3, %xmm0
movmskps %xmm0, %ebx
movb $0x1, %al
testb %al, %al
je 0x19ed44
testq %rbx, %rbx
je 0x19ed48
andq $-0x10, %r8
bsfq %rbx, %rcx
leaq -0x1(%rbx), %r9
xorl %eax, %eax
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rbx, %r9
jne 0x19ed4d
movq %rdx, %r8
testl %eax, %eax
je 0x19ec76
jmp 0x19f1af
cmpl $0x2, %eax
je 0x19edbe
xorl %eax, %eax
jmp 0x19ecf6
pushq $0x6
jmp 0x19ed4a
pushq $0x4
popq %rax
jmp 0x19ed2e
movl 0x20(%rsp,%rcx,4), %edi
bsfq %r9, %r10
leaq -0x1(%r9), %rcx
movq (%r8,%r10,8), %r11
prefetcht0 (%r11)
prefetcht0 0x40(%r11)
prefetcht0 0x80(%r11)
prefetcht0 0xc0(%r11)
movl 0x20(%rsp,%r10,4), %r10d
andq %r9, %rcx
jne 0x19f032
leaq 0x10(%rbp), %rcx
cmpl %r10d, %edi
jae 0x19ed9d
movq %r11, (%rbp)
movl %r10d, 0x8(%rbp)
movq %rcx, %rbp
movq %rdx, %r8
jmp 0x19edaa
movq %rdx, (%rbp)
movl %edi, 0x8(%rbp)
movq %rcx, %rbp
movq %r11, %r8
movq 0x18(%rsp), %rdi
movq 0x10(%rsp), %r10
movq 0x8(%rsp), %r11
jmp 0x19ed2e
movq %r8, %rax
andq $-0x10, %rax
movaps 0x80(%rax), %xmm10
movaps 0xf0(%rsp), %xmm11
movaps %xmm11, %xmm0
mulps %xmm10, %xmm0
movaps 0x90(%rax), %xmm8
movaps %xmm11, %xmm5
mulps %xmm8, %xmm5
movaps 0xa0(%rax), %xmm7
mulps %xmm7, %xmm11
movaps 0x20(%rax), %xmm4
movaps 0x30(%rax), %xmm3
movaps 0x40(%rax), %xmm2
movaps 0x50(%rax), %xmm9
movaps 0x100(%rsp), %xmm14
movaps %xmm14, %xmm12
mulps %xmm9, %xmm12
addps %xmm0, %xmm12
movaps 0x60(%rax), %xmm6
movaps %xmm14, %xmm0
mulps %xmm6, %xmm0
addps %xmm5, %xmm0
movaps 0x70(%rax), %xmm5
movaps %xmm5, 0x30(%rsp)
mulps %xmm5, %xmm14
addps %xmm11, %xmm14
movaps 0x110(%rsp), %xmm13
movaps %xmm13, %xmm11
mulps %xmm4, %xmm11
addps %xmm12, %xmm11
movaps %xmm13, %xmm12
mulps %xmm3, %xmm12
addps %xmm0, %xmm12
mulps %xmm2, %xmm13
addps %xmm14, %xmm13
movaps %xmm11, %xmm0
movaps 0x1d4d84d(%rip), %xmm14 # 0x1eec6c0
andps %xmm14, %xmm0
movaps 0x1d52ec2(%rip), %xmm5 # 0x1ef1d40
cmpltps %xmm5, %xmm0
blendvps %xmm0, %xmm5, %xmm11
movaps %xmm12, %xmm0
andps %xmm14, %xmm0
cmpltps %xmm5, %xmm0
blendvps %xmm0, %xmm5, %xmm12
movaps %xmm13, %xmm0
andps %xmm14, %xmm0
cmpltps %xmm5, %xmm0
blendvps %xmm0, %xmm5, %xmm13
rcpps %xmm11, %xmm14
mulps %xmm14, %xmm11
movaps 0x1d4db55(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm0
subps %xmm11, %xmm0
mulps %xmm14, %xmm0
addps %xmm14, %xmm0
rcpps %xmm12, %xmm14
mulps %xmm14, %xmm12
movaps %xmm5, %xmm11
subps %xmm12, %xmm11
mulps %xmm14, %xmm11
addps %xmm14, %xmm11
rcpps %xmm13, %xmm14
mulps %xmm14, %xmm13
movaps %xmm5, %xmm12
subps %xmm13, %xmm12
movaps 0xe0(%rsp), %xmm13
mulps %xmm14, %xmm12
addps %xmm14, %xmm12
movaps 0xd0(%rsp), %xmm14
mulps %xmm14, %xmm10
addps 0xb0(%rax), %xmm10
mulps %xmm13, %xmm9
addps %xmm10, %xmm9
movaps 0xa0(%rsp), %xmm10
mulps %xmm14, %xmm8
addps 0xc0(%rax), %xmm8
mulps %xmm14, %xmm7
addps 0xd0(%rax), %xmm7
mulps %xmm13, %xmm6
addps %xmm8, %xmm6
movaps 0xc0(%rsp), %xmm8
movaps 0x30(%rsp), %xmm5
mulps %xmm13, %xmm5
addps %xmm7, %xmm5
movaps %xmm5, 0x30(%rsp)
mulps %xmm15, %xmm4
addps %xmm9, %xmm4
movaps 0xb0(%rsp), %xmm9
movaps %xmm0, %xmm7
movaps 0x1d4d753(%rip), %xmm5 # 0x1eec6d0
xorps %xmm5, %xmm7
mulps %xmm7, %xmm4
movaps 0x40(%rsp), %xmm7
mulps %xmm15, %xmm3
addps %xmm6, %xmm3
movaps %xmm11, %xmm6
xorps %xmm5, %xmm6
mulps %xmm6, %xmm3
movaps %xmm12, %xmm6
xorps %xmm5, %xmm6
mulps %xmm15, %xmm2
addps 0x30(%rsp), %xmm2
mulps %xmm6, %xmm2
addps %xmm4, %xmm0
addps %xmm3, %xmm11
addps %xmm2, %xmm12
movaps %xmm3, %xmm5
pminsd %xmm11, %xmm5
movaps %xmm2, %xmm6
pminsd %xmm12, %xmm6
maxps %xmm6, %xmm5
movaps %xmm4, %xmm6
pminsd %xmm0, %xmm6
pmaxsd %xmm4, %xmm0
pmaxsd %xmm3, %xmm11
pmaxsd %xmm2, %xmm12
minps %xmm12, %xmm11
movaps 0x80(%rsp), %xmm12
movaps %xmm7, %xmm2
maxps %xmm6, %xmm2
movaps 0x50(%rsp), %xmm6
maxps %xmm5, %xmm2
movaps %xmm1, %xmm3
minps %xmm0, %xmm3
minps %xmm11, %xmm3
movaps 0x90(%rsp), %xmm11
mulps 0x1d52d66(%rip), %xmm2 # 0x1ef1d80
mulps 0x1d52d6f(%rip), %xmm3 # 0x1ef1d90
movaps %xmm2, 0x20(%rsp)
cmpleps %xmm3, %xmm2
movmskps %xmm2, %ebx
jmp 0x19ecf4
movq %rdx, %xmm2
movd %edi, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %r11, %xmm4
movd %r10d, %xmm0
punpcklqdq %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0]
bsfq %rcx, %r9
leaq -0x1(%rcx), %rdx
movq (%r8,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movq %rdi, %xmm3
movd 0x20(%rsp,%r9,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rcx, %rdx
jne 0x19f0e6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
blendvps %xmm0, %xmm4, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm3, %xmm5
movaps %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movaps %xmm2, (%rbp)
movaps %xmm3, 0x10(%rbp)
movq %xmm4, %r8
addq $0x20, %rbp
jmp 0x19edaa
bsfq %rdx, %rcx
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
movq %rdx, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
movd 0x20(%rsp,%rcx,4), %xmm7
blendvps %xmm0, %xmm4, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movaps 0x40(%rsp), %xmm7
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm4
blendvps %xmm0, %xmm3, %xmm4
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm4, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm5
movaps %xmm6, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm6, %xmm5
movaps 0x50(%rsp), %xmm6
movaps %xmm2, (%rbp)
movaps %xmm5, 0x10(%rbp)
movaps %xmm4, 0x20(%rbp)
movq %xmm3, %r8
addq $0x30, %rbp
jmp 0x19edaa
cmpl $0x6, %eax
jne 0x19ec4d
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x70(%rsp), %rcx
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq 0x78(%rsp), %rdi
movq %r15, %rdx
movq 0x10b8(%rsp), %rcx
movq %r13, %r14
movq %rbp, %r13
movq %r15, %rbp
movq %rsi, %r15
callq *0x10(%r9,%rax)
movaps 0x40(%rsp), %xmm7
movq 0x8(%rsp), %r11
movq 0x10(%rsp), %r10
movq 0x18(%rsp), %rdi
movaps 0x50(%rsp), %xmm6
movaps 0x80(%rsp), %xmm12
movaps 0x90(%rsp), %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0xb0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm8
movaps 0xd0(%rsp), %xmm14
movaps 0xe0(%rsp), %xmm13
movaps 0x120(%rsp), %xmm15
movq %r15, %rsi
movq %rbp, %r15
movq %r13, %rbp
movq %r14, %r13
movq 0x68(%rsp), %r14
movss 0x80(%rsi,%r15,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
jmp 0x19ec4d
addq $0x1078, %rsp # imm = 0x1078
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 257, true, embree::sse42::VirtualCurveIntersectorK<4>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x8c8, %rsp # imm = 0x8C8
movq %r9, 0x18(%rsp)
movq %r8, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
movq 0x900(%rsp), %rax
leaq 0x128(%rsp), %rdi
movq %rdx, -0x8(%rdi)
movss (%rax,%rcx,4), %xmm14
movss 0x10(%rax,%rcx,4), %xmm15
movss 0x20(%rax,%rcx,4), %xmm12
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xf0(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xe0(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0xd0(%rsp)
movss 0x60(%rax,%rcx,4), %xmm4
movss 0x70(%rax,%rcx,4), %xmm5
movss 0x80(%rax,%rcx,4), %xmm6
movss 0x1d80be2(%rip), %xmm7 # 0x1f1ff10
movaps %xmm4, %xmm8
mulss %xmm7, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
movaps %xmm5, %xmm9
mulss %xmm7, %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulss %xmm6, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0x1d80bba(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulss %xmm0, %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulss %xmm0, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r9
movslq 0xa0(%rax,%rcx,4), %r15
movslq 0xb0(%rax,%rcx,4), %rbp
movq %r9, %r13
xorq $0x10, %r13
movq %r15, %r14
xorq $0x10, %r14
movq %rbp, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movq %rcx, 0x10(%rsp)
movss 0xd0(%rax,%rcx,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
leaq 0x120(%rsp), %r10
movaps %xmm12, 0xc0(%rsp)
movaps %xmm4, 0xb0(%rsp)
movaps %xmm5, 0xa0(%rsp)
movaps %xmm6, 0x90(%rsp)
movaps %xmm7, 0x80(%rsp)
movaps %xmm8, 0x70(%rsp)
movaps %xmm9, 0x60(%rsp)
movaps %xmm11, 0x50(%rsp)
movaps %xmm13, 0x40(%rsp)
movaps %xmm14, 0x110(%rsp)
movaps %xmm15, 0x100(%rsp)
movq %r9, 0x20(%rsp)
movq %rdi, %r11
cmpq %r10, %rdi
je 0x19f883
leaq -0x8(%r11), %rdi
movq -0x8(%r11), %r8
movq %r8, %rax
andq $0xf, %rax
jne 0x19f4f5
movaps 0x20(%r8,%r9), %xmm0
subps %xmm14, %xmm0
mulps %xmm8, %xmm0
movaps 0x20(%r8,%r15), %xmm1
subps %xmm15, %xmm1
mulps %xmm9, %xmm1
maxps %xmm1, %xmm0
movaps 0x20(%r8,%rbp), %xmm1
subps %xmm12, %xmm1
mulps %xmm7, %xmm1
movaps 0x20(%r8,%r13), %xmm2
subps %xmm14, %xmm2
mulps %xmm4, %xmm2
movaps 0x20(%r8,%r14), %xmm3
subps %xmm15, %xmm3
mulps %xmm5, %xmm3
minps %xmm3, %xmm2
movaps 0x20(%r8,%r12), %xmm3
subps %xmm12, %xmm3
mulps %xmm6, %xmm3
maxps %xmm11, %xmm1
maxps %xmm1, %xmm0
minps %xmm13, %xmm3
minps %xmm3, %xmm2
cmpleps %xmm2, %xmm0
movmskps %xmm0, %ebx
movb $0x1, %al
testb %al, %al
je 0x19f4fe
testq %rbx, %rbx
je 0x19f502
andq $-0x10, %r8
bsfq %rbx, %rax
leaq -0x1(%rbx), %rsi
xorl %ecx, %ecx
movq (%r8,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rsi
jne 0x19f507
movq %rax, %r8
testl %ecx, %ecx
je 0x19f436
jmp 0x19f7b1
cmpl $0x2, %eax
je 0x19f54a
xorl %eax, %eax
jmp 0x19f4b0
pushq $0x6
jmp 0x19f504
pushq $0x4
popq %rcx
jmp 0x19f4e8
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rdx
leaq -0x1(%rsi), %rax
movq (%r8,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rsi, %rax
je 0x19f545
movq %rdx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rdx
leaq -0x1(%rax), %rsi
jmp 0x19f516
movq %rdx, %r8
jmp 0x19f4e8
movq %r8, %rax
andq $-0x10, %rax
movaps 0x80(%rax), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps %xmm10, %xmm0
mulps %xmm9, %xmm0
movaps 0x90(%rax), %xmm8
movaps %xmm10, %xmm4
mulps %xmm8, %xmm4
movaps 0xa0(%rax), %xmm6
mulps %xmm6, %xmm10
movaps 0x20(%rax), %xmm3
movaps 0x30(%rax), %xmm2
movaps 0x40(%rax), %xmm1
movaps 0x50(%rax), %xmm7
movaps 0xe0(%rsp), %xmm13
movaps %xmm13, %xmm11
mulps %xmm7, %xmm11
addps %xmm0, %xmm11
movaps 0x60(%rax), %xmm5
movaps %xmm13, %xmm0
mulps %xmm5, %xmm0
addps %xmm4, %xmm0
movaps 0x70(%rax), %xmm4
movaps %xmm4, (%rsp)
mulps %xmm4, %xmm13
addps %xmm10, %xmm13
movaps 0xf0(%rsp), %xmm12
movaps %xmm12, %xmm10
mulps %xmm3, %xmm10
addps %xmm11, %xmm10
movaps %xmm12, %xmm11
mulps %xmm2, %xmm11
addps %xmm0, %xmm11
mulps %xmm1, %xmm12
movaps %xmm10, %xmm0
movaps 0x1d4d0c8(%rip), %xmm4 # 0x1eec6c0
andps %xmm4, %xmm0
movaps 0x1d5273e(%rip), %xmm4 # 0x1ef1d40
cmpltps %xmm4, %xmm0
blendvps %xmm0, %xmm4, %xmm10
movaps %xmm11, %xmm0
andps 0x1d4d0a9(%rip), %xmm0 # 0x1eec6c0
cmpltps %xmm4, %xmm0
blendvps %xmm0, %xmm4, %xmm11
addps %xmm13, %xmm12
movaps %xmm12, %xmm0
andps 0x1d4d090(%rip), %xmm0 # 0x1eec6c0
cmpltps %xmm4, %xmm0
blendvps %xmm0, %xmm4, %xmm12
rcpps %xmm10, %xmm13
mulps %xmm13, %xmm10
movaps 0x1d4d3c7(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm0
subps %xmm10, %xmm0
mulps %xmm13, %xmm0
addps %xmm13, %xmm0
rcpps %xmm11, %xmm13
mulps %xmm13, %xmm11
movaps %xmm4, %xmm10
subps %xmm11, %xmm10
mulps %xmm13, %xmm10
addps %xmm13, %xmm10
rcpps %xmm12, %xmm13
mulps %xmm13, %xmm12
movaps %xmm4, %xmm11
subps %xmm12, %xmm11
movaps 0xc0(%rsp), %xmm12
mulps %xmm13, %xmm11
addps %xmm13, %xmm11
movaps 0x40(%rsp), %xmm13
mulps %xmm12, %xmm9
addps 0xb0(%rax), %xmm9
mulps %xmm12, %xmm8
addps 0xc0(%rax), %xmm8
mulps %xmm15, %xmm7
addps %xmm9, %xmm7
movaps 0x60(%rsp), %xmm9
mulps %xmm15, %xmm5
addps %xmm8, %xmm5
movaps 0x70(%rsp), %xmm8
mulps %xmm12, %xmm6
addps 0xd0(%rax), %xmm6
movaps (%rsp), %xmm4
mulps %xmm15, %xmm4
addps %xmm6, %xmm4
movaps %xmm4, (%rsp)
mulps %xmm14, %xmm3
addps %xmm7, %xmm3
movaps 0x80(%rsp), %xmm7
movaps %xmm0, %xmm6
movaps 0x1d4cfd2(%rip), %xmm4 # 0x1eec6d0
xorps %xmm4, %xmm6
mulps %xmm6, %xmm3
movaps 0x90(%rsp), %xmm6
mulps %xmm14, %xmm2
addps %xmm5, %xmm2
movaps %xmm10, %xmm5
xorps %xmm4, %xmm5
mulps %xmm5, %xmm2
movaps %xmm11, %xmm5
xorps %xmm4, %xmm5
mulps %xmm14, %xmm1
addps (%rsp), %xmm1
mulps %xmm5, %xmm1
addps %xmm3, %xmm0
addps %xmm2, %xmm10
addps %xmm1, %xmm11
movaps %xmm2, %xmm4
pminsd %xmm10, %xmm4
movaps %xmm1, %xmm5
pminsd %xmm11, %xmm5
maxps %xmm5, %xmm4
movaps %xmm3, %xmm5
pminsd %xmm0, %xmm5
pmaxsd %xmm3, %xmm0
pmaxsd %xmm2, %xmm10
pmaxsd %xmm1, %xmm11
minps %xmm11, %xmm10
movaps 0x50(%rsp), %xmm11
movaps %xmm11, %xmm1
maxps %xmm5, %xmm1
movaps 0xa0(%rsp), %xmm5
maxps %xmm4, %xmm1
movaps 0xb0(%rsp), %xmm4
movaps %xmm13, %xmm2
minps %xmm0, %xmm2
minps %xmm10, %xmm2
mulps 0x1d525e2(%rip), %xmm1 # 0x1ef1d80
mulps 0x1d525eb(%rip), %xmm2 # 0x1ef1d90
cmpleps %xmm2, %xmm1
movmskps %xmm1, %ebx
jmp 0x19f4ae
cmpl $0x6, %ecx
jne 0x19f87a
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x28(%rsp), %rcx
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq %rdi, (%rsp)
movq 0x30(%rsp), %rdi
movq 0x18(%rsp), %rsi
movq 0x10(%rsp), %rdx
movq 0x908(%rsp), %rcx
movq %r11, 0x38(%rsp)
callq *0x18(%r9,%rax)
movq 0x38(%rsp), %r11
leaq 0x120(%rsp), %r10
movaps 0x40(%rsp), %xmm13
movaps 0x50(%rsp), %xmm11
movq 0x20(%rsp), %r9
movaps 0x60(%rsp), %xmm9
movaps 0x70(%rsp), %xmm8
movaps 0x80(%rsp), %xmm7
movaps 0x90(%rsp), %xmm6
movaps 0xa0(%rsp), %xmm5
movaps 0xb0(%rsp), %xmm4
movq (%rsp), %rdi
movaps 0xc0(%rsp), %xmm12
movaps 0x100(%rsp), %xmm15
movaps 0x110(%rsp), %xmm14
xorl %ecx, %ecx
testb %al, %al
je 0x19f87a
movq 0x18(%rsp), %rax
movq 0x10(%rsp), %rcx
movl $0xff800000, 0x80(%rax,%rcx,4) # imm = 0xFF800000
pushq $0x1
popq %rcx
testb $0x3, %cl
je 0x19f422
cmpq %r10, %r11
setne %al
addq $0x8c8, %rsp # imm = 0x8C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16781328, true, embree::sse42::VirtualCurveIntersectorK<4>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10d8, %rsp # imm = 0x10D8
movq %r9, %rsi
movq %r8, 0x80(%rsp)
movq %rcx, %r10
movq %rdi, 0x78(%rsp)
leaq 0x1a0(%rsp), %rdi
movq %rdx, -0x10(%rdi)
andl $0x0, -0x8(%rdi)
movq 0x1110(%rsp), %rax
movss (%rax,%rcx,4), %xmm8
movss 0x10(%rax,%rcx,4), %xmm14
movss 0x20(%rax,%rcx,4), %xmm9
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x120(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x110(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x100(%rsp)
movss 0x60(%rax,%rcx,4), %xmm10
movss 0x70(%rax,%rcx,4), %xmm11
movss 0x80(%rax,%rcx,4), %xmm12
movss 0x1d805be(%rip), %xmm13 # 0x1f1ff10
movaps %xmm10, %xmm15
mulss %xmm13, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
movaps %xmm11, %xmm6
mulss %xmm13, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulss %xmm12, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movss 0x1d80595(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulss %xmm0, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm0, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r11
movslq 0xa0(%rax,%rcx,4), %rbx
movslq 0xb0(%rax,%rcx,4), %r15
movq %r11, %r13
xorq $0x10, %r13
movq %rbx, %r14
xorq $0x10, %r14
movq %r15, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movq %rcx, 0x8(%rsp)
movaps %xmm8, 0xf0(%rsp)
movaps %xmm14, 0x60(%rsp)
movaps %xmm9, 0x20(%rsp)
movaps %xmm10, 0xe0(%rsp)
movaps %xmm11, 0xd0(%rsp)
movaps %xmm12, 0xc0(%rsp)
movaps %xmm13, 0xb0(%rsp)
movaps %xmm15, 0xa0(%rsp)
movaps %xmm6, 0x50(%rsp)
movq %r11, (%rsp)
movaps %xmm7, 0x40(%rsp)
movq %r9, 0x88(%rsp)
leaq 0x190(%rsp), %rax
cmpq %rax, %rdi
je 0x1a01f2
movss -0x8(%rdi), %xmm0
addq $-0x10, %rdi
ucomiss 0x80(%rsi,%r10,4), %xmm0
ja 0x19fa41
movq (%rdi), %r8
testb $0x8, %r8b
jne 0x19fb63
movss 0x70(%rsi,%r10,4), %xmm2
movl %r8d, %ecx
andl $0x7, %ecx
movq %r8, %rax
andq $-0x10, %rax
cmpq $0x3, %rcx
je 0x19fbb9
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps 0x80(%rax,%r11), %xmm3
mulps %xmm2, %xmm3
addps 0x20(%rax,%r11), %xmm3
subps %xmm8, %xmm3
movaps 0x80(%rax,%rbx), %xmm4
mulps %xmm2, %xmm4
addps 0x20(%rax,%rbx), %xmm4
mulps %xmm15, %xmm3
subps %xmm14, %xmm4
mulps %xmm6, %xmm4
movaps 0x80(%rax,%r15), %xmm0
mulps %xmm2, %xmm0
addps 0x20(%rax,%r15), %xmm0
subps %xmm9, %xmm0
mulps %xmm13, %xmm0
maxps %xmm0, %xmm4
movaps %xmm7, %xmm0
maxps %xmm3, %xmm0
movaps 0x80(%rax,%r13), %xmm5
mulps %xmm2, %xmm5
addps 0x20(%rax,%r13), %xmm5
maxps %xmm4, %xmm0
subps %xmm8, %xmm5
mulps %xmm10, %xmm5
movaps 0x80(%rax,%r14), %xmm4
mulps %xmm2, %xmm4
addps 0x20(%rax,%r14), %xmm4
subps %xmm14, %xmm4
movaps 0x80(%rax,%r12), %xmm3
mulps %xmm2, %xmm3
addps 0x20(%rax,%r12), %xmm3
mulps %xmm11, %xmm4
subps %xmm9, %xmm3
mulps %xmm12, %xmm3
minps %xmm3, %xmm4
movaps %xmm1, %xmm3
minps %xmm5, %xmm3
minps %xmm4, %xmm3
cmpl $0x6, %ecx
je 0x19ff29
movaps %xmm0, %xmm2
cmpleps %xmm3, %xmm2
pslld $0x1f, %xmm2
movmskps %xmm2, %ebp
movaps %xmm0, 0x30(%rsp)
testb $0x8, %r8b
jne 0x19fbb2
testq %rbp, %rbp
je 0x19ff21
andq $-0x10, %r8
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r9
xorl %eax, %eax
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rbp, %r9
jne 0x19ff4e
movq %rdx, %r8
testl %eax, %eax
je 0x19fa69
jmp 0x1a0143
pushq $0x6
jmp 0x19ff23
movaps 0x20(%rax), %xmm4
movaps %xmm4, 0x170(%rsp)
movaps 0x30(%rax), %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0x50(%rax), %xmm6
movaps %xmm6, 0x150(%rsp)
movaps 0x60(%rax), %xmm8
movaps %xmm8, 0x160(%rsp)
movaps 0x80(%rax), %xmm10
movaps %xmm10, 0x130(%rsp)
movaps 0x90(%rax), %xmm5
movaps %xmm5, 0x140(%rsp)
movaps 0xa0(%rax), %xmm0
movaps %xmm0, 0x90(%rsp)
movss 0x1d4caf5(%rip), %xmm7 # 0x1eec714
subss %xmm2, %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm7, %xmm0
shufps $0x0, %xmm7, %xmm0 # xmm0 = xmm0[0,0],xmm7[0,0]
movaps 0xe0(%rax), %xmm12
mulps %xmm2, %xmm12
movaps 0xf0(%rax), %xmm11
mulps %xmm2, %xmm11
movaps 0x100(%rax), %xmm3
mulps %xmm2, %xmm3
mulss 0x1d4bdcc(%rip), %xmm7 # 0x1eeba24
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
addps %xmm7, %xmm12
addps %xmm7, %xmm11
addps %xmm3, %xmm7
movaps 0x110(%rax), %xmm3
mulps %xmm2, %xmm3
movaps 0x120(%rax), %xmm9
mulps %xmm2, %xmm9
mulps 0x130(%rax), %xmm2
addps %xmm0, %xmm3
movaps %xmm3, 0x180(%rsp)
addps %xmm0, %xmm9
addps %xmm0, %xmm2
movaps 0x100(%rsp), %xmm13
movaps %xmm13, %xmm0
mulps %xmm10, %xmm0
movaps %xmm13, %xmm3
mulps %xmm5, %xmm3
movaps 0x110(%rsp), %xmm15
movaps %xmm15, %xmm5
mulps %xmm6, %xmm5
addps %xmm0, %xmm5
movaps %xmm15, %xmm6
mulps %xmm8, %xmm6
addps %xmm3, %xmm6
movaps 0x120(%rsp), %xmm3
movaps %xmm3, %xmm8
mulps %xmm4, %xmm8
addps %xmm5, %xmm8
movaps %xmm8, %xmm0
movaps 0x1d4c9d4(%rip), %xmm14 # 0x1eec6c0
andps %xmm14, %xmm0
movaps 0x1d52048(%rip), %xmm10 # 0x1ef1d40
cmpltps %xmm10, %xmm0
blendvps %xmm0, %xmm10, %xmm8
movaps %xmm3, %xmm4
mulps 0x10(%rsp), %xmm4
addps %xmm6, %xmm4
movaps %xmm4, %xmm0
andps %xmm14, %xmm0
cmpltps %xmm10, %xmm0
blendvps %xmm0, %xmm10, %xmm4
movaps %xmm13, %xmm0
mulps 0x90(%rsp), %xmm0
movaps 0x70(%rax), %xmm13
movaps %xmm15, %xmm5
mulps %xmm13, %xmm5
addps %xmm0, %xmm5
movaps 0x40(%rax), %xmm15
mulps %xmm15, %xmm3
addps %xmm5, %xmm3
movaps %xmm3, %xmm0
andps %xmm14, %xmm0
cmpltps %xmm10, %xmm0
blendvps %xmm0, %xmm10, %xmm3
rcpps %xmm8, %xmm0
mulps %xmm0, %xmm8
movaps 0x1d4cca6(%rip), %xmm14 # 0x1eeca10
movaps %xmm14, %xmm6
subps %xmm8, %xmm6
movaps 0xf0(%rsp), %xmm8
mulps %xmm0, %xmm6
addps %xmm0, %xmm6
rcpps %xmm4, %xmm0
mulps %xmm0, %xmm4
movaps %xmm14, %xmm5
subps %xmm4, %xmm5
mulps %xmm0, %xmm5
addps %xmm0, %xmm5
rcpps %xmm3, %xmm4
mulps %xmm4, %xmm3
movaps %xmm14, %xmm0
subps %xmm3, %xmm0
mulps %xmm4, %xmm0
addps %xmm4, %xmm0
movaps 0x130(%rsp), %xmm3
mulps 0x20(%rsp), %xmm3
addps 0xb0(%rax), %xmm3
movaps %xmm3, %xmm4
movaps 0x140(%rsp), %xmm3
mulps 0x20(%rsp), %xmm3
addps 0xc0(%rax), %xmm3
movaps %xmm3, %xmm10
movaps 0x150(%rsp), %xmm3
mulps 0x60(%rsp), %xmm3
addps %xmm4, %xmm3
movaps %xmm3, %xmm4
movaps 0x60(%rsp), %xmm14
movaps 0x160(%rsp), %xmm3
mulps %xmm14, %xmm3
addps %xmm10, %xmm3
movaps %xmm3, %xmm10
movaps 0x90(%rsp), %xmm3
mulps 0x20(%rsp), %xmm3
addps 0xd0(%rax), %xmm3
mulps %xmm14, %xmm13
addps %xmm3, %xmm13
movaps 0x170(%rsp), %xmm3
mulps %xmm8, %xmm3
addps %xmm4, %xmm3
movaps %xmm3, %xmm4
movaps 0x10(%rsp), %xmm3
mulps %xmm8, %xmm3
addps %xmm10, %xmm3
mulps %xmm8, %xmm15
addps %xmm13, %xmm15
movaps 0xb0(%rsp), %xmm13
subps %xmm4, %xmm12
movaps 0x180(%rsp), %xmm10
subps %xmm4, %xmm10
subps %xmm3, %xmm11
subps %xmm3, %xmm9
subps %xmm15, %xmm7
subps %xmm15, %xmm2
movaps 0xa0(%rsp), %xmm15
mulps %xmm6, %xmm12
mulps %xmm6, %xmm10
movaps 0x50(%rsp), %xmm6
mulps %xmm5, %xmm11
mulps %xmm0, %xmm7
mulps %xmm5, %xmm9
mulps %xmm0, %xmm2
movaps %xmm11, %xmm0
pminsd %xmm9, %xmm0
movaps %xmm7, %xmm3
pminsd %xmm2, %xmm3
maxps %xmm3, %xmm0
movaps %xmm12, %xmm3
pminsd %xmm10, %xmm3
pmaxsd %xmm12, %xmm10
movaps 0xc0(%rsp), %xmm12
pmaxsd %xmm11, %xmm9
movaps 0xd0(%rsp), %xmm11
pmaxsd %xmm7, %xmm2
movaps 0x40(%rsp), %xmm7
minps %xmm2, %xmm9
movaps %xmm7, %xmm2
maxps %xmm3, %xmm2
maxps %xmm0, %xmm2
movaps %xmm1, %xmm0
minps %xmm10, %xmm0
movaps 0xe0(%rsp), %xmm10
minps %xmm9, %xmm0
movaps 0x20(%rsp), %xmm9
mulps 0x1d51e77(%rip), %xmm2 # 0x1ef1d80
mulps 0x1d51e80(%rip), %xmm0 # 0x1ef1d90
movaps %xmm2, 0x30(%rsp)
cmpleps %xmm0, %xmm2
movmskps %xmm2, %ebp
jmp 0x19fb63
pushq $0x4
popq %rax
jmp 0x19fba5
movaps %xmm0, %xmm4
cmpleps %xmm3, %xmm4
movaps 0xe0(%rax), %xmm3
cmpleps %xmm2, %xmm3
cmpltps 0xf0(%rax), %xmm2
andps %xmm3, %xmm2
andps %xmm4, %xmm2
jmp 0x19fb56
movq %rdi, 0x10(%rsp)
movl 0x30(%rsp,%rcx,4), %edi
bsfq %r9, %r10
leaq -0x1(%r9), %rcx
movq (%r8,%r10,8), %r11
prefetcht0 (%r11)
prefetcht0 0x40(%r11)
prefetcht0 0x80(%r11)
prefetcht0 0xc0(%r11)
movl 0x30(%rsp,%r10,4), %r10d
andq %r9, %rcx
jne 0x19ffbe
movq 0x10(%rsp), %r8
leaq 0x10(%r8), %rcx
cmpl %r10d, %edi
jae 0x19ffa3
movq %r11, (%r8)
movl %r10d, 0x8(%r8)
movq %rcx, %rdi
movq %rdx, %r8
jmp 0x19ffb0
movq %rdx, (%r8)
movl %edi, 0x8(%r8)
movq %rcx, %rdi
movq %r11, %r8
movq 0x8(%rsp), %r10
movq (%rsp), %r11
jmp 0x19fba5
movq %rdx, %xmm2
movd %edi, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %r11, %xmm4
movd %r10d, %xmm0
punpcklqdq %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0]
bsfq %rcx, %r9
leaq -0x1(%rcx), %rdx
movq (%r8,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movq %rdi, %xmm3
movd 0x30(%rsp,%r9,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rcx, %rdx
jne 0x1a0076
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
blendvps %xmm0, %xmm4, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm3, %xmm5
movaps %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movq 0x10(%rsp), %rdi
movaps %xmm2, (%rdi)
movaps %xmm3, 0x10(%rdi)
movq %xmm4, %r8
addq $0x20, %rdi
jmp 0x19ffb0
bsfq %rdx, %rcx
movq (%r8,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
movq %rdx, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
movd 0x30(%rsp,%rcx,4), %xmm7
blendvps %xmm0, %xmm4, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movaps 0x40(%rsp), %xmm7
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm4
blendvps %xmm0, %xmm3, %xmm4
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm4, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm5
movaps %xmm6, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm6, %xmm5
movaps 0x50(%rsp), %xmm6
movq 0x10(%rsp), %rdi
movaps %xmm2, (%rdi)
movaps %xmm5, 0x10(%rdi)
movaps %xmm4, 0x20(%rdi)
movq %xmm3, %r8
addq $0x30, %rdi
jmp 0x19ffb0
cmpl $0x6, %eax
jne 0x19fa41
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x78(%rsp), %rcx
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq %rdi, 0x10(%rsp)
movq 0x80(%rsp), %rdi
movq %r10, %rdx
movq 0x1118(%rsp), %rcx
callq *0x10(%r9,%rax)
movaps 0x40(%rsp), %xmm7
movq (%rsp), %r11
movaps 0x50(%rsp), %xmm6
movaps 0xa0(%rsp), %xmm15
movaps 0xb0(%rsp), %xmm13
movaps 0xc0(%rsp), %xmm12
movaps 0xd0(%rsp), %xmm11
movaps 0xe0(%rsp), %xmm10
movaps 0x20(%rsp), %xmm9
movaps 0x60(%rsp), %xmm14
movaps 0xf0(%rsp), %xmm8
movq 0x10(%rsp), %rdi
movq 0x8(%rsp), %r10
movq 0x88(%rsp), %rsi
movss 0x80(%rsi,%r10,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
jmp 0x19fa41
addq $0x10d8, %rsp # imm = 0x10D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16781328, true, embree::sse42::VirtualCurveIntersectorK<4>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::CurvePrecalculationsK<4>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x918, %rsp # imm = 0x918
movq %r9, %rsi
movq %r8, 0x48(%rsp)
movq %rcx, %r10
movq %rdi, 0x38(%rsp)
movq 0x950(%rsp), %rax
leaq 0x178(%rsp), %r9
movq %rdx, -0x8(%r9)
movss (%rax,%rcx,4), %xmm6
movss 0x10(%rax,%rcx,4), %xmm12
movss 0x20(%rax,%rcx,4), %xmm5
movss 0x30(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x160(%rsp)
movss 0x40(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x150(%rsp)
movss 0x50(%rax,%rcx,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x140(%rsp)
movss 0x60(%rax,%rcx,4), %xmm7
movss 0x70(%rax,%rcx,4), %xmm8
movss 0x80(%rax,%rcx,4), %xmm9
movss 0x1d7fc62(%rip), %xmm10 # 0x1f1ff10
movaps %xmm7, %xmm11
mulss %xmm10, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps %xmm8, %xmm13
mulss %xmm10, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
mulss %xmm9, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movss 0x1d7fc38(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulss %xmm0, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulss %xmm0, %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r11
movslq 0xa0(%rax,%rcx,4), %rbx
movslq 0xb0(%rax,%rcx,4), %r15
movq %r11, %r13
xorq $0x10, %r13
movq %rbx, %r14
xorq $0x10, %r14
movq %r15, %r12
xorq $0x10, %r12
movss 0xc0(%rax,%rcx,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
leaq 0x170(%rsp), %rdx
movaps %xmm6, 0xe0(%rsp)
movaps %xmm12, 0x20(%rsp)
movaps %xmm5, (%rsp)
movaps %xmm7, 0xd0(%rsp)
movaps %xmm8, 0xc0(%rsp)
movaps %xmm9, 0xb0(%rsp)
movaps %xmm10, 0xa0(%rsp)
movaps %xmm11, 0x90(%rsp)
movaps %xmm13, 0x80(%rsp)
movaps %xmm14, 0x70(%rsp)
movaps %xmm15, 0x60(%rsp)
movq %rsi, 0x50(%rsp)
movq %rcx, 0x40(%rsp)
movq %r11, 0x30(%rsp)
movq %r9, %rax
cmpq %rdx, %r9
je 0x1a0979
leaq -0x8(%rax), %r9
movq %rax, 0x58(%rsp)
movq -0x8(%rax), %r8
testb $0x8, %r8b
jne 0x1a04b6
movss 0x70(%rsi,%r10,4), %xmm1
movl %r8d, %ecx
andl $0x7, %ecx
movq %r8, %rax
andq $-0x10, %rax
cmpq $0x3, %rcx
je 0x1a050c
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps 0x80(%rax,%r11), %xmm2
mulps %xmm1, %xmm2
addps 0x20(%rax,%r11), %xmm2
subps %xmm6, %xmm2
movaps 0x80(%rax,%rbx), %xmm3
mulps %xmm1, %xmm3
addps 0x20(%rax,%rbx), %xmm3
mulps %xmm11, %xmm2
subps %xmm12, %xmm3
mulps %xmm13, %xmm3
movaps 0x80(%rax,%r15), %xmm0
mulps %xmm1, %xmm0
addps 0x20(%rax,%r15), %xmm0
subps %xmm5, %xmm0
mulps %xmm10, %xmm0
maxps %xmm0, %xmm3
movaps %xmm14, %xmm0
maxps %xmm2, %xmm0
movaps 0x80(%rax,%r13), %xmm4
mulps %xmm1, %xmm4
addps 0x20(%rax,%r13), %xmm4
maxps %xmm3, %xmm0
subps %xmm6, %xmm4
mulps %xmm7, %xmm4
movaps 0x80(%rax,%r14), %xmm3
mulps %xmm1, %xmm3
addps 0x20(%rax,%r14), %xmm3
subps %xmm12, %xmm3
movaps 0x80(%rax,%r12), %xmm2
mulps %xmm1, %xmm2
addps 0x20(%rax,%r12), %xmm2
mulps %xmm8, %xmm3
subps %xmm5, %xmm2
mulps %xmm9, %xmm2
minps %xmm2, %xmm3
movaps %xmm15, %xmm2
minps %xmm4, %xmm2
minps %xmm3, %xmm2
cmpleps %xmm2, %xmm0
cmpl $0x6, %ecx
je 0x1a0844
pslld $0x1f, %xmm0
movmskps %xmm0, %ebp
testb $0x8, %r8b
jne 0x1a0505
testq %rbp, %rbp
je 0x1a083c
andq $-0x10, %r8
bsfq %rbp, %rax
leaq -0x1(%rbp), %rdi
xorl %ecx, %ecx
movq (%r8,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbp, %rdi
jne 0x1a0862
movq %rax, %r8
testl %ecx, %ecx
je 0x1a03c6
jmp 0x1a08b0
pushq $0x6
jmp 0x1a083e
movaps 0x50(%rax), %xmm13
movaps %xmm13, 0x10(%rsp)
movaps 0x60(%rax), %xmm12
movaps %xmm12, 0xf0(%rsp)
movaps 0x70(%rax), %xmm4
movaps %xmm4, 0x100(%rsp)
movaps 0x80(%rax), %xmm10
movaps 0x90(%rax), %xmm6
movaps %xmm6, 0x110(%rsp)
movaps 0xa0(%rax), %xmm5
movaps %xmm5, 0x120(%rsp)
movss 0x1d4c1b5(%rip), %xmm7 # 0x1eec714
subss %xmm1, %xmm7
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm7, %xmm0
shufps $0x0, %xmm7, %xmm0 # xmm0 = xmm0[0,0],xmm7[0,0]
movaps 0xe0(%rax), %xmm11
mulps %xmm1, %xmm11
movaps 0xf0(%rax), %xmm2
mulps %xmm1, %xmm2
movaps %xmm2, %xmm3
movaps 0x100(%rax), %xmm2
mulps %xmm1, %xmm2
mulss 0x1d4b48b(%rip), %xmm7 # 0x1eeba24
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
addps %xmm7, %xmm11
addps %xmm7, %xmm3
movaps %xmm3, 0x130(%rsp)
addps %xmm2, %xmm7
movaps 0x110(%rax), %xmm9
mulps %xmm1, %xmm9
movaps 0x120(%rax), %xmm8
mulps %xmm1, %xmm8
mulps 0x130(%rax), %xmm1
addps %xmm0, %xmm9
addps %xmm0, %xmm8
addps %xmm0, %xmm1
movaps 0x140(%rsp), %xmm3
movaps %xmm3, %xmm0
mulps %xmm10, %xmm0
movaps %xmm3, %xmm2
mulps %xmm6, %xmm2
mulps %xmm5, %xmm3
movaps 0x150(%rsp), %xmm6
movaps %xmm6, %xmm5
mulps %xmm13, %xmm5
addps %xmm0, %xmm5
movaps %xmm6, %xmm0
mulps %xmm12, %xmm0
addps %xmm2, %xmm0
mulps %xmm4, %xmm6
addps %xmm3, %xmm6
movaps 0x20(%rax), %xmm13
movaps 0x160(%rsp), %xmm3
movaps %xmm3, %xmm4
mulps %xmm13, %xmm4
addps %xmm5, %xmm4
movaps 0x30(%rax), %xmm14
movaps %xmm3, %xmm2
mulps %xmm14, %xmm2
addps %xmm0, %xmm2
movaps 0x40(%rax), %xmm15
mulps %xmm15, %xmm3
addps %xmm6, %xmm3
movaps 0xe0(%rsp), %xmm6
movaps %xmm4, %xmm0
movaps 0x1d4c069(%rip), %xmm5 # 0x1eec6c0
andps %xmm5, %xmm0
movaps 0x1d516de(%rip), %xmm12 # 0x1ef1d40
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm12, %xmm4
movaps %xmm2, %xmm0
andps %xmm5, %xmm0
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm12, %xmm2
movaps %xmm3, %xmm0
andps %xmm5, %xmm0
cmpltps %xmm12, %xmm0
blendvps %xmm0, %xmm12, %xmm3
rcpps %xmm4, %xmm0
mulps %xmm0, %xmm4
movaps 0x1d4c373(%rip), %xmm12 # 0x1eeca10
movaps %xmm12, %xmm5
subps %xmm4, %xmm5
mulps %xmm0, %xmm5
addps %xmm0, %xmm5
rcpps %xmm2, %xmm0
mulps %xmm0, %xmm2
movaps %xmm12, %xmm4
subps %xmm2, %xmm4
mulps %xmm0, %xmm4
addps %xmm0, %xmm4
rcpps %xmm3, %xmm2
mulps %xmm2, %xmm3
movaps %xmm12, %xmm0
subps %xmm3, %xmm0
mulps %xmm2, %xmm0
mulps (%rsp), %xmm10
addps 0xb0(%rax), %xmm10
addps %xmm2, %xmm0
movaps 0x10(%rsp), %xmm2
mulps 0x20(%rsp), %xmm2
addps %xmm10, %xmm2
movaps %xmm2, 0x10(%rsp)
movaps 0x20(%rsp), %xmm12
movaps 0x110(%rsp), %xmm10
mulps (%rsp), %xmm10
addps 0xc0(%rax), %xmm10
movaps 0xf0(%rsp), %xmm2
mulps %xmm12, %xmm2
addps %xmm10, %xmm2
movaps %xmm2, %xmm10
movaps 0x120(%rsp), %xmm3
mulps (%rsp), %xmm3
addps 0xd0(%rax), %xmm3
movaps 0x100(%rsp), %xmm2
mulps %xmm12, %xmm2
addps %xmm3, %xmm2
mulps %xmm6, %xmm13
addps 0x10(%rsp), %xmm13
mulps %xmm6, %xmm14
addps %xmm10, %xmm14
mulps %xmm6, %xmm15
addps %xmm2, %xmm15
subps %xmm13, %xmm11
subps %xmm13, %xmm9
movaps 0x80(%rsp), %xmm13
movaps 0x130(%rsp), %xmm2
subps %xmm14, %xmm2
subps %xmm14, %xmm8
movaps 0x70(%rsp), %xmm14
subps %xmm15, %xmm7
subps %xmm15, %xmm1
movaps 0x60(%rsp), %xmm15
mulps %xmm5, %xmm11
mulps %xmm5, %xmm9
movaps (%rsp), %xmm5
mulps %xmm4, %xmm2
mulps %xmm0, %xmm7
mulps %xmm4, %xmm8
mulps %xmm0, %xmm1
movaps %xmm2, %xmm0
movaps %xmm2, %xmm3
pminsd %xmm8, %xmm0
movaps %xmm7, %xmm2
pminsd %xmm1, %xmm2
maxps %xmm2, %xmm0
movaps %xmm11, %xmm2
pminsd %xmm9, %xmm2
pmaxsd %xmm11, %xmm9
movaps 0x90(%rsp), %xmm11
pmaxsd %xmm3, %xmm8
movaps 0xa0(%rsp), %xmm10
pmaxsd %xmm7, %xmm1
movaps 0xd0(%rsp), %xmm7
minps %xmm1, %xmm8
movaps %xmm14, %xmm1
maxps %xmm2, %xmm1
maxps %xmm0, %xmm1
movaps %xmm15, %xmm0
minps %xmm9, %xmm0
movaps 0xb0(%rsp), %xmm9
minps %xmm8, %xmm0
movaps 0xc0(%rsp), %xmm8
mulps 0x1d51557(%rip), %xmm1 # 0x1ef1d80
mulps 0x1d51560(%rip), %xmm0 # 0x1ef1d90
cmpleps %xmm0, %xmm1
movmskps %xmm1, %ebp
jmp 0x1a04b6
pushq $0x4
popq %rcx
jmp 0x1a04f8
movaps 0xe0(%rax), %xmm2
cmpleps %xmm1, %xmm2
cmpltps 0xf0(%rax), %xmm1
andps %xmm2, %xmm1
andps %xmm1, %xmm0
jmp 0x1a04ae
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %rdx
leaq -0x1(%rdi), %rax
movq (%r8,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rdi, %rax
je 0x1a08a0
movq %rdx, (%r9)
addq $0x8, %r9
bsfq %rax, %rdx
leaq -0x1(%rax), %rdi
jmp 0x1a0871
movq %rdx, %r8
leaq 0x170(%rsp), %rdx
jmp 0x1a04f8
cmpl $0x6, %ecx
jne 0x1a096b
andq $-0x10, %r8
movzbl (%r8), %eax
movq 0x38(%rsp), %rcx
movq %r9, 0x10(%rsp)
movq 0x8(%rcx), %r9
shll $0x6, %eax
movq 0x48(%rsp), %rdi
movq %r10, %rdx
movq 0x958(%rsp), %rcx
callq *0x18(%r9,%rax)
leaq 0x170(%rsp), %rdx
movaps 0x60(%rsp), %xmm15
movaps 0x70(%rsp), %xmm14
movq 0x30(%rsp), %r11
movaps 0x80(%rsp), %xmm13
movaps 0x90(%rsp), %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0xb0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm8
movaps 0xd0(%rsp), %xmm7
movq 0x10(%rsp), %r9
movaps (%rsp), %xmm5
movaps 0x20(%rsp), %xmm12
movaps 0xe0(%rsp), %xmm6
movq 0x40(%rsp), %r10
movq 0x50(%rsp), %rsi
xorl %ecx, %ecx
testb %al, %al
je 0x1a096b
movl $0xff800000, 0x80(%rsi,%r10,4) # imm = 0xFF800000
pushq $0x1
popq %rcx
testb $0x3, %cl
movq 0x58(%rsp), %rax
je 0x1a03ad
cmpq %rdx, %rax
setne %al
addq $0x918, %rsp # imm = 0x918
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 1, true, embree::sse42::SubdivPatch1IntersectorK<4>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubdivPatch1PrecalculationsK<4, embree::sse42::GridSOAIntersectorK<4>::Precalculations>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
/* Single-ray closest-hit traversal of a width-N BVH, used by the hybrid
 * K-wide intersector when it falls back to tracing one ray (lane k) at a
 * time. Walks the tree depth-first with an explicit sorted stack, culls
 * stack entries that are already farther than the current hit distance,
 * and intersects leaf primitives via PrimitiveIntersectorK. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf; // root entry distance = -inf so it is never distance-culled below
/* load the ray into SIMD registers */
/* extract lane k of the K-wide ray packet into a single-ray TravRay */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
/* labelled so the inner loop can re-enter the pop step via 'goto pop' */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
/* dist is stored as bits; type-pun it back to float for the compare */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
/* returns false once 'cur' is a leaf (then the node-stat increment is undone) */
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
/* closest child becomes 'cur'; the rest are pushed sorted by distance */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
/* a leaf hit may have shortened the ray; refresh tfar so later nodes are culled */
tray1.tfar = ray.tfar[k];
/* push a lazily-built subtree (if the intersector produced one) for traversal */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x12b8, %rsp # imm = 0x12B8
movq %rcx, %rdi
movq 0x12f8(%rsp), %r11
movq 0x12f0(%rsp), %rax
leaq 0x380(%rsp), %rbx
movq %rdx, -0x10(%rbx)
andl $0x0, -0x8(%rbx)
movss (%rax,%rcx,4), %xmm4
movss 0x10(%rax,%rcx,4), %xmm8
movss 0x20(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm10
movss 0x70(%rax,%rcx,4), %xmm11
movss 0x80(%rax,%rcx,4), %xmm12
movss 0x1d7d424(%rip), %xmm13 # 0x1f1ff10
movaps %xmm10, %xmm14
mulss %xmm13, %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm11, %xmm15
mulss %xmm13, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulss %xmm12, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movss 0x1d7d3fa(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulss %xmm0, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm0, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r15
movslq 0xa0(%rax,%rcx,4), %r14
movslq 0xb0(%rax,%rcx,4), %r13
movq %r15, %rcx
xorq $0x10, %rcx
movq %rcx, 0x250(%rsp)
movq %r14, 0x258(%rsp)
xorq $0x10, %r14
movq %r13, %r12
xorq $0x10, %r13
movss 0xc0(%rax,%rdi,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movss 0xd0(%rax,%rdi,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
pushq $0x1
popq %rax
movl %edi, %ecx
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1f81b90(%rip), %rax # 0x2124730
movq %rax, 0x1c8(%rsp)
movq %r9, 0x58(%rsp)
movq %r8, 0x8(%rsp)
movq %rdi, (%rsp)
movaps %xmm4, 0x1e0(%rsp)
movaps %xmm8, 0x2d0(%rsp)
movaps %xmm9, 0x2c0(%rsp)
movaps %xmm10, 0x2b0(%rsp)
movaps %xmm11, 0x2a0(%rsp)
movaps %xmm12, 0x290(%rsp)
movaps %xmm13, 0x280(%rsp)
movaps %xmm14, 0x270(%rsp)
movaps %xmm15, 0x260(%rsp)
movq %r15, 0x1d0(%rsp)
movaps %xmm6, 0x1f0(%rsp)
leaq 0x370(%rsp), %rax
cmpq %rax, %rbx
je 0x1a46ed
movss -0x8(%rbx), %xmm0
addq $-0x10, %rbx
ucomiss 0x80(%r9,%rdi,4), %xmm0
ja 0x1a2c16
movq (%rbx), %rax
testb $0x8, %al
jne 0x1a2cc7
movaps 0x20(%rax,%r15), %xmm0
subps %xmm4, %xmm0
mulps %xmm14, %xmm0
movq 0x258(%rsp), %rcx
movaps 0x20(%rax,%rcx), %xmm2
subps %xmm8, %xmm2
mulps %xmm15, %xmm2
maxps %xmm2, %xmm0
movaps 0x20(%rax,%r12), %xmm2
subps %xmm9, %xmm2
mulps %xmm13, %xmm2
maxps %xmm6, %xmm2
maxps %xmm2, %xmm0
movq 0x250(%rsp), %rcx
movaps 0x20(%rax,%rcx), %xmm2
subps %xmm4, %xmm2
mulps %xmm10, %xmm2
movaps 0x20(%rax,%r14), %xmm3
subps %xmm8, %xmm3
mulps %xmm11, %xmm3
minps %xmm3, %xmm2
movaps 0x20(%rax,%r13), %xmm3
subps %xmm9, %xmm3
mulps %xmm12, %xmm3
minps %xmm1, %xmm3
minps %xmm3, %xmm2
movaps %xmm0, 0x100(%rsp)
cmpleps %xmm2, %xmm0
movmskps %xmm0, %ebp
testb $0x8, %al
jne 0x1a2cfe
testq %rbp, %rbp
je 0x1a2d02
andq $-0x10, %rax
bsfq %rbp, %rdx
leaq -0x1(%rbp), %r10
xorl %ecx, %ecx
movq (%rax,%rdx,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
andq %rbp, %r10
jne 0x1a2d07
movq %rsi, %rax
testl %ecx, %ecx
je 0x1a2c3e
jmp 0x1a2ee5
pushq $0x6
jmp 0x1a2d04
pushq $0x4
popq %rcx
jmp 0x1a2cf1
movl 0x100(%rsp,%rdx,4), %r8d
bsfq %r10, %r11
leaq -0x1(%r10), %rdx
movq (%rax,%r11,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
movl 0x100(%rsp,%r11,4), %r11d
andq %r10, %rdx
jne 0x1a2d67
leaq 0x10(%rbx), %rax
cmpl %r11d, %r8d
jae 0x1a2d47
movq %rdi, (%rbx)
movl %r11d, 0x8(%rbx)
movq %rax, %rbx
movq %rsi, %rax
jmp 0x1a2d54
movq %rsi, (%rbx)
movl %r8d, 0x8(%rbx)
movq %rax, %rbx
movq %rdi, %rax
movq 0x8(%rsp), %r8
movq (%rsp), %rdi
movq 0x12f8(%rsp), %r11
jmp 0x1a2cf1
movq %rsi, %xmm2
movd %r8d, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %rdi, %xmm5
movd %r11d, %xmm0
punpcklqdq %xmm0, %xmm5 # xmm5 = xmm5[0],xmm0[0]
bsfq %rdx, %rdi
leaq -0x1(%rdx), %rsi
movq (%rax,%rdi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
movq %r8, %xmm3
movd 0x100(%rsp,%rdi,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rdx, %rsi
jne 0x1a2e2a
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm4
blendvps %xmm0, %xmm2, %xmm4
blendvps %xmm0, %xmm5, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm4, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm5
blendvps %xmm0, %xmm4, %xmm5
blendvps %xmm0, %xmm3, %xmm4
movaps %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm4, %xmm2
movaps %xmm2, (%rbx)
movaps %xmm3, 0x10(%rbx)
movq %xmm5, %rax
addq $0x20, %rbx
movq 0x8(%rsp), %r8
movq (%rsp), %rdi
movq 0x12f8(%rsp), %r11
movaps 0x1e0(%rsp), %xmm4
jmp 0x1a2cf1
bsfq %rsi, %rdx
movq (%rax,%rdx,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
movq %rax, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm5, %xmm4
blendvps %xmm0, %xmm2, %xmm4
movd 0x100(%rsp,%rdx,4), %xmm7
blendvps %xmm0, %xmm5, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm5
blendvps %xmm0, %xmm3, %xmm5
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm5, %xmm0
pcmpgtd %xmm4, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm4
movaps %xmm6, %xmm0
pcmpgtd %xmm4, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm5
blendvps %xmm0, %xmm4, %xmm5
blendvps %xmm0, %xmm6, %xmm4
movaps 0x1f0(%rsp), %xmm6
movaps %xmm2, (%rbx)
movaps %xmm4, 0x10(%rbx)
movaps %xmm5, 0x20(%rbx)
movq %xmm3, %rax
addq $0x30, %rbx
jmp 0x1a2e0c
cmpl $0x6, %ecx
jne 0x1a2c16
movl %eax, %ecx
andl $0xf, %ecx
cmpl $0x8, %ecx
jne 0x1a4601
movq %rbx, 0x1d8(%rsp)
movq (%r8), %rcx
movl 0xc(%rcx), %ebx
movl 0x24(%rcx), %edx
addq %rcx, %rdx
shrq $0x4, %rax
leaq (%rdx,%rax,4), %rsi
movups 0x2c(%rdx,%rax,4), %xmm2
movups 0x2c(%rsi,%rbx,4), %xmm0
cmpq $0x2, %rbx
je 0x1a4615
movl 0x14(%rcx), %eax
leaq (%rsi,%rax,4), %rdx
addq $0x2c, %rdx
movups (%rdx), %xmm8
movups (%rdx,%rbx,4), %xmm5
cmpq $0x2, %rbx
je 0x1a4622
leaq (%rsi,%rax,8), %rdx
addq $0x2c, %rdx
movaps %xmm2, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
shufps $0xa5, %xmm2, %xmm2 # xmm2 = xmm2[1,1,2,2]
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm8, %xmm11
unpcklps %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,2]
movups (%rdx), %xmm3
movups (%rdx,%rbx,4), %xmm12
cmpq $0x2, %rbx
je 0x1a4630
movl 0x10(%rcx), %r10d
imulq $0xc, %rax, %rax
addq %rsi, %rax
addq $0x2c, %rax
movaps %xmm3, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
shufps $0xa5, %xmm3, %xmm3 # xmm3 = xmm3[1,1,2,2]
shufps $0x94, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,2]
movq %rax, 0xb8(%rsp)
movq %rbx, 0xc0(%rsp)
movq %r10, 0xc8(%rsp)
movl 0x18(%rcx), %eax
movl 0x1c(%rcx), %r8d
leaq 0xb8(%rsp), %rdx
movq %rdx, 0x130(%rsp)
movss (%r9,%rdi,4), %xmm6
movss 0x10(%r9,%rdi,4), %xmm7
movaps %xmm3, %xmm9
movss 0x20(%r9,%rdi,4), %xmm3
movss 0x40(%r9,%rdi,4), %xmm4
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm4, 0x30(%rsp)
subps %xmm6, %xmm1
subps %xmm7, %xmm11
movaps %xmm11, 0x230(%rsp)
subps %xmm3, %xmm13
subps %xmm6, %xmm2
movaps %xmm2, 0x40(%rsp)
subps %xmm7, %xmm8
movaps %xmm8, 0x20(%rsp)
subps %xmm3, %xmm9
movaps %xmm9, 0x10(%rsp)
subps %xmm6, %xmm0
subps %xmm7, %xmm5
subps %xmm3, %xmm12
movaps %xmm0, %xmm9
subps %xmm1, %xmm9
movaps %xmm5, %xmm2
subps %xmm11, %xmm2
movaps %xmm12, %xmm15
subps %xmm13, %xmm15
movaps %xmm0, %xmm3
addps %xmm1, %xmm3
movaps %xmm5, %xmm6
addps %xmm11, %xmm6
movaps %xmm12, %xmm7
addps %xmm13, %xmm7
movaps %xmm9, %xmm8
mulps %xmm6, %xmm8
mulps %xmm15, %xmm6
movaps %xmm2, %xmm10
mulps %xmm7, %xmm10
subps %xmm6, %xmm10
movaps %xmm9, 0x1b0(%rsp)
mulps %xmm9, %xmm7
movaps %xmm3, %xmm6
movaps %xmm15, 0xa0(%rsp)
mulps %xmm15, %xmm6
subps %xmm7, %xmm6
movss 0x50(%r9,%rdi,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm2, 0x90(%rsp)
mulps %xmm2, %xmm3
subps %xmm3, %xmm8
movss 0x60(%r9,%rdi,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm14, %xmm8
mulps %xmm4, %xmm6
addps %xmm8, %xmm6
movaps %xmm1, %xmm8
movaps 0x40(%rsp), %xmm15
subps %xmm15, %xmm8
mulps 0x30(%rsp), %xmm10
addps %xmm6, %xmm10
movaps %xmm11, %xmm9
movaps 0x20(%rsp), %xmm7
subps %xmm7, %xmm9
movaps %xmm13, %xmm2
movaps %xmm1, %xmm3
movaps 0x10(%rsp), %xmm1
subps %xmm1, %xmm2
movaps %xmm11, %xmm6
addps %xmm7, %xmm6
movaps %xmm13, 0x60(%rsp)
movaps %xmm13, %xmm7
addps %xmm1, %xmm7
movaps %xmm8, %xmm13
mulps %xmm6, %xmm13
mulps %xmm2, %xmm6
movaps %xmm9, %xmm11
mulps %xmm7, %xmm11
subps %xmm6, %xmm11
movaps %xmm3, 0x240(%rsp)
addps %xmm15, %xmm3
mulps %xmm8, %xmm7
movaps %xmm3, %xmm6
movaps %xmm2, 0x220(%rsp)
mulps %xmm2, %xmm6
subps %xmm7, %xmm6
mulps %xmm9, %xmm3
subps %xmm3, %xmm13
mulps %xmm14, %xmm13
mulps %xmm4, %xmm6
addps %xmm13, %xmm6
movaps 0x30(%rsp), %xmm1
mulps %xmm1, %xmm11
addps %xmm6, %xmm11
movaps %xmm15, %xmm6
subps %xmm0, %xmm6
addps %xmm15, %xmm0
movaps 0x20(%rsp), %xmm2
movaps %xmm2, %xmm3
subps %xmm5, %xmm3
addps %xmm2, %xmm5
movaps 0x10(%rsp), %xmm2
movaps %xmm2, %xmm13
subps %xmm12, %xmm13
addps %xmm2, %xmm12
movaps %xmm1, %xmm15
movaps %xmm6, %xmm7
mulps %xmm5, %xmm7
mulps %xmm13, %xmm5
movaps %xmm3, %xmm1
mulps %xmm12, %xmm1
subps %xmm5, %xmm1
mulps %xmm6, %xmm12
movaps %xmm0, %xmm2
mulps %xmm13, %xmm2
subps %xmm12, %xmm2
mulps %xmm3, %xmm0
subps %xmm0, %xmm7
movaps %xmm14, 0x10(%rsp)
mulps %xmm14, %xmm7
mulps %xmm4, %xmm2
addps %xmm7, %xmm2
mulps %xmm15, %xmm1
addps %xmm2, %xmm1
movaps %xmm10, %xmm2
addps %xmm11, %xmm2
addps %xmm1, %xmm2
movaps %xmm10, %xmm0
minps %xmm11, %xmm0
minps %xmm1, %xmm0
movaps %xmm10, 0x210(%rsp)
maxps %xmm11, %xmm10
maxps %xmm1, %xmm10
movaps %xmm2, 0x200(%rsp)
movaps %xmm2, %xmm1
andps 0x1d494a5(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d4eb4e(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm10
xorps 0x1d494a2(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm10
movmskps %xmm10, %edx
testl %edx, %edx
je 0x1a36a1
movaps %xmm4, %xmm14
movaps %xmm9, %xmm0
movaps 0xa0(%rsp), %xmm15
mulps %xmm15, %xmm0
movaps %xmm8, %xmm1
movaps 0x90(%rsp), %xmm5
mulps %xmm5, %xmm1
movaps %xmm3, %xmm7
movaps %xmm3, 0x20(%rsp)
movaps 0x220(%rsp), %xmm3
mulps %xmm3, %xmm7
movaps %xmm13, %xmm12
movaps %xmm13, 0x40(%rsp)
movaps %xmm6, %xmm13
mulps %xmm9, %xmm13
mulps %xmm3, %xmm5
subps %xmm0, %xmm5
movaps 0x1b0(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm9, %xmm4
mulps %xmm12, %xmm9
subps %xmm7, %xmm9
movaps 0x1d49410(%rip), %xmm12 # 0x1eec6c0
andps %xmm12, %xmm0
andps %xmm12, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm5, %xmm9
movaps %xmm8, %xmm0
mulps 0x40(%rsp), %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm6
mulps %xmm8, %xmm15
subps %xmm2, %xmm15
subps %xmm0, %xmm6
andps %xmm12, %xmm2
andps %xmm12, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm15, %xmm6
mulps 0x20(%rsp), %xmm8
subps %xmm1, %xmm4
subps %xmm13, %xmm8
andps %xmm12, %xmm1
andps %xmm12, %xmm13
cmpltps %xmm13, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm4, %xmm8
movaps 0x30(%rsp), %xmm3
movaps 0x10(%rsp), %xmm0
mulps %xmm8, %xmm0
mulps %xmm6, %xmm14
addps %xmm0, %xmm14
mulps %xmm9, %xmm3
addps %xmm14, %xmm3
addps %xmm3, %xmm3
movaps 0x60(%rsp), %xmm0
mulps %xmm8, %xmm0
movaps 0x230(%rsp), %xmm1
mulps %xmm6, %xmm1
addps %xmm0, %xmm1
movaps 0x240(%rsp), %xmm4
mulps %xmm9, %xmm4
addps %xmm1, %xmm4
rcpps %xmm3, %xmm1
movaps %xmm3, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d496a5(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm4, %xmm4
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm4, %xmm0
movss 0x80(%r9,%rdi,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%rdi,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm10, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a36a1
cmpneqps 0x1d48658(%rip), %xmm3 # 0x1eeba10
andps %xmm3, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a36a1
movaps 0x210(%rsp), %xmm2
movaps %xmm2, 0x100(%rsp)
movaps %xmm11, 0x110(%rsp)
movaps 0x200(%rsp), %xmm2
movaps %xmm2, 0x120(%rsp)
leaq 0xb8(%rsp), %rdx
movq %rdx, 0x130(%rsp)
movaps %xmm1, 0x140(%rsp)
movaps %xmm0, 0x170(%rsp)
movaps %xmm9, 0x180(%rsp)
movaps %xmm6, 0x190(%rsp)
movaps %xmm8, 0x1a0(%rsp)
movq (%r11), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rcx
movl 0x90(%r9,%rdi,4), %edx
movq %rcx, 0x30(%rsp)
testl %edx, 0x34(%rcx)
je 0x1a36a1
movq %rsi, 0x10(%rsp)
movaps 0x140(%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
movaps 0x120(%rsp), %xmm2
movaps %xmm2, %xmm1
andps 0x1d4924f(%rip), %xmm1 # 0x1eec6c0
rcpps %xmm2, %xmm3
mulps %xmm3, %xmm2
movaps 0x1d49592(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm4
subps %xmm2, %xmm4
mulps %xmm3, %xmm4
addps %xmm3, %xmm4
cmpnltps 0x1d4e8ae(%rip), %xmm1 # 0x1ef1d40
andps %xmm4, %xmm1
movaps 0x100(%rsp), %xmm2
mulps %xmm1, %xmm2
minps %xmm5, %xmm2
movaps %xmm2, 0x150(%rsp)
mulps 0x110(%rsp), %xmm1
minps %xmm5, %xmm1
movaps %xmm1, 0x160(%rsp)
movq 0x130(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm6
movups (%rsi,%rdx,4), %xmm4
cmpq $0x2, %rdx
je 0x1a46d3
movaps %xmm6, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
shufps $0xa5, %xmm6, %xmm6 # xmm6 = xmm6[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm3, %xmm5
psrld $0x10, %xmm5
xorps %xmm9, %xmm9
pblendw $0xaa, %xmm9, %xmm3 # xmm3 = xmm3[0],xmm9[1],xmm3[2],xmm9[3],xmm3[4],xmm9[5],xmm3[6],xmm9[7]
cvtdq2ps %xmm3, %xmm3
movaps 0x1d79466(%rip), %xmm10 # 0x1f1c970
mulps %xmm10, %xmm3
cvtdq2ps %xmm5, %xmm5
mulps %xmm10, %xmm5
movaps %xmm6, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm9, %xmm6 # xmm6 = xmm6[0],xmm9[1],xmm6[2],xmm9[3],xmm6[4],xmm9[5],xmm6[6],xmm9[7]
cvtdq2ps %xmm6, %xmm6
mulps %xmm10, %xmm6
cvtdq2ps %xmm7, %xmm7
mulps %xmm10, %xmm7
movaps %xmm4, %xmm8
psrld $0x10, %xmm8
pblendw $0xaa, %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[1],xmm4[2],xmm9[3],xmm4[4],xmm9[5],xmm4[6],xmm9[7]
cvtdq2ps %xmm4, %xmm4
mulps %xmm10, %xmm4
cvtdq2ps %xmm8, %xmm8
mulps %xmm10, %xmm8
mulps %xmm2, %xmm6
mulps %xmm2, %xmm7
mulps %xmm1, %xmm4
addps %xmm6, %xmm4
mulps %xmm1, %xmm8
addps %xmm7, %xmm8
movaps 0x1d494a3(%rip), %xmm6 # 0x1eeca10
subps %xmm2, %xmm6
subps %xmm1, %xmm6
mulps %xmm6, %xmm3
addps %xmm4, %xmm3
mulps %xmm5, %xmm6
addps %xmm8, %xmm6
movaps %xmm3, 0x150(%rsp)
movaps %xmm6, 0x160(%rsp)
movaps 0x170(%rsp), %xmm6
movaps 0x1d48451(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
movq 0x10(%rsp), %rsi
je 0x1a35ce
movaps %xmm1, %xmm0
movmskps %xmm0, %edx
bsfq %rdx, %r15
movq 0x10(%r11), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1a3ea5
movq 0x30(%rsp), %rdx
cmpq $0x0, 0x40(%rdx)
jne 0x1a3ea5
movss 0x150(%rsp,%r15,4), %xmm0
movss 0x160(%rsp,%r15,4), %xmm1
movss 0x180(%rsp,%r15,4), %xmm2
movss 0x190(%rsp,%r15,4), %xmm3
movss 0x1a0(%rsp,%r15,4), %xmm4
movss 0x170(%rsp,%r15,4), %xmm5
movss %xmm5, 0x80(%r9,%rdi,4)
movss %xmm2, 0xc0(%r9,%rdi,4)
movss %xmm3, 0xd0(%r9,%rdi,4)
movss %xmm4, 0xe0(%r9,%rdi,4)
movss %xmm0, 0xf0(%r9,%rdi,4)
movss %xmm1, 0x100(%r9,%rdi,4)
movl %r8d, 0x110(%r9,%rdi,4)
movl %eax, 0x120(%r9,%rdi,4)
movq 0x8(%r11), %rax
movl (%rax), %eax
movl %eax, 0x130(%r9,%rdi,4)
movq 0x8(%r11), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r9,%rdi,4)
movq 0x1d0(%rsp), %r15
cmpl $0x3, %r10d
jb 0x1a463e
leaq (%rsi,%rbx,4), %rax
addq $0x2c, %rax
movups (%rax), %xmm2
movups (%rax,%rbx,4), %xmm0
cmpq $0x2, %rbx
movq 0x8(%rsp), %r8
je 0x1a46aa
movq (%r8), %rcx
movl 0x14(%rcx), %edx
leaq (%rax,%rdx,4), %rsi
movups (%rsi), %xmm8
movups (%rsi,%rbx,4), %xmm5
cmpq $0x2, %rbx
je 0x1a46b7
leaq (%rax,%rdx,8), %rsi
movaps %xmm2, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
shufps $0xa5, %xmm2, %xmm2 # xmm2 = xmm2[1,1,2,2]
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm8, %xmm11
unpcklps %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,2]
movups (%rsi), %xmm3
movups (%rsi,%rbx,4), %xmm12
cmpq $0x2, %rbx
je 0x1a46c5
imulq $0xc, %rdx, %rdx
addq %rdx, %rax
movaps %xmm3, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
shufps $0xa5, %xmm3, %xmm3 # xmm3 = xmm3[1,1,2,2]
shufps $0x94, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,2]
movq %rax, 0xb8(%rsp)
movq %rbx, 0xc0(%rsp)
movq %r10, 0xc8(%rsp)
movl 0x18(%rcx), %eax
movl 0x1c(%rcx), %ecx
leaq 0xb8(%rsp), %rdx
movq %rdx, 0x130(%rsp)
movss (%r9,%rdi,4), %xmm6
movss 0x10(%r9,%rdi,4), %xmm7
movaps %xmm3, %xmm9
movss 0x20(%r9,%rdi,4), %xmm3
movss 0x40(%r9,%rdi,4), %xmm4
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm4, 0x30(%rsp)
subps %xmm6, %xmm1
subps %xmm7, %xmm11
movaps %xmm11, 0x230(%rsp)
subps %xmm3, %xmm13
subps %xmm6, %xmm2
movaps %xmm2, 0x40(%rsp)
subps %xmm7, %xmm8
movaps %xmm8, 0x20(%rsp)
subps %xmm3, %xmm9
movaps %xmm9, 0x10(%rsp)
subps %xmm6, %xmm0
subps %xmm7, %xmm5
subps %xmm3, %xmm12
movaps %xmm0, %xmm9
subps %xmm1, %xmm9
movaps %xmm5, %xmm2
subps %xmm11, %xmm2
movaps %xmm12, %xmm15
subps %xmm13, %xmm15
movaps %xmm0, %xmm3
addps %xmm1, %xmm3
movaps %xmm5, %xmm6
addps %xmm11, %xmm6
movaps %xmm12, %xmm7
addps %xmm13, %xmm7
movaps %xmm9, %xmm8
mulps %xmm6, %xmm8
mulps %xmm15, %xmm6
movaps %xmm2, %xmm10
mulps %xmm7, %xmm10
subps %xmm6, %xmm10
movaps %xmm9, 0x1b0(%rsp)
mulps %xmm9, %xmm7
movaps %xmm3, %xmm6
movaps %xmm15, 0xa0(%rsp)
mulps %xmm15, %xmm6
subps %xmm7, %xmm6
movss 0x50(%r9,%rdi,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm2, 0x90(%rsp)
mulps %xmm2, %xmm3
subps %xmm3, %xmm8
movss 0x60(%r9,%rdi,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm14, %xmm8
mulps %xmm4, %xmm6
addps %xmm8, %xmm6
movaps %xmm1, %xmm8
movaps 0x40(%rsp), %xmm15
subps %xmm15, %xmm8
mulps 0x30(%rsp), %xmm10
addps %xmm6, %xmm10
movaps %xmm11, %xmm9
movaps 0x20(%rsp), %xmm7
subps %xmm7, %xmm9
movaps %xmm13, %xmm2
movaps %xmm1, %xmm3
movaps 0x10(%rsp), %xmm1
subps %xmm1, %xmm2
movaps %xmm11, %xmm6
addps %xmm7, %xmm6
movaps %xmm13, 0x60(%rsp)
movaps %xmm13, %xmm7
addps %xmm1, %xmm7
movaps %xmm8, %xmm13
mulps %xmm6, %xmm13
mulps %xmm2, %xmm6
movaps %xmm9, %xmm11
mulps %xmm7, %xmm11
subps %xmm6, %xmm11
movaps %xmm3, 0x240(%rsp)
addps %xmm15, %xmm3
mulps %xmm8, %xmm7
movaps %xmm3, %xmm6
movaps %xmm2, 0x220(%rsp)
mulps %xmm2, %xmm6
subps %xmm7, %xmm6
mulps %xmm9, %xmm3
subps %xmm3, %xmm13
mulps %xmm14, %xmm13
mulps %xmm4, %xmm6
addps %xmm13, %xmm6
movaps 0x30(%rsp), %xmm1
mulps %xmm1, %xmm11
addps %xmm6, %xmm11
movaps %xmm15, %xmm6
subps %xmm0, %xmm6
addps %xmm15, %xmm0
movaps 0x20(%rsp), %xmm2
movaps %xmm2, %xmm3
subps %xmm5, %xmm3
addps %xmm2, %xmm5
movaps 0x10(%rsp), %xmm2
movaps %xmm2, %xmm13
subps %xmm12, %xmm13
addps %xmm2, %xmm12
movaps %xmm1, %xmm15
movaps %xmm6, %xmm7
mulps %xmm5, %xmm7
mulps %xmm13, %xmm5
movaps %xmm3, %xmm1
mulps %xmm12, %xmm1
subps %xmm5, %xmm1
mulps %xmm6, %xmm12
movaps %xmm0, %xmm2
mulps %xmm13, %xmm2
subps %xmm12, %xmm2
mulps %xmm3, %xmm0
subps %xmm0, %xmm7
movaps %xmm14, 0x10(%rsp)
mulps %xmm14, %xmm7
mulps %xmm4, %xmm2
addps %xmm7, %xmm2
mulps %xmm15, %xmm1
addps %xmm2, %xmm1
movaps %xmm10, %xmm2
addps %xmm11, %xmm2
addps %xmm1, %xmm2
movaps %xmm10, %xmm0
minps %xmm11, %xmm0
minps %xmm1, %xmm0
movaps %xmm10, 0x210(%rsp)
maxps %xmm11, %xmm10
maxps %xmm1, %xmm10
movaps %xmm2, 0x200(%rsp)
movaps %xmm2, %xmm1
andps 0x1d48d16(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d4e3bf(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm10
xorps 0x1d48d13(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm10
movmskps %xmm10, %edx
testl %edx, %edx
je 0x1a3e1c
movaps %xmm4, %xmm14
movaps %xmm9, %xmm0
movaps 0xa0(%rsp), %xmm15
mulps %xmm15, %xmm0
movaps %xmm8, %xmm1
movaps 0x90(%rsp), %xmm5
mulps %xmm5, %xmm1
movaps %xmm3, %xmm7
movaps %xmm3, 0x20(%rsp)
movaps 0x220(%rsp), %xmm3
mulps %xmm3, %xmm7
movaps %xmm13, %xmm12
movaps %xmm13, 0x40(%rsp)
movaps %xmm6, %xmm13
mulps %xmm9, %xmm13
mulps %xmm3, %xmm5
subps %xmm0, %xmm5
movaps 0x1b0(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm9, %xmm4
mulps %xmm12, %xmm9
subps %xmm7, %xmm9
movaps 0x1d48c81(%rip), %xmm12 # 0x1eec6c0
andps %xmm12, %xmm0
andps %xmm12, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm5, %xmm9
movaps %xmm8, %xmm0
mulps 0x40(%rsp), %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm6
mulps %xmm8, %xmm15
subps %xmm2, %xmm15
subps %xmm0, %xmm6
andps %xmm12, %xmm2
andps %xmm12, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm15, %xmm6
mulps 0x20(%rsp), %xmm8
subps %xmm1, %xmm4
subps %xmm13, %xmm8
andps %xmm12, %xmm1
andps %xmm12, %xmm13
cmpltps %xmm13, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm4, %xmm8
movaps 0x30(%rsp), %xmm3
movaps 0x10(%rsp), %xmm0
mulps %xmm8, %xmm0
mulps %xmm6, %xmm14
addps %xmm0, %xmm14
mulps %xmm9, %xmm3
addps %xmm14, %xmm3
addps %xmm3, %xmm3
movaps 0x60(%rsp), %xmm0
mulps %xmm8, %xmm0
movaps 0x230(%rsp), %xmm1
mulps %xmm6, %xmm1
addps %xmm0, %xmm1
movaps 0x240(%rsp), %xmm4
mulps %xmm9, %xmm4
addps %xmm1, %xmm4
rcpps %xmm3, %xmm1
movaps %xmm3, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d48f16(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm4, %xmm4
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm4, %xmm0
movss 0x80(%r9,%rdi,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%rdi,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm10, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a3e1c
cmpneqps 0x1d47ec9(%rip), %xmm3 # 0x1eeba10
andps %xmm3, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a3e1c
movaps 0x210(%rsp), %xmm2
movaps %xmm2, 0x100(%rsp)
movaps %xmm11, 0x110(%rsp)
movaps 0x200(%rsp), %xmm2
movaps %xmm2, 0x120(%rsp)
leaq 0xb8(%rsp), %rdx
movq %rdx, 0x130(%rsp)
movaps %xmm1, 0x140(%rsp)
movaps %xmm0, 0x170(%rsp)
movaps %xmm9, 0x180(%rsp)
movaps %xmm6, 0x190(%rsp)
movaps %xmm8, 0x1a0(%rsp)
movq (%r11), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rbx
movl 0x90(%r9,%rdi,4), %edx
testl %edx, 0x34(%rbx)
je 0x1a3e1c
movaps 0x140(%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
movaps 0x120(%rsp), %xmm2
movaps %xmm2, %xmm1
andps 0x1d48aca(%rip), %xmm1 # 0x1eec6c0
rcpps %xmm2, %xmm3
mulps %xmm3, %xmm2
movaps 0x1d48e0d(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm4
subps %xmm2, %xmm4
mulps %xmm3, %xmm4
addps %xmm3, %xmm4
cmpnltps 0x1d4e129(%rip), %xmm1 # 0x1ef1d40
andps %xmm4, %xmm1
movaps 0x100(%rsp), %xmm2
mulps %xmm1, %xmm2
minps %xmm5, %xmm2
movaps %xmm2, 0x150(%rsp)
mulps 0x110(%rsp), %xmm1
minps %xmm5, %xmm1
movaps %xmm1, 0x160(%rsp)
movq 0x130(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm6
movups (%rsi,%rdx,4), %xmm4
cmpq $0x2, %rdx
je 0x1a46e0
movaps %xmm6, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
shufps $0xa5, %xmm6, %xmm6 # xmm6 = xmm6[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm3, %xmm5
psrld $0x10, %xmm5
xorps %xmm9, %xmm9
pblendw $0xaa, %xmm9, %xmm3 # xmm3 = xmm3[0],xmm9[1],xmm3[2],xmm9[3],xmm3[4],xmm9[5],xmm3[6],xmm9[7]
cvtdq2ps %xmm3, %xmm3
movaps 0x1d78ce1(%rip), %xmm10 # 0x1f1c970
mulps %xmm10, %xmm3
cvtdq2ps %xmm5, %xmm5
mulps %xmm10, %xmm5
movaps %xmm6, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm9, %xmm6 # xmm6 = xmm6[0],xmm9[1],xmm6[2],xmm9[3],xmm6[4],xmm9[5],xmm6[6],xmm9[7]
cvtdq2ps %xmm6, %xmm6
mulps %xmm10, %xmm6
cvtdq2ps %xmm7, %xmm7
mulps %xmm10, %xmm7
movaps %xmm4, %xmm8
psrld $0x10, %xmm8
pblendw $0xaa, %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[1],xmm4[2],xmm9[3],xmm4[4],xmm9[5],xmm4[6],xmm9[7]
cvtdq2ps %xmm4, %xmm4
mulps %xmm10, %xmm4
cvtdq2ps %xmm8, %xmm8
mulps %xmm10, %xmm8
mulps %xmm2, %xmm6
mulps %xmm2, %xmm7
mulps %xmm1, %xmm4
addps %xmm6, %xmm4
mulps %xmm1, %xmm8
addps %xmm7, %xmm8
movaps 0x1d48d1e(%rip), %xmm6 # 0x1eeca10
subps %xmm2, %xmm6
subps %xmm1, %xmm6
mulps %xmm6, %xmm3
addps %xmm4, %xmm3
mulps %xmm5, %xmm6
addps %xmm8, %xmm6
movaps %xmm3, 0x150(%rsp)
movaps %xmm6, 0x160(%rsp)
movaps 0x170(%rsp), %xmm6
movaps 0x1d47ccc(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a3d4e
movaps %xmm1, %xmm0
movmskps %xmm0, %edx
bsfq %rdx, %r15
movq 0x10(%r11), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1a426c
cmpq $0x0, 0x40(%rbx)
jne 0x1a426c
movss 0x150(%rsp,%r15,4), %xmm0
movss 0x160(%rsp,%r15,4), %xmm1
movss 0x180(%rsp,%r15,4), %xmm2
movss 0x190(%rsp,%r15,4), %xmm3
movss 0x1a0(%rsp,%r15,4), %xmm4
movss 0x170(%rsp,%r15,4), %xmm5
movss %xmm5, 0x80(%r9,%rdi,4)
movss %xmm2, 0xc0(%r9,%rdi,4)
movss %xmm3, 0xd0(%r9,%rdi,4)
movss %xmm4, 0xe0(%r9,%rdi,4)
movss %xmm0, 0xf0(%r9,%rdi,4)
movss %xmm1, 0x100(%r9,%rdi,4)
movl %ecx, 0x110(%r9,%rdi,4)
movl %eax, 0x120(%r9,%rdi,4)
movq 0x8(%r11), %rax
movl (%rax), %eax
movl %eax, 0x130(%r9,%rdi,4)
movq 0x8(%r11), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r9,%rdi,4)
movq 0x1d0(%rsp), %r15
xorl %ecx, %ecx
movq 0x1d8(%rsp), %rbx
movaps 0x1e0(%rsp), %xmm4
movaps 0x2d0(%rsp), %xmm8
movaps 0x2c0(%rsp), %xmm9
movaps 0x2b0(%rsp), %xmm10
movaps 0x2a0(%rsp), %xmm11
movaps 0x290(%rsp), %xmm12
movaps 0x280(%rsp), %xmm13
movaps 0x270(%rsp), %xmm14
movaps 0x260(%rsp), %xmm15
movaps 0x1f0(%rsp), %xmm6
movss 0x80(%r9,%rdi,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
testq %rcx, %rcx
je 0x1a2c16
movq %rcx, (%rbx)
andl $0x0, 0x8(%rbx)
addq $0x10, %rbx
jmp 0x1a2c16
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x40(%rsp)
movd %r8d, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x60(%rsp)
movq 0x1c8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x1b0(%rsp)
movq %r10, 0xa0(%rsp)
movaps %xmm6, 0x90(%rsp)
movss 0x80(%r9,%rdi,4), %xmm0
movss %xmm0, 0x20(%rsp)
movss 0x170(%rsp,%r15,4), %xmm0
movss 0x150(%rsp,%r15,4), %xmm1
movss 0x160(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%r9,%rdi,4)
movq 0x8(%r11), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x180(%rsp,%r15,4), %xmm0
movss 0x190(%rsp,%r15,4), %xmm3
movss 0x1a0(%rsp,%r15,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x2e0(%rsp)
movaps %xmm3, 0x2f0(%rsp)
movaps %xmm4, 0x300(%rsp)
movaps %xmm1, 0x310(%rsp)
movaps %xmm2, 0x320(%rsp)
movaps 0x60(%rsp), %xmm0
movaps %xmm0, 0x330(%rsp)
movdqa 0x40(%rsp), %xmm0
movdqa %xmm0, 0x340(%rsp)
leaq 0x350(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x350(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x360(%rsp)
movdqa 0x1b0(%rsp), %xmm0
movdqa %xmm0, 0x80(%rsp)
leaq 0x80(%rsp), %rcx
movq %rcx, 0xd0(%rsp)
movq 0x30(%rsp), %rdx
movq 0x18(%rdx), %rcx
movq %rcx, 0xd8(%rsp)
movq %rax, 0xe0(%rsp)
movq %r9, 0xe8(%rsp)
leaq 0x2e0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x4, 0xf8(%rsp)
movq 0x40(%rdx), %rax
testq %rax, %rax
je 0x1a406a
leaq 0xd0(%rsp), %rdi
callq *%rax
movaps 0x90(%rsp), %xmm6
movq 0x10(%rsp), %rsi
movq 0xa0(%rsp), %r10
movq 0x12f8(%rsp), %r11
movq (%rsp), %rdi
movq 0x58(%rsp), %r9
movdqa 0x80(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1a41d0
movq 0x10(%r11), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1a40cb
testb $0x2, (%rcx)
jne 0x1a409b
movq 0x30(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1a40cb
leaq 0xd0(%rsp), %rdi
callq *%rax
movaps 0x90(%rsp), %xmm6
movq 0x10(%rsp), %rsi
movq 0xa0(%rsp), %r10
movq 0x12f8(%rsp), %r11
movq (%rsp), %rdi
movq 0x58(%rsp), %r9
movdqa 0x80(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d4792f(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d47d33(%rip), %xmm1 # 0x1eebe20
je 0x1a41e0
movq 0xe8(%rsp), %rax
movq 0xf0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1a41e0
pcmpeqd 0x1d47838(%rip), %xmm1 # 0x1eeba10
pxor 0x1d47c40(%rip), %xmm1 # 0x1eebe20
ptest 0x1d48937(%rip), %xmm1 # 0x1eecb20
jne 0x1a41fb
movd 0x20(%rsp), %xmm0
movd %xmm0, 0x80(%r9,%rdi,4)
andl $0x0, 0x70(%rsp,%r15,4)
movss 0x80(%r9,%rdi,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm6, %xmm0
cmpleps %xmm1, %xmm0
andps 0x70(%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1a425f
movaps 0x1d477c2(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %ecx
testl %ecx, %ecx
je 0x1a4258
movaps %xmm1, %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %r15
testb %al, %al
jne 0x1a3ee7
jmp 0x1a3699
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x10(%rsp)
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x20(%rsp)
movq 0x1c8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x40(%rsp)
movaps %xmm6, 0x60(%rsp)
movss 0x80(%r9,%rdi,4), %xmm0
movss %xmm0, 0x30(%rsp)
movss 0x170(%rsp,%r15,4), %xmm0
movss 0x150(%rsp,%r15,4), %xmm1
movss 0x160(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%r9,%rdi,4)
movq 0x8(%r11), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x180(%rsp,%r15,4), %xmm0
movss 0x190(%rsp,%r15,4), %xmm3
movss 0x1a0(%rsp,%r15,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x2e0(%rsp)
movaps %xmm3, 0x2f0(%rsp)
movaps %xmm4, 0x300(%rsp)
movaps %xmm1, 0x310(%rsp)
movaps %xmm2, 0x320(%rsp)
movaps 0x20(%rsp), %xmm0
movaps %xmm0, 0x330(%rsp)
movdqa 0x10(%rsp), %xmm0
movdqa %xmm0, 0x340(%rsp)
leaq 0x350(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x350(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x360(%rsp)
movdqa 0x40(%rsp), %xmm0
movdqa %xmm0, 0x80(%rsp)
leaq 0x80(%rsp), %rcx
movq %rcx, 0xd0(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0xd8(%rsp)
movq %rax, 0xe0(%rsp)
movq %r9, 0xe8(%rsp)
leaq 0x2e0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x4, 0xf8(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
je 0x1a440f
leaq 0xd0(%rsp), %rdi
callq *%rax
movaps 0x60(%rsp), %xmm6
movq 0x12f8(%rsp), %r11
movq (%rsp), %rdi
movq 0x8(%rsp), %r8
movq 0x58(%rsp), %r9
movdqa 0x80(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1a4565
movq 0x10(%r11), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1a4460
testb $0x2, (%rcx)
jne 0x1a443b
testb $0x40, 0x3e(%rbx)
je 0x1a4460
leaq 0xd0(%rsp), %rdi
callq *%rax
movaps 0x60(%rsp), %xmm6
movq 0x12f8(%rsp), %r11
movq (%rsp), %rdi
movq 0x8(%rsp), %r8
movq 0x58(%rsp), %r9
movdqa 0x80(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d4759a(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d4799e(%rip), %xmm1 # 0x1eebe20
je 0x1a4575
movq 0xe8(%rsp), %rax
movq 0xf0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1a4575
pcmpeqd 0x1d474a3(%rip), %xmm1 # 0x1eeba10
pxor 0x1d478ab(%rip), %xmm1 # 0x1eebe20
ptest 0x1d485a2(%rip), %xmm1 # 0x1eecb20
jne 0x1a4590
movd 0x30(%rsp), %xmm0
movd %xmm0, 0x80(%r9,%rdi,4)
andl $0x0, 0x70(%rsp,%r15,4)
movss 0x80(%r9,%rdi,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm6, %xmm0
cmpleps %xmm1, %xmm0
andps 0x70(%rsp), %xmm0
movaps %xmm0, 0x70(%rsp)
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1a45f4
movaps 0x1d4742d(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %ecx
testl %ecx, %ecx
je 0x1a45ed
movaps %xmm1, %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %r15
testb %al, %al
jne 0x1a429f
jmp 0x1a3e14
andq $-0x10, %rax
movl 0x2c(%rax), %ecx
movq 0x30(%rax,%rcx), %rcx
movq %rax, (%r8)
jmp 0x1a3e7e
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1a2f2c
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
jmp 0x1a2f49
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
shufps $0x54, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,1]
jmp 0x1a2f82
xorl %ecx, %ecx
movq 0x1d8(%rsp), %rbx
movaps 0x1e0(%rsp), %xmm4
movaps 0x2d0(%rsp), %xmm8
movaps 0x2c0(%rsp), %xmm9
movaps 0x2b0(%rsp), %xmm10
movaps 0x2a0(%rsp), %xmm11
movaps 0x290(%rsp), %xmm12
movaps 0x280(%rsp), %xmm13
movaps 0x270(%rsp), %xmm14
movaps 0x260(%rsp), %xmm15
movaps 0x1f0(%rsp), %xmm6
movq 0x8(%rsp), %r8
jmp 0x1a3e7e
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1a36c9
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
jmp 0x1a36e5
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
shufps $0x54, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,1]
jmp 0x1a371a
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a34de
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a3c63
addq $0x12b8, %rsp # imm = 0x12B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 1, true, embree::sse42::SubdivPatch1IntersectorK<4>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubdivPatch1PrecalculationsK<4, embree::sse42::GridSOAIntersectorK<4>::Precalculations>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb18, %rsp # imm = 0xB18
movq %r8, 0x178(%rsp)
movq %rcx, %r10
movq 0xb50(%rsp), %rax
leaq 0x378(%rsp), %r11
movq %rdx, -0x8(%r11)
movss (%rax,%rcx,4), %xmm3
movss 0x10(%rax,%rcx,4), %xmm4
movss 0x20(%rax,%rcx,4), %xmm5
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm6
movss 0x70(%rax,%rcx,4), %xmm7
movss 0x80(%rax,%rcx,4), %xmm8
movss 0x1d799a2(%rip), %xmm9 # 0x1f1ff10
movaps %xmm6, %xmm10
mulss %xmm9, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movaps %xmm7, %xmm11
mulss %xmm9, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm8, %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x1d79978(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulss %xmm0, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulss %xmm0, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r12
movslq 0xa0(%rax,%rcx,4), %rbx
movslq 0xb0(%rax,%rcx,4), %r15
movq %r12, 0x2d8(%rsp)
xorq $0x10, %r12
movq %rbx, %r13
xorq $0x10, %r13
movq %r15, %r14
xorq $0x10, %r14
movss 0xc0(%rax,%rcx,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
pushq $0x1
popq %rax
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1f7e118(%rip), %rax # 0x2124730
movq %rax, 0x1b8(%rsp)
movaps %xmm3, 0x140(%rsp)
movaps %xmm4, 0x130(%rsp)
movaps %xmm5, 0x120(%rsp)
movaps %xmm6, 0x110(%rsp)
movaps %xmm7, 0x100(%rsp)
movaps %xmm8, 0xd0(%rsp)
movaps %xmm9, 0xc0(%rsp)
movaps %xmm10, 0xb0(%rsp)
movaps %xmm11, 0xa0(%rsp)
movaps %xmm12, 0x90(%rsp)
movaps %xmm13, 0x80(%rsp)
movq %r11, %rcx
leaq 0x370(%rsp), %rax
cmpq %rax, %r11
je 0x1a7e45
leaq -0x8(%rcx), %r11
movq %rcx, 0x2d0(%rsp)
movq -0x8(%rcx), %rax
testb $0x8, %al
jne 0x1a6713
movq 0x2d8(%rsp), %rcx
movaps 0x20(%rax,%rcx), %xmm0
subps %xmm3, %xmm0
mulps %xmm10, %xmm0
movaps 0x20(%rax,%rbx), %xmm1
subps %xmm4, %xmm1
mulps %xmm11, %xmm1
maxps %xmm1, %xmm0
movaps 0x20(%rax,%r15), %xmm1
subps %xmm5, %xmm1
mulps %xmm9, %xmm1
maxps %xmm12, %xmm1
maxps %xmm1, %xmm0
movaps 0x20(%rax,%r12), %xmm1
subps %xmm3, %xmm1
mulps %xmm6, %xmm1
movaps 0x20(%rax,%r13), %xmm2
subps %xmm4, %xmm2
mulps %xmm7, %xmm2
minps %xmm2, %xmm1
movaps 0x20(%rax,%r14), %xmm2
subps %xmm5, %xmm2
mulps %xmm8, %xmm2
minps %xmm13, %xmm2
minps %xmm2, %xmm1
cmpleps %xmm1, %xmm0
movmskps %xmm0, %ebp
testb $0x8, %al
jne 0x1a6747
testq %rbp, %rbp
je 0x1a674b
andq $-0x10, %rax
bsfq %rbp, %rdx
leaq -0x1(%rbp), %rdi
xorl %ecx, %ecx
movq (%rax,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
andq %rbp, %rdi
jne 0x1a6750
movq %rdx, %rax
testl %ecx, %ecx
je 0x1a66a2
jmp 0x1a6795
pushq $0x6
jmp 0x1a674d
pushq $0x4
popq %rcx
jmp 0x1a673d
movq %rdx, (%r11)
addq $0x8, %r11
movq %r11, (%rsp)
bsfq %rdi, %rsi
leaq -0x1(%rdi), %rdx
movq (%rax,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
andq %rdi, %rdx
je 0x1a678c
movq (%rsp), %rdi
movq %rsi, (%rdi)
addq $0x8, %rdi
movq %rdi, (%rsp)
bsfq %rdx, %rsi
leaq -0x1(%rdx), %rdi
jmp 0x1a6763
movq %rsi, %rax
movq (%rsp), %r11
jmp 0x1a673d
cmpl $0x6, %ecx
jne 0x1a781c
movl %eax, %ecx
andl $0xf, %ecx
cmpl $0x8, %ecx
jne 0x1a7899
movq 0x178(%rsp), %rcx
movq (%rcx), %rdx
movl 0xc(%rdx), %esi
movl 0x24(%rdx), %ecx
addq %rdx, %rcx
shrq $0x4, %rax
leaq (%rcx,%rax,4), %rdi
movups 0x2c(%rcx,%rax,4), %xmm2
movups 0x2c(%rdi,%rsi,4), %xmm0
cmpq $0x2, %rsi
je 0x1a78b2
movl 0x14(%rdx), %eax
leaq (%rdi,%rax,4), %rcx
addq $0x2c, %rcx
movups (%rcx), %xmm8
movups (%rcx,%rsi,4), %xmm5
cmpq $0x2, %rsi
je 0x1a78bf
leaq (%rdi,%rax,8), %rcx
addq $0x2c, %rcx
movaps %xmm2, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
shufps $0xa5, %xmm2, %xmm2 # xmm2 = xmm2[1,1,2,2]
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm8, %xmm11
unpcklps %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,2]
movups (%rcx), %xmm3
movups (%rcx,%rsi,4), %xmm12
cmpq $0x2, %rsi
je 0x1a78cd
movl 0x10(%rdx), %r8d
imulq $0xc, %rax, %rax
addq %rdi, %rax
addq $0x2c, %rax
movaps %xmm3, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
shufps $0xa5, %xmm3, %xmm3 # xmm3 = xmm3[1,1,2,2]
shufps $0x94, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,2]
movq %rax, 0x188(%rsp)
movq %rsi, 0x190(%rsp)
movq %r8, 0x198(%rsp)
movl 0x18(%rdx), %ecx
movl 0x1c(%rdx), %eax
movl %eax, 0x184(%rsp)
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movss (%r9,%r10,4), %xmm6
movss 0x10(%r9,%r10,4), %xmm7
movaps %xmm3, %xmm9
movss 0x20(%r9,%r10,4), %xmm3
movss 0x40(%r9,%r10,4), %xmm4
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm4, 0x20(%rsp)
subps %xmm6, %xmm1
subps %xmm7, %xmm11
movaps %xmm11, 0x1a0(%rsp)
subps %xmm3, %xmm13
subps %xmm6, %xmm2
movaps %xmm2, 0x30(%rsp)
subps %xmm7, %xmm8
movaps %xmm8, 0x10(%rsp)
subps %xmm3, %xmm9
movaps %xmm9, (%rsp)
subps %xmm6, %xmm0
subps %xmm7, %xmm5
subps %xmm3, %xmm12
movaps %xmm0, %xmm9
subps %xmm1, %xmm9
movaps %xmm5, %xmm2
subps %xmm11, %xmm2
movaps %xmm12, %xmm15
subps %xmm13, %xmm15
movaps %xmm0, %xmm3
addps %xmm1, %xmm3
movaps %xmm5, %xmm6
addps %xmm11, %xmm6
movaps %xmm12, %xmm7
addps %xmm13, %xmm7
movaps %xmm9, %xmm8
mulps %xmm6, %xmm8
mulps %xmm15, %xmm6
movaps %xmm2, %xmm10
mulps %xmm7, %xmm10
subps %xmm6, %xmm10
movaps %xmm9, 0x50(%rsp)
mulps %xmm9, %xmm7
movaps %xmm3, %xmm6
movaps %xmm15, 0xf0(%rsp)
mulps %xmm15, %xmm6
subps %xmm7, %xmm6
movss 0x50(%r9,%r10,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm2, 0x150(%rsp)
mulps %xmm2, %xmm3
subps %xmm3, %xmm8
movss 0x60(%r9,%r10,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm14, %xmm8
mulps %xmm4, %xmm6
addps %xmm8, %xmm6
movaps %xmm1, %xmm8
movaps 0x30(%rsp), %xmm15
subps %xmm15, %xmm8
mulps 0x20(%rsp), %xmm10
addps %xmm6, %xmm10
movaps %xmm11, %xmm9
movaps 0x10(%rsp), %xmm7
subps %xmm7, %xmm9
movaps %xmm13, %xmm2
movaps %xmm1, %xmm3
movaps (%rsp), %xmm1
subps %xmm1, %xmm2
movaps %xmm11, %xmm6
addps %xmm7, %xmm6
movaps %xmm13, 0x60(%rsp)
movaps %xmm13, %xmm7
addps %xmm1, %xmm7
movaps %xmm8, %xmm13
mulps %xmm6, %xmm13
mulps %xmm2, %xmm6
movaps %xmm9, %xmm11
mulps %xmm7, %xmm11
subps %xmm6, %xmm11
movaps %xmm3, 0xe0(%rsp)
addps %xmm15, %xmm3
mulps %xmm8, %xmm7
movaps %xmm3, %xmm6
movaps %xmm2, 0x2c0(%rsp)
mulps %xmm2, %xmm6
subps %xmm7, %xmm6
mulps %xmm9, %xmm3
subps %xmm3, %xmm13
mulps %xmm14, %xmm13
mulps %xmm4, %xmm6
addps %xmm13, %xmm6
movaps 0x20(%rsp), %xmm1
mulps %xmm1, %xmm11
addps %xmm6, %xmm11
movaps %xmm15, %xmm6
subps %xmm0, %xmm6
addps %xmm15, %xmm0
movaps 0x10(%rsp), %xmm2
movaps %xmm2, %xmm3
subps %xmm5, %xmm3
addps %xmm2, %xmm5
movaps (%rsp), %xmm2
movaps %xmm2, %xmm13
subps %xmm12, %xmm13
addps %xmm2, %xmm12
movaps %xmm1, %xmm15
movaps %xmm6, %xmm7
mulps %xmm5, %xmm7
mulps %xmm13, %xmm5
movaps %xmm3, %xmm1
mulps %xmm12, %xmm1
subps %xmm5, %xmm1
mulps %xmm6, %xmm12
movaps %xmm0, %xmm2
mulps %xmm13, %xmm2
subps %xmm12, %xmm2
mulps %xmm3, %xmm0
subps %xmm0, %xmm7
movaps %xmm14, (%rsp)
mulps %xmm14, %xmm7
mulps %xmm4, %xmm2
addps %xmm7, %xmm2
mulps %xmm15, %xmm1
addps %xmm2, %xmm1
movaps %xmm10, %xmm2
addps %xmm11, %xmm2
addps %xmm1, %xmm2
movaps %xmm10, %xmm0
minps %xmm11, %xmm0
minps %xmm1, %xmm0
movaps %xmm10, 0x2b0(%rsp)
maxps %xmm11, %xmm10
maxps %xmm1, %xmm10
movaps %xmm2, 0x2a0(%rsp)
movaps %xmm2, %xmm1
andps 0x1d45bf6(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d4b29f(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm10
xorps 0x1d45bf3(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm10
movmskps %xmm10, %edx
testl %edx, %edx
je 0x1a71d6
movaps %xmm4, %xmm14
movaps %xmm9, %xmm0
movaps 0xf0(%rsp), %xmm15
mulps %xmm15, %xmm0
movaps %xmm8, %xmm1
movaps 0x150(%rsp), %xmm5
mulps %xmm5, %xmm1
movaps %xmm3, %xmm7
movaps %xmm3, 0x10(%rsp)
movaps 0x2c0(%rsp), %xmm3
mulps %xmm3, %xmm7
movaps %xmm13, %xmm12
movaps %xmm13, 0x30(%rsp)
movaps %xmm6, %xmm13
mulps %xmm9, %xmm13
mulps %xmm3, %xmm5
subps %xmm0, %xmm5
movaps 0x50(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm9, %xmm4
mulps %xmm12, %xmm9
subps %xmm7, %xmm9
movaps 0x1d45b64(%rip), %xmm12 # 0x1eec6c0
andps %xmm12, %xmm0
andps %xmm12, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm5, %xmm9
movaps %xmm8, %xmm0
mulps 0x30(%rsp), %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm6
mulps %xmm8, %xmm15
subps %xmm2, %xmm15
subps %xmm0, %xmm6
andps %xmm12, %xmm2
andps %xmm12, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm15, %xmm6
mulps 0x10(%rsp), %xmm8
subps %xmm1, %xmm4
subps %xmm13, %xmm8
andps %xmm12, %xmm1
andps %xmm12, %xmm13
cmpltps %xmm13, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm4, %xmm8
movaps 0x20(%rsp), %xmm3
movaps (%rsp), %xmm0
mulps %xmm8, %xmm0
mulps %xmm6, %xmm14
addps %xmm0, %xmm14
mulps %xmm9, %xmm3
addps %xmm14, %xmm3
addps %xmm3, %xmm3
movaps 0x60(%rsp), %xmm0
mulps %xmm8, %xmm0
movaps 0x1a0(%rsp), %xmm1
mulps %xmm6, %xmm1
addps %xmm0, %xmm1
movaps 0xe0(%rsp), %xmm4
mulps %xmm9, %xmm4
addps %xmm1, %xmm4
rcpps %xmm3, %xmm1
movaps %xmm3, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d45dfa(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm4, %xmm4
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm4, %xmm0
movss 0x80(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm10, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a71d6
cmpneqps 0x1d44dad(%rip), %xmm3 # 0x1eeba10
andps %xmm3, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a71d6
movaps 0x2b0(%rsp), %xmm2
movaps %xmm2, 0x1f0(%rsp)
movaps %xmm11, 0x200(%rsp)
movaps 0x2a0(%rsp), %xmm2
movaps %xmm2, 0x210(%rsp)
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movaps %xmm1, 0x230(%rsp)
movaps %xmm0, 0x260(%rsp)
movaps %xmm9, 0x270(%rsp)
movaps %xmm6, 0x280(%rsp)
movaps %xmm8, 0x290(%rsp)
movq 0xb58(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rax
movl 0x90(%r9,%r10,4), %edx
movq %rax, 0x20(%rsp)
testl %edx, 0x34(%rax)
je 0x1a71d6
movq 0xb58(%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1a6d23
movq 0x20(%rsp), %rdx
cmpq $0x0, 0x48(%rdx)
je 0x1a779a
movq %rdi, 0x50(%rsp)
movq %r11, (%rsp)
movaps 0x210(%rsp), %xmm1
movaps %xmm1, %xmm0
andps 0x1d45982(%rip), %xmm0 # 0x1eec6c0
rcpps %xmm1, %xmm2
mulps %xmm2, %xmm1
movaps 0x1d45cc5(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm1, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
cmpnltps 0x1d4afe1(%rip), %xmm0 # 0x1ef1d40
andps %xmm3, %xmm0
movaps 0x1f0(%rsp), %xmm1
mulps %xmm0, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x240(%rsp)
mulps 0x200(%rsp), %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x250(%rsp)
movq 0x220(%rsp), %rdx
movq (%rdx), %rdi
movq 0x8(%rdx), %rdx
movups (%rdi), %xmm5
movups (%rdi,%rdx,4), %xmm3
cmpq $0x2, %rdx
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
je 0x1a7e2b
movaps %xmm5, %xmm2
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,2]
movaps %xmm2, %xmm4
psrld $0x10, %xmm4
xorps %xmm14, %xmm14
pblendw $0xaa, %xmm14, %xmm2 # xmm2 = xmm2[0],xmm14[1],xmm2[2],xmm14[3],xmm2[4],xmm14[5],xmm2[6],xmm14[7]
cvtdq2ps %xmm2, %xmm2
movaps 0x1d75b63(%rip), %xmm15 # 0x1f1c970
mulps %xmm15, %xmm2
cvtdq2ps %xmm4, %xmm4
mulps %xmm15, %xmm4
movaps %xmm5, %xmm6
psrld $0x10, %xmm6
pblendw $0xaa, %xmm14, %xmm5 # xmm5 = xmm5[0],xmm14[1],xmm5[2],xmm14[3],xmm5[4],xmm14[5],xmm5[6],xmm14[7]
cvtdq2ps %xmm5, %xmm5
mulps %xmm15, %xmm5
cvtdq2ps %xmm6, %xmm6
mulps %xmm15, %xmm6
movaps %xmm3, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm14, %xmm3 # xmm3 = xmm3[0],xmm14[1],xmm3[2],xmm14[3],xmm3[4],xmm14[5],xmm3[6],xmm14[7]
cvtdq2ps %xmm3, %xmm3
mulps %xmm15, %xmm3
cvtdq2ps %xmm7, %xmm7
mulps %xmm15, %xmm7
mulps %xmm1, %xmm5
mulps %xmm1, %xmm6
mulps %xmm0, %xmm3
addps %xmm5, %xmm3
mulps %xmm0, %xmm7
addps %xmm6, %xmm7
movaps 0x1d45ba5(%rip), %xmm5 # 0x1eeca10
subps %xmm1, %xmm5
subps %xmm0, %xmm5
mulps %xmm5, %xmm2
addps %xmm3, %xmm2
mulps %xmm4, %xmm5
addps %xmm7, %xmm5
movaps %xmm2, 0x240(%rsp)
movaps %xmm5, 0x250(%rsp)
movaps 0x230(%rsp), %xmm0
movmskps %xmm0, %eax
bsfq %rax, %rdx
movq %rax, 0x30(%rsp)
testl %eax, %eax
sete 0xf0(%rsp)
movq (%rsp), %r11
movq 0x50(%rsp), %rdi
je 0x1a720c
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x150(%rsp)
movd 0x184(%rsp), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0xe0(%rsp)
movq 0x1b8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x1a0(%rsp)
movq %r9, 0x48(%rsp)
movq %r10, 0x40(%rsp)
movq %rsi, 0x1b0(%rsp)
movq %r8, 0x78(%rsp)
movss 0x80(%r9,%r10,4), %xmm5
movss 0x260(%rsp,%rdx,4), %xmm0
movss 0x240(%rsp,%rdx,4), %xmm1
movss 0x250(%rsp,%rdx,4), %xmm2
movss %xmm0, 0x80(%r9,%r10,4)
movq 0xb58(%rsp), %rax
movq 0x8(%rax), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x270(%rsp,%rdx,4), %xmm0
movss 0x280(%rsp,%rdx,4), %xmm3
movq %rdx, 0x10(%rsp)
movss 0x290(%rsp,%rdx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x2e0(%rsp)
movaps %xmm3, 0x2f0(%rsp)
movaps %xmm4, 0x300(%rsp)
movaps %xmm1, 0x310(%rsp)
movaps %xmm2, 0x320(%rsp)
movaps 0xe0(%rsp), %xmm0
movaps %xmm0, 0x330(%rsp)
movdqa 0x150(%rsp), %xmm0
movdqa %xmm0, 0x340(%rsp)
leaq 0x350(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x350(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x360(%rsp)
movdqa 0x1a0(%rsp), %xmm0
movdqa %xmm0, 0x160(%rsp)
leaq 0x160(%rsp), %rcx
movq %rcx, 0x1c0(%rsp)
movq 0x20(%rsp), %rdx
movq 0x18(%rdx), %rcx
movq %rcx, 0x1c8(%rsp)
movq %rax, 0x1d0(%rsp)
movq %r9, 0x1d8(%rsp)
leaq 0x2e0(%rsp), %rax
movq %rax, 0x1e0(%rsp)
movl $0x4, 0x1e8(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
movss %xmm5, 0x60(%rsp)
je 0x1a7099
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x60(%rsp), %xmm5
movq 0x50(%rsp), %rdi
movq 0x78(%rsp), %r8
movq 0x1b0(%rsp), %rsi
movq 0x40(%rsp), %r10
movq 0x48(%rsp), %r9
movdqa 0x160(%rsp), %xmm1
ptest %xmm1, %xmm1
movq 0x10(%rsp), %rdx
je 0x1a714e
movq 0xb58(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
movq (%rsp), %r11
je 0x1a7110
testb $0x2, (%rcx)
jne 0x1a70db
movq 0x20(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1a7110
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x60(%rsp), %xmm5
movq 0x10(%rsp), %rdx
movq 0x50(%rsp), %rdi
movq 0x78(%rsp), %r8
movq 0x1b0(%rsp), %rsi
movq (%rsp), %r11
movq 0x40(%rsp), %r10
movq 0x48(%rsp), %r9
movdqa 0x160(%rsp), %xmm0
pcmpeqd 0x1d448ef(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d44cf3(%rip), %xmm1 # 0x1eebe20
movq 0x1d8(%rsp), %rax
movaps 0x1d448c4(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1a7162
pcmpeqd 0x1d448ba(%rip), %xmm1 # 0x1eeba10
pxor 0x1d44cc2(%rip), %xmm1 # 0x1eebe20
movq (%rsp), %r11
ptest 0x1d459b5(%rip), %xmm1 # 0x1eecb20
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
jne 0x1a7d87
movss %xmm5, 0x80(%r9,%r10,4)
movq 0x30(%rsp), %rax
btcq %rdx, %rax
bsfq %rax, %rdx
movq %rax, 0x30(%rsp)
testq %rax, %rax
sete 0xf0(%rsp)
jne 0x1a6f0d
jmp 0x1a720c
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
cmpl $0x3, %r8d
movaps 0x140(%rsp), %xmm3
movaps 0x130(%rsp), %xmm4
movaps 0x120(%rsp), %xmm5
movaps 0x110(%rsp), %xmm6
movaps 0x100(%rsp), %xmm7
jb 0x1a7890
movq %r8, 0x78(%rsp)
leaq (%rdi,%rsi,4), %rax
addq $0x2c, %rax
movups (%rax), %xmm2
movups (%rax,%rsi,4), %xmm0
movq %rsi, %r8
cmpq $0x2, %rsi
je 0x1a7dc6
movq 0x178(%rsp), %rcx
movq (%rcx), %rdx
movl 0x14(%rdx), %ecx
leaq (%rax,%rcx,4), %rsi
movups (%rsi), %xmm8
movups (%rsi,%r8,4), %xmm5
cmpq $0x2, %r8
je 0x1a7dd3
leaq (%rax,%rcx,8), %rsi
movaps %xmm2, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
shufps $0xa5, %xmm2, %xmm2 # xmm2 = xmm2[1,1,2,2]
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm8, %xmm11
unpcklps %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,2]
movups (%rsi), %xmm3
movups (%rsi,%r8,4), %xmm12
cmpq $0x2, %r8
je 0x1a7de1
imulq $0xc, %rcx, %rcx
addq %rcx, %rax
movaps %xmm3, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
shufps $0xa5, %xmm3, %xmm3 # xmm3 = xmm3[1,1,2,2]
shufps $0x94, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,2]
movq %rax, 0x188(%rsp)
movq %r8, 0x190(%rsp)
movq 0x78(%rsp), %rax
movq %rax, 0x198(%rsp)
movl 0x18(%rdx), %ecx
movl 0x1c(%rdx), %eax
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movss (%r9,%r10,4), %xmm6
movss 0x10(%r9,%r10,4), %xmm7
movaps %xmm3, %xmm9
movss 0x20(%r9,%r10,4), %xmm3
movss 0x40(%r9,%r10,4), %xmm4
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm4, 0x20(%rsp)
subps %xmm6, %xmm1
subps %xmm7, %xmm11
movaps %xmm11, 0x1a0(%rsp)
subps %xmm3, %xmm13
subps %xmm6, %xmm2
movaps %xmm2, 0x30(%rsp)
subps %xmm7, %xmm8
movaps %xmm8, 0x10(%rsp)
subps %xmm3, %xmm9
movaps %xmm9, (%rsp)
subps %xmm6, %xmm0
subps %xmm7, %xmm5
subps %xmm3, %xmm12
movaps %xmm0, %xmm9
subps %xmm1, %xmm9
movaps %xmm5, %xmm2
subps %xmm11, %xmm2
movaps %xmm12, %xmm15
subps %xmm13, %xmm15
movaps %xmm0, %xmm3
addps %xmm1, %xmm3
movaps %xmm5, %xmm6
addps %xmm11, %xmm6
movaps %xmm12, %xmm7
addps %xmm13, %xmm7
movaps %xmm9, %xmm8
mulps %xmm6, %xmm8
mulps %xmm15, %xmm6
movaps %xmm2, %xmm10
mulps %xmm7, %xmm10
subps %xmm6, %xmm10
movaps %xmm9, 0x50(%rsp)
mulps %xmm9, %xmm7
movaps %xmm3, %xmm6
movaps %xmm15, 0xf0(%rsp)
mulps %xmm15, %xmm6
subps %xmm7, %xmm6
movss 0x50(%r9,%r10,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm2, 0x150(%rsp)
mulps %xmm2, %xmm3
subps %xmm3, %xmm8
movss 0x60(%r9,%r10,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm14, %xmm8
mulps %xmm4, %xmm6
addps %xmm8, %xmm6
movaps %xmm1, %xmm8
movaps 0x30(%rsp), %xmm15
subps %xmm15, %xmm8
mulps 0x20(%rsp), %xmm10
addps %xmm6, %xmm10
movaps %xmm11, %xmm9
movaps 0x10(%rsp), %xmm7
subps %xmm7, %xmm9
movaps %xmm13, %xmm2
movaps %xmm1, %xmm3
movaps (%rsp), %xmm1
subps %xmm1, %xmm2
movaps %xmm11, %xmm6
addps %xmm7, %xmm6
movaps %xmm13, 0x60(%rsp)
movaps %xmm13, %xmm7
addps %xmm1, %xmm7
movaps %xmm8, %xmm13
mulps %xmm6, %xmm13
mulps %xmm2, %xmm6
movaps %xmm9, %xmm11
mulps %xmm7, %xmm11
subps %xmm6, %xmm11
movaps %xmm3, 0xe0(%rsp)
addps %xmm15, %xmm3
mulps %xmm8, %xmm7
movaps %xmm3, %xmm6
movaps %xmm2, 0x2c0(%rsp)
mulps %xmm2, %xmm6
subps %xmm7, %xmm6
mulps %xmm9, %xmm3
subps %xmm3, %xmm13
mulps %xmm14, %xmm13
mulps %xmm4, %xmm6
addps %xmm13, %xmm6
movaps 0x20(%rsp), %xmm1
mulps %xmm1, %xmm11
addps %xmm6, %xmm11
movaps %xmm15, %xmm6
subps %xmm0, %xmm6
addps %xmm15, %xmm0
movaps 0x10(%rsp), %xmm2
movaps %xmm2, %xmm3
subps %xmm5, %xmm3
addps %xmm2, %xmm5
movaps (%rsp), %xmm2
movaps %xmm2, %xmm13
subps %xmm12, %xmm13
addps %xmm2, %xmm12
movaps %xmm1, %xmm15
movaps %xmm6, %xmm7
mulps %xmm5, %xmm7
mulps %xmm13, %xmm5
movaps %xmm3, %xmm1
mulps %xmm12, %xmm1
subps %xmm5, %xmm1
mulps %xmm6, %xmm12
movaps %xmm0, %xmm2
mulps %xmm13, %xmm2
subps %xmm12, %xmm2
mulps %xmm3, %xmm0
subps %xmm0, %xmm7
movaps %xmm14, (%rsp)
mulps %xmm14, %xmm7
mulps %xmm4, %xmm2
addps %xmm7, %xmm2
mulps %xmm15, %xmm1
addps %xmm2, %xmm1
movaps %xmm10, %xmm2
addps %xmm11, %xmm2
addps %xmm1, %xmm2
movaps %xmm10, %xmm0
minps %xmm11, %xmm0
minps %xmm1, %xmm0
movaps %xmm10, 0x2b0(%rsp)
maxps %xmm11, %xmm10
maxps %xmm1, %xmm10
movaps %xmm2, 0x2a0(%rsp)
movaps %xmm2, %xmm1
andps 0x1d45179(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d4a822(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm10
xorps 0x1d45176(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm10
movmskps %xmm10, %edx
testl %edx, %edx
je 0x1a7832
movaps %xmm4, %xmm14
movaps %xmm9, %xmm0
movaps 0xf0(%rsp), %xmm15
mulps %xmm15, %xmm0
movaps %xmm8, %xmm1
movaps 0x150(%rsp), %xmm5
mulps %xmm5, %xmm1
movaps %xmm3, %xmm7
movaps %xmm3, 0x10(%rsp)
movaps 0x2c0(%rsp), %xmm3
mulps %xmm3, %xmm7
movaps %xmm13, %xmm12
movaps %xmm13, 0x30(%rsp)
movaps %xmm6, %xmm13
mulps %xmm9, %xmm13
mulps %xmm3, %xmm5
subps %xmm0, %xmm5
movaps 0x50(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm9, %xmm4
mulps %xmm12, %xmm9
subps %xmm7, %xmm9
movaps 0x1d450e7(%rip), %xmm12 # 0x1eec6c0
andps %xmm12, %xmm0
andps %xmm12, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm5, %xmm9
movaps %xmm8, %xmm0
mulps 0x30(%rsp), %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm6
mulps %xmm8, %xmm15
subps %xmm2, %xmm15
subps %xmm0, %xmm6
andps %xmm12, %xmm2
andps %xmm12, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm15, %xmm6
mulps 0x10(%rsp), %xmm8
subps %xmm1, %xmm4
subps %xmm13, %xmm8
andps %xmm12, %xmm1
andps %xmm12, %xmm13
cmpltps %xmm13, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm4, %xmm8
movaps 0x20(%rsp), %xmm3
movaps (%rsp), %xmm0
mulps %xmm8, %xmm0
mulps %xmm6, %xmm14
addps %xmm0, %xmm14
mulps %xmm9, %xmm3
addps %xmm14, %xmm3
addps %xmm3, %xmm3
movaps 0x60(%rsp), %xmm0
mulps %xmm8, %xmm0
movaps 0x1a0(%rsp), %xmm1
mulps %xmm6, %xmm1
addps %xmm0, %xmm1
movaps 0xe0(%rsp), %xmm4
mulps %xmm9, %xmm4
addps %xmm1, %xmm4
rcpps %xmm3, %xmm1
movaps %xmm3, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d4537d(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm4, %xmm4
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm4, %xmm0
movss 0x80(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm10, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a7832
cmpneqps 0x1d44330(%rip), %xmm3 # 0x1eeba10
andps %xmm3, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a7832
movaps 0x2b0(%rsp), %xmm2
movaps %xmm2, 0x1f0(%rsp)
movaps %xmm11, 0x200(%rsp)
movaps 0x2a0(%rsp), %xmm2
movaps %xmm2, 0x210(%rsp)
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movaps %xmm1, 0x230(%rsp)
movaps %xmm0, 0x260(%rsp)
movaps %xmm9, 0x270(%rsp)
movaps %xmm6, 0x280(%rsp)
movaps %xmm8, 0x290(%rsp)
movq 0xb58(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rdi
movl 0x90(%r9,%r10,4), %edx
testl %edx, 0x34(%rdi)
je 0x1a7832
movq 0xb58(%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1a78e7
cmpq $0x0, 0x48(%rdi)
jne 0x1a78e7
movb $0x1, %al
xorl %edx, %edx
movaps 0x140(%rsp), %xmm3
movaps 0x130(%rsp), %xmm4
movaps 0x120(%rsp), %xmm5
movaps 0x110(%rsp), %xmm6
movaps 0x100(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
testb %al, %al
je 0x1a7811
movl $0xff800000, 0x80(%r9,%r10,4) # imm = 0xFF800000
pushq $0x1
popq %rcx
jmp 0x1a781c
xorl %ecx, %ecx
testq %rdx, %rdx
jne 0x1a78db
testb $0x3, %cl
movq 0x2d0(%rsp), %rcx
je 0x1a667e
jmp 0x1a7e45
movaps 0x140(%rsp), %xmm3
movaps 0x130(%rsp), %xmm4
movaps 0x120(%rsp), %xmm5
movaps 0x110(%rsp), %xmm6
movaps 0x100(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
xorl %edx, %edx
xorl %eax, %eax
jmp 0x1a77fc
andq $-0x10, %rax
movl 0x2c(%rax), %ecx
movq 0x30(%rax,%rcx), %rdx
movq 0x178(%rsp), %rcx
movq %rax, (%rcx)
jmp 0x1a7892
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1a67dc
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
jmp 0x1a67f9
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
shufps $0x54, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,1]
jmp 0x1a6832
movq %rdx, (%r11)
addq $0x8, %r11
jmp 0x1a781c
movq %r11, (%rsp)
movaps 0x210(%rsp), %xmm1
movaps %xmm1, %xmm0
andps 0x1d44dc3(%rip), %xmm0 # 0x1eec6c0
rcpps %xmm1, %xmm2
mulps %xmm2, %xmm1
movaps 0x1d45106(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm1, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
cmpnltps 0x1d4a422(%rip), %xmm0 # 0x1ef1d40
andps %xmm3, %xmm0
movaps 0x1f0(%rsp), %xmm1
mulps %xmm0, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x240(%rsp)
mulps 0x200(%rsp), %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x250(%rsp)
movq 0x220(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm5
movups (%rsi,%rdx,4), %xmm3
cmpq $0x2, %rdx
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
je 0x1a7e38
movaps %xmm5, %xmm2
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,2]
movaps %xmm2, %xmm4
psrld $0x10, %xmm4
xorps %xmm14, %xmm14
pblendw $0xaa, %xmm14, %xmm2 # xmm2 = xmm2[0],xmm14[1],xmm2[2],xmm14[3],xmm2[4],xmm14[5],xmm2[6],xmm14[7]
cvtdq2ps %xmm2, %xmm2
movaps 0x1d74fa4(%rip), %xmm15 # 0x1f1c970
mulps %xmm15, %xmm2
cvtdq2ps %xmm4, %xmm4
mulps %xmm15, %xmm4
movaps %xmm5, %xmm6
psrld $0x10, %xmm6
pblendw $0xaa, %xmm14, %xmm5 # xmm5 = xmm5[0],xmm14[1],xmm5[2],xmm14[3],xmm5[4],xmm14[5],xmm5[6],xmm14[7]
cvtdq2ps %xmm5, %xmm5
mulps %xmm15, %xmm5
cvtdq2ps %xmm6, %xmm6
mulps %xmm15, %xmm6
movaps %xmm3, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm14, %xmm3 # xmm3 = xmm3[0],xmm14[1],xmm3[2],xmm14[3],xmm3[4],xmm14[5],xmm3[6],xmm14[7]
cvtdq2ps %xmm3, %xmm3
mulps %xmm15, %xmm3
cvtdq2ps %xmm7, %xmm7
mulps %xmm15, %xmm7
mulps %xmm1, %xmm5
mulps %xmm1, %xmm6
mulps %xmm0, %xmm3
addps %xmm5, %xmm3
mulps %xmm0, %xmm7
addps %xmm6, %xmm7
movaps 0x1d44fe6(%rip), %xmm5 # 0x1eeca10
subps %xmm1, %xmm5
subps %xmm0, %xmm5
mulps %xmm5, %xmm2
addps %xmm3, %xmm2
mulps %xmm4, %xmm5
addps %xmm7, %xmm5
movaps %xmm2, 0x240(%rsp)
movaps %xmm5, 0x250(%rsp)
movaps 0x230(%rsp), %xmm0
movmskps %xmm0, %edx
bsfq %rdx, %rsi
testl %edx, %edx
sete 0x60(%rsp)
movq (%rsp), %r11
je 0x1a7d5a
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x50(%rsp)
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0xf0(%rsp)
movq 0x1b8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x150(%rsp)
movq %rdi, 0xe0(%rsp)
movq %r9, 0x48(%rsp)
movq %r10, 0x40(%rsp)
movss 0x80(%r9,%r10,4), %xmm5
movss 0x260(%rsp,%rsi,4), %xmm0
movss 0x240(%rsp,%rsi,4), %xmm1
movss 0x250(%rsp,%rsi,4), %xmm2
movss %xmm0, 0x80(%r9,%r10,4)
movq 0xb58(%rsp), %rax
movq 0x8(%rax), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x270(%rsp,%rsi,4), %xmm0
movss 0x280(%rsp,%rsi,4), %xmm3
movss 0x290(%rsp,%rsi,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x2e0(%rsp)
movaps %xmm3, 0x2f0(%rsp)
movaps %xmm4, 0x300(%rsp)
movaps %xmm1, 0x310(%rsp)
movaps %xmm2, 0x320(%rsp)
movaps 0xf0(%rsp), %xmm0
movaps %xmm0, 0x330(%rsp)
movdqa 0x50(%rsp), %xmm0
movdqa %xmm0, 0x340(%rsp)
leaq 0x350(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x350(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x360(%rsp)
movdqa 0x150(%rsp), %xmm0
movdqa %xmm0, 0x160(%rsp)
leaq 0x160(%rsp), %rcx
movq %rcx, 0x1c0(%rsp)
movq 0x18(%rdi), %rcx
movq %rcx, 0x1c8(%rsp)
movq %rax, 0x1d0(%rsp)
movq %r9, 0x1d8(%rsp)
leaq 0x2e0(%rsp), %rax
movq %rax, 0x1e0(%rsp)
movl $0x4, 0x1e8(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
movq %rdx, 0x20(%rsp)
movq %rsi, 0x10(%rsp)
movss %xmm5, 0x30(%rsp)
je 0x1a7c3b
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x30(%rsp), %xmm5
movq 0x10(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0xe0(%rsp), %rdi
movq 0x40(%rsp), %r10
movq 0x48(%rsp), %r9
movdqa 0x160(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1a7ce1
movq 0xb58(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
movq (%rsp), %r11
je 0x1a7ca3
testb $0x2, (%rcx)
jne 0x1a7c73
testb $0x40, 0x3e(%rdi)
je 0x1a7ca3
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x30(%rsp), %xmm5
movq 0x10(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0xe0(%rsp), %rdi
movq (%rsp), %r11
movq 0x40(%rsp), %r10
movq 0x48(%rsp), %r9
movdqa 0x160(%rsp), %xmm0
pcmpeqd 0x1d43d5c(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d44160(%rip), %xmm1 # 0x1eebe20
movq 0x1d8(%rsp), %rax
movaps 0x1d43d31(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1a7cf5
pcmpeqd 0x1d43d27(%rip), %xmm1 # 0x1eeba10
pxor 0x1d4412f(%rip), %xmm1 # 0x1eebe20
movq (%rsp), %r11
ptest 0x1d44e22(%rip), %xmm1 # 0x1eecb20
movaps 0xd0(%rsp), %xmm8
movaps 0xc0(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm10
movaps 0xa0(%rsp), %xmm11
movaps 0x90(%rsp), %xmm12
movaps 0x80(%rsp), %xmm13
jne 0x1a7def
movss %xmm5, 0x80(%r9,%r10,4)
btcq %rsi, %rdx
bsfq %rdx, %rsi
testq %rdx, %rdx
sete 0x60(%rsp)
jne 0x1a7ab2
movaps 0x140(%rsp), %xmm3
movaps 0x130(%rsp), %xmm4
movaps 0x120(%rsp), %xmm5
movaps 0x110(%rsp), %xmm6
movaps 0x100(%rsp), %xmm7
jmp 0x1a7890
testb $0x1, 0xf0(%rsp)
jne 0x1a720c
movb $0x1, %al
xorl %edx, %edx
movaps 0x140(%rsp), %xmm3
movaps 0x130(%rsp), %xmm4
movaps 0x120(%rsp), %xmm5
movaps 0x110(%rsp), %xmm6
movaps 0x100(%rsp), %xmm7
jmp 0x1a77fc
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1a725f
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
jmp 0x1a7284
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
shufps $0x54, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,1]
jmp 0x1a72b9
testb $0x1, 0x60(%rsp)
movaps 0x140(%rsp), %xmm3
movaps 0x130(%rsp), %xmm4
movaps 0x120(%rsp), %xmm5
movaps 0x110(%rsp), %xmm6
movaps 0x100(%rsp), %xmm7
jne 0x1a7890
movb $0x1, %al
xorl %edx, %edx
jmp 0x1a77fc
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
jmp 0x1a6de1
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
jmp 0x1a79a0
leaq 0x370(%rsp), %rax
cmpq %rax, %rcx
setne %al
addq $0xb18, %rsp # imm = 0xB18
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::sse42::SubdivPatch1MBIntersectorK<4>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubdivPatch1PrecalculationsK<4, embree::sse42::GridSOAIntersectorK<4>::Precalculations>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x12f8, %rsp # imm = 0x12F8
movq %r9, %rbx
movq %r8, 0xf0(%rsp)
movq %rcx, %r15
leaq 0x3c0(%rsp), %r11
movq %rdx, -0x10(%r11)
andl $0x0, -0x8(%r11)
movq 0x1330(%rsp), %rax
movss (%rax,%rcx,4), %xmm8
movss 0x10(%rax,%rcx,4), %xmm9
movss 0x20(%rax,%rcx,4), %xmm10
movss 0x60(%rax,%rcx,4), %xmm11
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0x70(%rax,%rcx,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movss 0x80(%rax,%rcx,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %rdi
movslq 0xa0(%rax,%rcx,4), %r10
movslq 0xb0(%rax,%rcx,4), %rbp
movq %rdi, %r12
xorq $0x10, %r12
movq %r10, %r14
xorq $0x10, %r14
movq %rbp, 0x238(%rsp)
xorq $0x10, %rbp
movss 0xc0(%rax,%rcx,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
pushq $0x1
popq %rax
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1f7c7e8(%rip), %rax # 0x2124730
movq %rax, 0x208(%rsp)
movaps %xmm8, 0x2a0(%rsp)
movaps %xmm9, 0x290(%rsp)
movaps %xmm10, 0x280(%rsp)
movaps %xmm11, 0x270(%rsp)
movaps %xmm12, 0x260(%rsp)
movaps %xmm13, 0x250(%rsp)
movq %rdi, 0x68(%rsp)
movq %r10, 0x8(%rsp)
movaps %xmm14, 0x240(%rsp)
leaq 0x3b0(%rsp), %rax
cmpq %rax, %r11
je 0x1a9f14
movss -0x8(%r11), %xmm0
addq $-0x10, %r11
ucomiss 0x80(%rbx,%r15,4), %xmm0
ja 0x1a7f99
movq (%r11), %rax
testb $0x8, %al
jne 0x1a80b5
movq %rax, %rcx
andq $-0x10, %rcx
movss 0x70(%rbx,%r15,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rcx,%rdi), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rcx,%rdi), %xmm3
subps %xmm8, %xmm3
mulps %xmm11, %xmm3
movaps %xmm14, %xmm2
movaps 0x80(%rcx,%r10), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rcx,%r10), %xmm4
maxps %xmm3, %xmm2
subps %xmm9, %xmm4
mulps %xmm12, %xmm4
movq 0x238(%rsp), %rdx
movaps 0x80(%rcx,%rdx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rcx,%rdx), %xmm3
subps %xmm10, %xmm3
mulps %xmm13, %xmm3
maxps %xmm3, %xmm4
maxps %xmm4, %xmm2
movaps 0x80(%rcx,%r12), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rcx,%r12), %xmm4
subps %xmm8, %xmm4
mulps %xmm11, %xmm4
movaps %xmm1, %xmm3
minps %xmm4, %xmm3
movaps 0x80(%rcx,%r14), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rcx,%r14), %xmm4
subps %xmm9, %xmm4
movaps 0x80(%rcx,%rbp), %xmm5
mulps %xmm0, %xmm5
addps 0x20(%rcx,%rbp), %xmm5
mulps %xmm12, %xmm4
subps %xmm10, %xmm5
mulps %xmm13, %xmm5
minps %xmm5, %xmm4
minps %xmm4, %xmm3
movl %eax, %edx
andl $0x7, %edx
cmpl $0x6, %edx
je 0x1a80fe
movaps %xmm2, %xmm0
cmpleps %xmm3, %xmm0
pslld $0x1f, %xmm0
movmskps %xmm0, %r13d
movaps %xmm2, 0x140(%rsp)
testb $0x8, %al
jne 0x1a80fa
testq %r13, %r13
je 0x1a8120
andq $-0x10, %rax
bsfq %r13, %rdx
leaq -0x1(%r13), %r9
xorl %ecx, %ecx
movq (%rax,%rdx,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
andq %r13, %r9
jne 0x1a8125
movq %rsi, %rax
testl %ecx, %ecx
je 0x1a7fc2
jmp 0x1a8323
pushq $0x6
jmp 0x1a8122
movaps %xmm2, %xmm4
cmpleps %xmm3, %xmm4
movaps 0xe0(%rcx), %xmm3
cmpleps %xmm0, %xmm3
cmpltps 0xf0(%rcx), %xmm0
andps %xmm3, %xmm0
andps %xmm4, %xmm0
jmp 0x1a80a4
pushq $0x4
popq %rcx
jmp 0x1a80ed
movq %r11, 0x28(%rsp)
movl 0x140(%rsp,%rdx,4), %r11d
bsfq %r9, %r10
leaq -0x1(%r9), %rdx
movq (%rax,%r10,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x140(%rsp,%r10,4), %r10d
andq %r9, %rdx
jne 0x1a8199
movq 0x28(%rsp), %rdx
leaq 0x10(%rdx), %rax
cmpl %r10d, %r11d
jae 0x1a817d
movq %rdi, (%rdx)
movl %r10d, 0x8(%rdx)
movq %rax, %r11
movq %rsi, %rax
jmp 0x1a818a
movq %rsi, (%rdx)
movl %r11d, 0x8(%rdx)
movq %rax, %r11
movq %rdi, %rax
movq 0x68(%rsp), %rdi
movq 0x8(%rsp), %r10
jmp 0x1a80ed
movq %rsi, %xmm2
movd %r11d, %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movq %rdi, %xmm4
movd %r10d, %xmm0
punpcklqdq %xmm0, %xmm4 # xmm4 = xmm4[0],xmm0[0]
bsfq %rdx, %rdi
leaq -0x1(%rdx), %rsi
movq (%rax,%rdi,8), %r9
prefetcht0 (%r9)
prefetcht0 0x40(%r9)
prefetcht0 0x80(%r9)
prefetcht0 0xc0(%r9)
movq %r9, %xmm3
movd 0x140(%rsp,%rdi,4), %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
movdqa %xmm4, %xmm0
pcmpgtd %xmm2, %xmm0
andq %rdx, %rsi
jne 0x1a825a
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
blendvps %xmm0, %xmm4, %xmm2
movdqa %xmm3, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm3, %xmm5
movaps %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movq 0x28(%rsp), %r11
movaps %xmm2, (%r11)
movaps %xmm3, 0x10(%r11)
movq %xmm4, %rax
addq $0x20, %r11
jmp 0x1a818a
bsfq %rsi, %rdx
movq (%rax,%rdx,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
movq %rax, %xmm6
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm4, %xmm5
blendvps %xmm0, %xmm2, %xmm5
movd 0x140(%rsp,%rdx,4), %xmm7
blendvps %xmm0, %xmm4, %xmm2
punpcklqdq %xmm7, %xmm6 # xmm6 = xmm6[0],xmm7[0]
movdqa %xmm6, %xmm0
pcmpgtd %xmm3, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm6, %xmm4
blendvps %xmm0, %xmm3, %xmm4
blendvps %xmm0, %xmm6, %xmm3
movaps %xmm3, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm6
blendvps %xmm0, %xmm2, %xmm6
blendvps %xmm0, %xmm3, %xmm2
movaps %xmm4, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm5
movaps %xmm6, %xmm0
pcmpgtd %xmm5, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm6, %xmm4
blendvps %xmm0, %xmm5, %xmm4
blendvps %xmm0, %xmm6, %xmm5
movq 0x28(%rsp), %r11
movaps %xmm2, (%r11)
movaps %xmm5, 0x10(%r11)
movaps %xmm4, 0x20(%r11)
movq %xmm3, %rax
addq $0x30, %r11
jmp 0x1a818a
cmpl $0x6, %ecx
jne 0x1a7f99
movl %eax, %ecx
andl $0xf, %ecx
cmpl $0x8, %ecx
jne 0x1a9e3c
movq 0xf0(%rsp), %rcx
movq (%rcx), %rcx
movl 0x8(%rcx), %edx
decl %edx
cvtsi2ss %rdx, %xmm0
movss 0x70(%rbx,%r15,4), %xmm15
mulss %xmm0, %xmm15
roundss $0x9, %xmm15, %xmm1
addss 0x1d48662(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm3, %xmm3
maxss %xmm1, %xmm3
cvttss2si %xmm3, %edx
movl 0xc(%rcx), %edi
movq %rdi, 0x50(%rsp)
movslq %edx, %rdx
movl 0x24(%rcx), %r10d
movl 0x28(%rcx), %esi
imulq %rsi, %rdx
addq %rcx, %r10
addq %rdx, %r10
shrq $0x4, %rax
leaq (%r10,%rax,4), %r8
movups 0x2c(%r10,%rax,4), %xmm1
movq %r8, 0x90(%rsp)
movups 0x2c(%r8,%rdi,4), %xmm0
cmpq $0x2, %rdi
je 0x1a9e56
movl 0x14(%rcx), %edx
movq 0x90(%rsp), %rdi
leaq (%rdi,%rdx,4), %r9
addq $0x2c, %r9
movups (%r9), %xmm2
movq 0x50(%rsp), %rdi
movups (%r9,%rdi,4), %xmm10
movq %r11, %r8
cmpq $0x2, %rdi
je 0x1a9e63
movq 0x90(%rsp), %rdi
leaq (%rdi,%rdx,8), %rdi
addq $0x2c, %rdi
movups (%rdi), %xmm11
movq 0x50(%rsp), %r11
movups (%rdi,%r11,4), %xmm7
cmpq $0x2, %r11
je 0x1a9e71
shrl $0x2, %esi
leaq (%r10,%rax,4), %rax
addq $0x2c, %rax
shll $0x2, %esi
leaq (%rax,%rsi), %r10
movups (%rax,%rsi), %xmm5
movq 0x50(%rsp), %rax
movups (%r10,%rax,4), %xmm4
cmpq $0x2, %rax
movq %r8, %r11
je 0x1a9e7f
movq 0x8(%rsp), %r10
addq %rsi, %r9
movups (%r9), %xmm8
movq 0x50(%rsp), %rax
movups (%r9,%rax,4), %xmm6
movq %rax, %r9
cmpq $0x2, %rax
je 0x1a9e8c
subss %xmm3, %xmm15
movaps %xmm1, %xmm3
unpcklps %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
shufps $0xa5, %xmm1, %xmm1 # xmm1 = xmm1[1,1,2,2]
movaps %xmm1, 0x30(%rsp)
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm2, %xmm13
unpcklps %xmm10, %xmm13 # xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1]
shufps $0xa5, %xmm2, %xmm2 # xmm2 = xmm2[1,1,2,2]
movaps %xmm2, 0x40(%rsp)
shufps $0x94, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,2]
movaps %xmm10, 0x1f0(%rsp)
addq %rsi, %rdi
movaps %xmm11, %xmm14
unpcklps %xmm7, %xmm14 # xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1]
shufps $0xa5, %xmm11, %xmm11 # xmm11 = xmm11[1,1,2,2]
movaps %xmm11, 0x10(%rsp)
shufps $0x94, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,2]
movaps %xmm5, %xmm11
unpcklps %xmm4, %xmm11 # xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm4, %xmm9
movaps %xmm8, %xmm12
unpcklps %xmm6, %xmm12 # xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,2]
movups (%rdi), %xmm10
movups (%rdi,%r9,4), %xmm4
cmpq $0x2, %r9
movaps %xmm13, 0x80(%rsp)
movaps %xmm14, 0x70(%rsp)
movaps %xmm0, 0xb0(%rsp)
movaps %xmm7, 0xa0(%rsp)
je 0x1a9e9a
movq 0x1338(%rsp), %rdi
imulq $0xc, %rdx, %rax
movl 0x10(%rcx), %r8d
movq 0x90(%rsp), %rdx
addq %rdx, %rax
addq $0x2c, %rax
movaps %xmm10, %xmm13
unpcklps %xmm4, %xmm13 # xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
shufps $0xa5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movss 0x1d441d2(%rip), %xmm14 # 0x1eec714
subss %xmm15, %xmm14
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm15, %xmm11
mulps %xmm15, %xmm12
mulps %xmm15, %xmm13
movaps %xmm3, %xmm2
mulps %xmm14, %xmm2
addps %xmm11, %xmm2
movaps 0x80(%rsp), %xmm1
mulps %xmm14, %xmm1
addps %xmm12, %xmm1
movaps 0x70(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm13, %xmm0
mulps %xmm15, %xmm5
mulps %xmm15, %xmm8
mulps %xmm15, %xmm10
movaps 0x30(%rsp), %xmm7
mulps %xmm14, %xmm7
addps %xmm5, %xmm7
movaps %xmm7, %xmm13
movaps 0x40(%rsp), %xmm3
mulps %xmm14, %xmm3
addps %xmm8, %xmm3
movaps 0x10(%rsp), %xmm5
mulps %xmm14, %xmm5
addps %xmm10, %xmm5
movaps %xmm5, %xmm11
mulps %xmm15, %xmm9
mulps %xmm15, %xmm6
movaps %xmm15, 0x210(%rsp)
mulps %xmm15, %xmm4
movaps 0xb0(%rsp), %xmm10
mulps %xmm14, %xmm10
addps %xmm9, %xmm10
movaps 0x1f0(%rsp), %xmm9
mulps %xmm14, %xmm9
addps %xmm6, %xmm9
movaps %xmm14, 0x1f0(%rsp)
movaps 0xa0(%rsp), %xmm12
mulps %xmm14, %xmm12
addps %xmm4, %xmm12
movq %rax, 0xf8(%rsp)
movq %r9, 0x100(%rsp)
movq %r8, 0x108(%rsp)
movl 0x18(%rcx), %eax
movl 0x1c(%rcx), %ecx
leaq 0xf8(%rsp), %rdx
movq %rdx, 0x170(%rsp)
movss (%rbx,%r15,4), %xmm5
movss 0x10(%rbx,%r15,4), %xmm6
movss 0x20(%rbx,%r15,4), %xmm4
movss 0x40(%rbx,%r15,4), %xmm7
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movaps %xmm7, 0xb0(%rsp)
subps %xmm5, %xmm2
subps %xmm6, %xmm1
subps %xmm4, %xmm0
subps %xmm5, %xmm13
movaps %xmm13, 0x30(%rsp)
subps %xmm6, %xmm3
movaps %xmm3, 0x40(%rsp)
subps %xmm4, %xmm11
movaps %xmm11, 0x10(%rsp)
subps %xmm5, %xmm10
subps %xmm6, %xmm9
subps %xmm4, %xmm12
movaps %xmm10, %xmm13
subps %xmm2, %xmm13
movaps %xmm9, %xmm15
subps %xmm1, %xmm15
movaps %xmm12, %xmm14
subps %xmm0, %xmm14
movaps %xmm10, %xmm4
addps %xmm2, %xmm4
movaps %xmm9, %xmm5
addps %xmm1, %xmm5
movaps %xmm12, %xmm6
addps %xmm0, %xmm6
movaps %xmm13, %xmm11
mulps %xmm5, %xmm11
mulps %xmm14, %xmm5
movaps %xmm15, %xmm8
mulps %xmm6, %xmm8
subps %xmm5, %xmm8
movaps %xmm13, 0x220(%rsp)
mulps %xmm13, %xmm6
movaps %xmm4, %xmm5
movaps %xmm14, 0x3a0(%rsp)
mulps %xmm14, %xmm5
subps %xmm6, %xmm5
movss 0x50(%rbx,%r15,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm15, 0x390(%rsp)
mulps %xmm15, %xmm4
subps %xmm4, %xmm11
movss 0x60(%rbx,%r15,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm4, %xmm11
mulps %xmm14, %xmm5
addps %xmm11, %xmm5
movaps %xmm2, %xmm3
movaps 0x30(%rsp), %xmm15
subps %xmm15, %xmm3
mulps %xmm7, %xmm8
addps %xmm5, %xmm8
movaps %xmm1, %xmm11
movaps 0x40(%rsp), %xmm6
subps %xmm6, %xmm11
movaps %xmm0, %xmm7
movaps 0x10(%rsp), %xmm13
subps %xmm13, %xmm7
movaps %xmm1, 0x80(%rsp)
addps %xmm6, %xmm1
movaps %xmm0, 0x70(%rsp)
movaps %xmm0, %xmm6
addps %xmm13, %xmm6
movaps %xmm3, %xmm13
mulps %xmm1, %xmm13
mulps %xmm7, %xmm1
movaps %xmm11, %xmm0
mulps %xmm6, %xmm0
subps %xmm1, %xmm0
movaps %xmm2, 0xa0(%rsp)
addps %xmm15, %xmm2
movaps %xmm15, %xmm1
mulps %xmm3, %xmm6
movaps %xmm2, %xmm15
movaps %xmm7, 0x380(%rsp)
mulps %xmm7, %xmm15
subps %xmm6, %xmm15
mulps %xmm11, %xmm2
subps %xmm2, %xmm13
mulps %xmm4, %xmm13
mulps %xmm14, %xmm15
addps %xmm13, %xmm15
movaps 0xb0(%rsp), %xmm6
mulps %xmm6, %xmm0
addps %xmm15, %xmm0
movaps %xmm1, %xmm13
subps %xmm10, %xmm13
addps %xmm1, %xmm10
movaps 0x40(%rsp), %xmm1
movaps %xmm1, %xmm7
subps %xmm9, %xmm7
addps %xmm1, %xmm9
movaps 0x10(%rsp), %xmm1
movaps %xmm1, %xmm15
subps %xmm12, %xmm15
addps %xmm1, %xmm12
movaps %xmm13, %xmm5
mulps %xmm9, %xmm5
mulps %xmm15, %xmm9
movaps %xmm7, %xmm1
mulps %xmm12, %xmm1
subps %xmm9, %xmm1
movaps %xmm6, %xmm9
mulps %xmm13, %xmm12
movaps %xmm10, %xmm2
mulps %xmm15, %xmm2
subps %xmm12, %xmm2
mulps %xmm7, %xmm10
subps %xmm10, %xmm5
movaps %xmm4, 0x40(%rsp)
mulps %xmm4, %xmm5
movaps %xmm0, %xmm4
movaps %xmm14, 0x10(%rsp)
mulps %xmm14, %xmm2
addps %xmm5, %xmm2
mulps %xmm6, %xmm1
addps %xmm2, %xmm1
movaps %xmm8, %xmm2
addps %xmm0, %xmm2
addps %xmm1, %xmm2
movaps %xmm8, %xmm0
minps %xmm4, %xmm0
minps %xmm1, %xmm0
movaps %xmm8, 0x360(%rsp)
movaps %xmm8, %xmm6
movaps %xmm4, 0x340(%rsp)
maxps %xmm4, %xmm6
maxps %xmm1, %xmm6
movaps %xmm2, 0x350(%rsp)
movaps %xmm2, %xmm1
andps 0x1d43e21(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d494ca(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d43e1f(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
movmskps %xmm6, %edx
testl %edx, %edx
movq %r8, 0xc0(%rsp)
je 0x1a8d34
movaps %xmm3, %xmm5
movaps %xmm11, %xmm0
movaps 0x3a0(%rsp), %xmm8
mulps %xmm8, %xmm0
movaps %xmm3, %xmm1
movaps %xmm15, %xmm3
movaps %xmm15, 0x370(%rsp)
movaps 0x390(%rsp), %xmm15
mulps %xmm15, %xmm1
movaps %xmm7, 0x30(%rsp)
movaps 0x380(%rsp), %xmm14
mulps %xmm14, %xmm7
movaps %xmm5, %xmm4
movaps %xmm13, %xmm5
mulps %xmm11, %xmm5
mulps %xmm14, %xmm15
subps %xmm0, %xmm15
movaps 0x220(%rsp), %xmm12
movaps %xmm12, %xmm2
mulps %xmm11, %xmm12
mulps %xmm3, %xmm11
subps %xmm7, %xmm11
movaps 0xa0(%rsp), %xmm10
movaps 0x1d43d76(%rip), %xmm3 # 0x1eec6c0
andps %xmm3, %xmm0
andps %xmm3, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm15, %xmm11
movaps %xmm4, %xmm0
mulps 0x370(%rsp), %xmm0
mulps %xmm14, %xmm2
mulps %xmm14, %xmm13
mulps %xmm4, %xmm8
subps %xmm2, %xmm8
subps %xmm0, %xmm13
andps %xmm3, %xmm2
andps %xmm3, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm8, %xmm13
mulps 0x30(%rsp), %xmm4
subps %xmm1, %xmm12
subps %xmm5, %xmm4
andps %xmm3, %xmm1
andps %xmm3, %xmm5
cmpltps %xmm5, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm12, %xmm4
movaps 0x40(%rsp), %xmm1
mulps %xmm4, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm13, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm9
addps %xmm0, %xmm9
addps %xmm9, %xmm9
movaps 0x70(%rsp), %xmm1
mulps %xmm4, %xmm1
movaps 0x80(%rsp), %xmm0
mulps %xmm13, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm10
addps %xmm0, %xmm10
rcpps %xmm9, %xmm1
movaps %xmm9, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d44014(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm10, %xmm10
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm10, %xmm0
movss 0x80(%rbx,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%rbx,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a8d34
cmpneqps 0x1d42fc5(%rip), %xmm9 # 0x1eeba10
andps %xmm9, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a8d34
movaps 0x360(%rsp), %xmm2
movaps %xmm2, 0x140(%rsp)
movaps 0x340(%rsp), %xmm2
movaps %xmm2, 0x150(%rsp)
movaps 0x350(%rsp), %xmm2
movaps %xmm2, 0x160(%rsp)
leaq 0xf8(%rsp), %rdx
movq %rdx, 0x170(%rsp)
movaps %xmm1, 0x180(%rsp)
movaps %xmm0, 0x1b0(%rsp)
movaps %xmm11, 0x1c0(%rsp)
movaps %xmm13, 0x1d0(%rsp)
movaps %xmm4, 0x1e0(%rsp)
movq (%rdi), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rsi
movl 0x90(%rbx,%r15,4), %edx
testl %edx, 0x34(%rsi)
je 0x1a8d34
movq %rsi, 0x10(%rsp)
movaps 0x180(%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
movaps 0x160(%rsp), %xmm2
movaps %xmm2, %xmm1
andps 0x1d43bb6(%rip), %xmm1 # 0x1eec6c0
rcpps %xmm2, %xmm3
mulps %xmm3, %xmm2
movaps 0x1d43ef9(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm4
subps %xmm2, %xmm4
mulps %xmm3, %xmm4
addps %xmm3, %xmm4
cmpnltps 0x1d49215(%rip), %xmm1 # 0x1ef1d40
andps %xmm4, %xmm1
movaps 0x140(%rsp), %xmm2
mulps %xmm1, %xmm2
minps %xmm5, %xmm2
movaps %xmm2, 0x190(%rsp)
mulps 0x150(%rsp), %xmm1
minps %xmm5, %xmm1
movaps %xmm1, 0x1a0(%rsp)
movq 0x170(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm6
movups (%rsi,%rdx,4), %xmm4
movq %rax, %r8
cmpq $0x2, %rdx
je 0x1a9efa
movl %ecx, %eax
movaps %xmm6, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
shufps $0xa5, %xmm6, %xmm6 # xmm6 = xmm6[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm3, %xmm5
psrld $0x10, %xmm5
xorps %xmm9, %xmm9
pblendw $0xaa, %xmm9, %xmm3 # xmm3 = xmm3[0],xmm9[1],xmm3[2],xmm9[3],xmm3[4],xmm9[5],xmm3[6],xmm9[7]
cvtdq2ps %xmm3, %xmm3
movaps 0x1d73dc8(%rip), %xmm10 # 0x1f1c970
mulps %xmm10, %xmm3
cvtdq2ps %xmm5, %xmm5
mulps %xmm10, %xmm5
movaps %xmm6, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm9, %xmm6 # xmm6 = xmm6[0],xmm9[1],xmm6[2],xmm9[3],xmm6[4],xmm9[5],xmm6[6],xmm9[7]
cvtdq2ps %xmm6, %xmm6
mulps %xmm10, %xmm6
cvtdq2ps %xmm7, %xmm7
mulps %xmm10, %xmm7
movaps %xmm4, %xmm8
psrld $0x10, %xmm8
pblendw $0xaa, %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[1],xmm4[2],xmm9[3],xmm4[4],xmm9[5],xmm4[6],xmm9[7]
cvtdq2ps %xmm4, %xmm4
mulps %xmm10, %xmm4
cvtdq2ps %xmm8, %xmm8
mulps %xmm10, %xmm8
mulps %xmm2, %xmm6
mulps %xmm2, %xmm7
mulps %xmm1, %xmm4
addps %xmm6, %xmm4
mulps %xmm1, %xmm8
addps %xmm7, %xmm8
movaps 0x1d43e05(%rip), %xmm6 # 0x1eeca10
subps %xmm2, %xmm6
subps %xmm1, %xmm6
mulps %xmm6, %xmm3
addps %xmm4, %xmm3
mulps %xmm5, %xmm6
addps %xmm8, %xmm6
movaps %xmm3, 0x190(%rsp)
movaps %xmm6, 0x1a0(%rsp)
movaps 0x1b0(%rsp), %xmm6
movaps 0x1d42db3(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
movq 0x10(%rsp), %rsi
je 0x1a8c6c
movaps %xmm1, %xmm0
movmskps %xmm0, %edx
bsfq %rdx, %rcx
movq 0x10(%rdi), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1a9692
cmpq $0x0, 0x40(%rsi)
jne 0x1a9692
movss 0x190(%rsp,%rcx,4), %xmm0
movss 0x1a0(%rsp,%rcx,4), %xmm1
movss 0x1c0(%rsp,%rcx,4), %xmm2
movss 0x1d0(%rsp,%rcx,4), %xmm3
movss 0x1e0(%rsp,%rcx,4), %xmm4
movss 0x1b0(%rsp,%rcx,4), %xmm5
movss %xmm5, 0x80(%rbx,%r15,4)
movss %xmm2, 0xc0(%rbx,%r15,4)
movss %xmm3, 0xd0(%rbx,%r15,4)
movss %xmm4, 0xe0(%rbx,%r15,4)
movss %xmm0, 0xf0(%rbx,%r15,4)
movss %xmm1, 0x100(%rbx,%r15,4)
movl %eax, 0x110(%rbx,%r15,4)
movl %r8d, 0x120(%rbx,%r15,4)
movq 0x8(%rdi), %rax
movl (%rax), %eax
movl %eax, 0x130(%rbx,%r15,4)
movq 0x8(%rdi), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rbx,%r15,4)
movq 0xc0(%rsp), %r8
cmpl $0x3, %r8d
jb 0x1a9624
movq 0x90(%rsp), %rax
leaq (%rax,%r9,4), %rax
addq $0x2c, %rax
movups (%rax), %xmm1
movups (%rax,%r9,4), %xmm0
cmpq $0x2, %r9
je 0x1a9ea8
movaps 0x210(%rsp), %xmm13
movq 0xf0(%rsp), %rcx
movq (%rcx), %rcx
movl 0x14(%rcx), %edx
leaq (%rax,%rdx,4), %rdi
movups (%rdi), %xmm3
movups (%rdi,%r9,4), %xmm2
cmpq $0x2, %r9
je 0x1a9eb5
leaq (%rax,%rdx,8), %rsi
movups (%rsi), %xmm9
movups (%rsi,%r9,4), %xmm7
movq %r11, %r8
cmpq $0x2, %r9
je 0x1a9ec2
movl 0x28(%rcx), %r10d
shrl $0x2, %r10d
shll $0x2, %r10d
leaq (%rax,%r10), %r9
movups (%rax,%r10), %xmm5
movq 0x50(%rsp), %r11
movups (%r9,%r11,4), %xmm4
movq %r11, %r9
cmpq $0x2, %r11
je 0x1a9ed0
movq %r8, %r11
addq %r10, %rdi
movups (%rdi), %xmm8
movups (%rdi,%r9,4), %xmm6
cmpq $0x2, %r9
movq 0xc0(%rsp), %r8
je 0x1a9edd
movq 0x1338(%rsp), %rdi
movaps %xmm1, %xmm14
unpcklps %xmm0, %xmm14 # xmm14 = xmm14[0],xmm0[0],xmm14[1],xmm0[1]
shufps $0xa5, %xmm1, %xmm1 # xmm1 = xmm1[1,1,2,2]
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm3, %xmm15
unpcklps %xmm2, %xmm15 # xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
shufps $0xa5, %xmm3, %xmm3 # xmm3 = xmm3[1,1,2,2]
movaps %xmm3, 0x90(%rsp)
shufps $0x94, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,2]
addq %r10, %rsi
movaps %xmm9, %xmm3
unpcklps %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
movaps %xmm3, 0x40(%rsp)
shufps $0xa5, %xmm9, %xmm9 # xmm9 = xmm9[1,1,2,2]
movaps %xmm9, 0x10(%rsp)
shufps $0x94, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,2]
movaps %xmm7, 0x80(%rsp)
movaps %xmm5, %xmm12
unpcklps %xmm4, %xmm12 # xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm8, %xmm11
unpcklps %xmm6, %xmm11 # xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,2]
movups (%rsi), %xmm10
movups (%rsi,%r9,4), %xmm9
cmpq $0x2, %r9
movaps %xmm0, 0x30(%rsp)
movaps %xmm2, %xmm7
movaps %xmm14, %xmm3
je 0x1a9eeb
movq 0x8(%rsp), %r10
imulq $0xc, %rdx, %rdx
addq %rdx, %rax
mulps %xmm13, %xmm12
movaps 0x1f0(%rsp), %xmm14
mulps %xmm14, %xmm3
addps %xmm12, %xmm3
movaps %xmm10, %xmm12
unpcklps %xmm9, %xmm12 # xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
shufps $0xa5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,2,2]
shufps $0x94, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,2]
mulps %xmm13, %xmm11
mulps %xmm13, %xmm12
mulps %xmm14, %xmm15
addps %xmm11, %xmm15
movaps %xmm15, %xmm2
movaps 0x40(%rsp), %xmm15
mulps %xmm14, %xmm15
addps %xmm12, %xmm15
mulps %xmm13, %xmm5
mulps %xmm13, %xmm8
mulps %xmm13, %xmm10
mulps %xmm14, %xmm1
addps %xmm5, %xmm1
movaps 0x90(%rsp), %xmm11
mulps %xmm14, %xmm11
addps %xmm8, %xmm11
movaps 0x10(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm10, %xmm0
movaps %xmm0, %xmm10
mulps %xmm13, %xmm4
mulps %xmm13, %xmm6
mulps %xmm13, %xmm9
movaps 0x30(%rsp), %xmm8
mulps %xmm14, %xmm8
addps %xmm4, %xmm8
mulps %xmm14, %xmm7
addps %xmm6, %xmm7
movaps 0x80(%rsp), %xmm12
mulps %xmm14, %xmm12
addps %xmm9, %xmm12
movq %rax, 0xf8(%rsp)
movq %r9, 0x100(%rsp)
movq %r8, 0x108(%rsp)
movl 0x18(%rcx), %eax
movl 0x1c(%rcx), %ecx
leaq 0xf8(%rsp), %rdx
movq %rdx, 0x170(%rsp)
movss (%rbx,%r15,4), %xmm5
movss 0x10(%rbx,%r15,4), %xmm6
movss 0x20(%rbx,%r15,4), %xmm4
movss 0x40(%rbx,%r15,4), %xmm0
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x50(%rsp)
movss 0x50(%rbx,%r15,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x90(%rsp)
subps %xmm5, %xmm3
subps %xmm6, %xmm2
movaps %xmm2, 0x1f0(%rsp)
subps %xmm4, %xmm15
subps %xmm5, %xmm1
movaps %xmm1, 0x30(%rsp)
subps %xmm6, %xmm11
subps %xmm4, %xmm10
movaps %xmm10, 0x10(%rsp)
subps %xmm5, %xmm8
subps %xmm6, %xmm7
subps %xmm4, %xmm12
movaps %xmm8, %xmm13
subps %xmm3, %xmm13
movaps %xmm7, %xmm10
subps %xmm2, %xmm10
movaps %xmm12, %xmm14
subps %xmm15, %xmm14
movaps %xmm8, %xmm4
addps %xmm3, %xmm4
movaps %xmm7, %xmm5
addps %xmm2, %xmm5
movaps %xmm12, %xmm6
addps %xmm15, %xmm6
movaps %xmm13, %xmm9
mulps %xmm5, %xmm9
mulps %xmm14, %xmm5
movaps %xmm10, %xmm0
mulps %xmm6, %xmm0
subps %xmm5, %xmm0
movaps %xmm13, 0x70(%rsp)
mulps %xmm13, %xmm6
movaps %xmm4, %xmm5
movaps %xmm14, 0xc0(%rsp)
mulps %xmm14, %xmm5
subps %xmm6, %xmm5
movss 0x60(%rbx,%r15,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm10, 0xb0(%rsp)
mulps %xmm10, %xmm4
subps %xmm4, %xmm9
movaps %xmm3, %xmm10
subps %xmm1, %xmm10
mulps %xmm14, %xmm9
movaps 0x90(%rsp), %xmm1
mulps %xmm1, %xmm5
addps %xmm9, %xmm5
movaps %xmm2, %xmm4
movaps %xmm2, %xmm13
subps %xmm11, %xmm13
mulps 0x50(%rsp), %xmm0
addps %xmm5, %xmm0
movaps %xmm15, %xmm2
movaps 0x10(%rsp), %xmm6
subps %xmm6, %xmm2
addps %xmm11, %xmm4
movaps %xmm15, 0x40(%rsp)
movaps %xmm15, %xmm5
addps %xmm6, %xmm5
movaps %xmm10, %xmm6
mulps %xmm4, %xmm6
mulps %xmm2, %xmm4
movaps %xmm13, %xmm9
mulps %xmm5, %xmm9
subps %xmm4, %xmm9
movaps %xmm3, 0x80(%rsp)
movaps %xmm3, %xmm4
movaps %xmm1, %xmm3
movaps 0x30(%rsp), %xmm1
addps %xmm1, %xmm4
mulps %xmm10, %xmm5
movaps %xmm4, %xmm15
movaps %xmm2, 0xa0(%rsp)
mulps %xmm2, %xmm15
subps %xmm5, %xmm15
mulps %xmm13, %xmm4
subps %xmm4, %xmm6
mulps %xmm14, %xmm6
mulps %xmm3, %xmm15
addps %xmm6, %xmm15
movaps 0x50(%rsp), %xmm4
mulps %xmm4, %xmm9
addps %xmm15, %xmm9
movaps %xmm1, %xmm15
subps %xmm8, %xmm15
addps %xmm1, %xmm8
movaps %xmm11, %xmm6
subps %xmm7, %xmm6
addps %xmm11, %xmm7
movaps 0x10(%rsp), %xmm1
movaps %xmm1, %xmm11
subps %xmm12, %xmm11
addps %xmm1, %xmm12
movaps %xmm15, %xmm5
mulps %xmm7, %xmm5
mulps %xmm11, %xmm7
movaps %xmm6, %xmm1
mulps %xmm12, %xmm1
subps %xmm7, %xmm1
mulps %xmm15, %xmm12
movaps %xmm8, %xmm2
mulps %xmm11, %xmm2
subps %xmm12, %xmm2
movaps %xmm6, %xmm7
mulps %xmm6, %xmm8
subps %xmm8, %xmm5
movaps %xmm0, %xmm6
movaps %xmm14, 0x10(%rsp)
mulps %xmm14, %xmm5
mulps %xmm3, %xmm2
addps %xmm5, %xmm2
mulps %xmm4, %xmm1
addps %xmm2, %xmm1
movaps %xmm0, %xmm2
addps %xmm9, %xmm2
addps %xmm1, %xmm2
minps %xmm9, %xmm0
minps %xmm1, %xmm0
movaps %xmm6, 0x210(%rsp)
maxps %xmm9, %xmm6
maxps %xmm1, %xmm6
movaps %xmm2, 0x220(%rsp)
movaps %xmm2, %xmm1
andps 0x1d43507(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d48bb0(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d43505(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
movmskps %xmm6, %edx
testl %edx, %edx
je 0x1a9624
movaps %xmm4, %xmm8
movaps %xmm10, %xmm14
movaps %xmm13, %xmm0
movaps 0xc0(%rsp), %xmm10
mulps %xmm10, %xmm0
movaps %xmm14, %xmm1
movaps 0xb0(%rsp), %xmm3
mulps %xmm3, %xmm1
movaps %xmm7, 0x30(%rsp)
movaps 0xa0(%rsp), %xmm4
mulps %xmm4, %xmm7
movaps %xmm15, %xmm5
mulps %xmm13, %xmm5
mulps %xmm4, %xmm3
subps %xmm0, %xmm3
movaps 0x70(%rsp), %xmm12
movaps %xmm12, %xmm2
mulps %xmm13, %xmm12
mulps %xmm11, %xmm13
subps %xmm7, %xmm13
movaps %xmm11, 0x70(%rsp)
movaps 0x1d43479(%rip), %xmm11 # 0x1eec6c0
andps %xmm11, %xmm0
andps %xmm11, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm3, %xmm13
movaps %xmm14, %xmm0
mulps 0x70(%rsp), %xmm0
mulps %xmm4, %xmm2
mulps %xmm4, %xmm15
mulps %xmm14, %xmm10
subps %xmm2, %xmm10
subps %xmm0, %xmm15
andps %xmm11, %xmm2
andps %xmm11, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm10, %xmm15
mulps 0x30(%rsp), %xmm14
subps %xmm1, %xmm12
subps %xmm5, %xmm14
andps %xmm11, %xmm1
andps %xmm11, %xmm5
cmpltps %xmm5, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm12, %xmm14
movaps 0x10(%rsp), %xmm1
mulps %xmm14, %xmm1
movaps 0x90(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm1, %xmm0
mulps %xmm13, %xmm8
addps %xmm0, %xmm8
addps %xmm8, %xmm8
movaps 0x40(%rsp), %xmm1
mulps %xmm14, %xmm1
movaps 0x1f0(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm1, %xmm0
movaps 0x80(%rsp), %xmm3
mulps %xmm13, %xmm3
addps %xmm0, %xmm3
rcpps %xmm8, %xmm1
movaps %xmm8, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d43706(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movss 0x80(%rbx,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%rbx,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a9624
cmpneqps 0x1d426b9(%rip), %xmm8 # 0x1eeba10
andps %xmm8, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a9624
movaps 0x210(%rsp), %xmm2
movaps %xmm2, 0x140(%rsp)
movaps %xmm9, 0x150(%rsp)
movaps 0x220(%rsp), %xmm2
movaps %xmm2, 0x160(%rsp)
leaq 0xf8(%rsp), %rdx
movq %rdx, 0x170(%rsp)
movaps %xmm1, 0x180(%rsp)
movaps %xmm0, 0x1b0(%rsp)
movaps %xmm13, 0x1c0(%rsp)
movaps %xmm15, 0x1d0(%rsp)
movaps %xmm14, 0x1e0(%rsp)
movq (%rdi), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %r8
movl 0x90(%rbx,%r15,4), %edx
testl %edx, 0x34(%r8)
je 0x1a9624
movaps 0x180(%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
movaps 0x160(%rsp), %xmm2
movaps %xmm2, %xmm1
andps 0x1d432b4(%rip), %xmm1 # 0x1eec6c0
rcpps %xmm2, %xmm3
mulps %xmm3, %xmm2
movaps 0x1d435f7(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm4
subps %xmm2, %xmm4
mulps %xmm3, %xmm4
addps %xmm3, %xmm4
cmpnltps 0x1d48913(%rip), %xmm1 # 0x1ef1d40
andps %xmm4, %xmm1
movaps 0x140(%rsp), %xmm2
mulps %xmm1, %xmm2
minps %xmm5, %xmm2
movaps %xmm2, 0x190(%rsp)
mulps 0x150(%rsp), %xmm1
minps %xmm5, %xmm1
movaps %xmm1, 0x1a0(%rsp)
movq 0x170(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm6
movups (%rsi,%rdx,4), %xmm4
cmpq $0x2, %rdx
je 0x1a9f07
movaps %xmm6, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
shufps $0xa5, %xmm6, %xmm6 # xmm6 = xmm6[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm3, %xmm5
psrld $0x10, %xmm5
xorps %xmm9, %xmm9
pblendw $0xaa, %xmm9, %xmm3 # xmm3 = xmm3[0],xmm9[1],xmm3[2],xmm9[3],xmm3[4],xmm9[5],xmm3[6],xmm9[7]
cvtdq2ps %xmm3, %xmm3
movaps 0x1d734cb(%rip), %xmm10 # 0x1f1c970
mulps %xmm10, %xmm3
cvtdq2ps %xmm5, %xmm5
mulps %xmm10, %xmm5
movaps %xmm6, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm9, %xmm6 # xmm6 = xmm6[0],xmm9[1],xmm6[2],xmm9[3],xmm6[4],xmm9[5],xmm6[6],xmm9[7]
cvtdq2ps %xmm6, %xmm6
mulps %xmm10, %xmm6
cvtdq2ps %xmm7, %xmm7
mulps %xmm10, %xmm7
movaps %xmm4, %xmm8
psrld $0x10, %xmm8
pblendw $0xaa, %xmm9, %xmm4 # xmm4 = xmm4[0],xmm9[1],xmm4[2],xmm9[3],xmm4[4],xmm9[5],xmm4[6],xmm9[7]
cvtdq2ps %xmm4, %xmm4
mulps %xmm10, %xmm4
cvtdq2ps %xmm8, %xmm8
mulps %xmm10, %xmm8
mulps %xmm2, %xmm6
mulps %xmm2, %xmm7
mulps %xmm1, %xmm4
addps %xmm6, %xmm4
mulps %xmm1, %xmm8
addps %xmm7, %xmm8
movaps 0x1d43508(%rip), %xmm6 # 0x1eeca10
subps %xmm2, %xmm6
subps %xmm1, %xmm6
mulps %xmm6, %xmm3
addps %xmm4, %xmm3
mulps %xmm5, %xmm6
addps %xmm8, %xmm6
movaps %xmm3, 0x190(%rsp)
movaps %xmm6, 0x1a0(%rsp)
movaps 0x1b0(%rsp), %xmm6
movaps 0x1d424b6(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1a9564
movaps %xmm1, %xmm0
movmskps %xmm0, %edx
bsfq %rdx, %rsi
movq 0x10(%rdi), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1a9a7a
cmpq $0x0, 0x40(%r8)
jne 0x1a9a7a
movss 0x190(%rsp,%rsi,4), %xmm0
movss 0x1a0(%rsp,%rsi,4), %xmm1
movss 0x1c0(%rsp,%rsi,4), %xmm2
movss 0x1d0(%rsp,%rsi,4), %xmm3
movss 0x1e0(%rsp,%rsi,4), %xmm4
movss 0x1b0(%rsp,%rsi,4), %xmm5
movss %xmm5, 0x80(%rbx,%r15,4)
movss %xmm2, 0xc0(%rbx,%r15,4)
movss %xmm3, 0xd0(%rbx,%r15,4)
movss %xmm4, 0xe0(%rbx,%r15,4)
movss %xmm0, 0xf0(%rbx,%r15,4)
movss %xmm1, 0x100(%rbx,%r15,4)
movl %ecx, 0x110(%rbx,%r15,4)
movl %eax, 0x120(%rbx,%r15,4)
movq 0x8(%rdi), %rax
movl (%rax), %eax
movl %eax, 0x130(%rbx,%r15,4)
movq 0x8(%rdi), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rbx,%r15,4)
xorl %eax, %eax
movaps 0x2a0(%rsp), %xmm8
movaps 0x290(%rsp), %xmm9
movaps 0x280(%rsp), %xmm10
movaps 0x270(%rsp), %xmm11
movaps 0x260(%rsp), %xmm12
movaps 0x250(%rsp), %xmm13
movq 0x68(%rsp), %rdi
movaps 0x240(%rsp), %xmm14
movss 0x80(%rbx,%r15,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
testq %rax, %rax
je 0x1a7f99
movq %rax, (%r11)
andl $0x0, 0x8(%r11)
addq $0x10, %r11
jmp 0x1a7f99
movd %r8d, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x80(%rsp)
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x70(%rsp)
movq 0x208(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0xb0(%rsp)
movaps %xmm6, 0xa0(%rsp)
movq %rcx, %rdx
movq 0xc0(%rsp), %r8
movq %r11, 0x28(%rsp)
movss 0x80(%rbx,%r15,4), %xmm0
movss %xmm0, 0x30(%rsp)
movss 0x1b0(%rsp,%rdx,4), %xmm0
movss 0x190(%rsp,%rdx,4), %xmm1
movss 0x1a0(%rsp,%rdx,4), %xmm2
movss %xmm0, 0x80(%rbx,%r15,4)
movq 0x8(%rdi), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x1c0(%rsp,%rdx,4), %xmm0
movss 0x1d0(%rsp,%rdx,4), %xmm3
movss 0x1e0(%rsp,%rdx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x2b0(%rsp)
movaps %xmm3, 0x2c0(%rsp)
movaps %xmm4, 0x2d0(%rsp)
movaps %xmm1, 0x2e0(%rsp)
movaps %xmm2, 0x2f0(%rsp)
movaps 0x70(%rsp), %xmm0
movaps %xmm0, 0x300(%rsp)
movdqa 0x80(%rsp), %xmm0
movdqa %xmm0, 0x310(%rsp)
leaq 0x320(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x320(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x330(%rsp)
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm0, 0xe0(%rsp)
leaq 0xe0(%rsp), %rcx
movq %rcx, 0x110(%rsp)
movq 0x18(%rsi), %rcx
movq %rcx, 0x118(%rsp)
movq %rax, 0x120(%rsp)
movq %rbx, 0x128(%rsp)
leaq 0x2b0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x4, 0x138(%rsp)
movq 0x40(%rsi), %rax
testq %rax, %rax
movq %rdx, 0x40(%rsp)
je 0x1a986a
leaq 0x110(%rsp), %rdi
callq *%rax
movq 0x40(%rsp), %rdx
movaps 0xa0(%rsp), %xmm6
movq 0x10(%rsp), %rsi
movq 0xc0(%rsp), %r8
movq 0x50(%rsp), %r9
movq 0x8(%rsp), %r10
movq 0x1338(%rsp), %rdi
movq 0x28(%rsp), %r11
movdqa 0xe0(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1a99d6
movq 0x10(%rdi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1a98d1
testb $0x2, (%rcx)
jne 0x1a9896
testb $0x40, 0x3e(%rsi)
je 0x1a98d1
leaq 0x110(%rsp), %rdi
callq *%rax
movq 0x40(%rsp), %rdx
movaps 0xa0(%rsp), %xmm6
movq 0x10(%rsp), %rsi
movq 0xc0(%rsp), %r8
movq 0x50(%rsp), %r9
movq 0x8(%rsp), %r10
movq 0x1338(%rsp), %rdi
movq 0x28(%rsp), %r11
movdqa 0xe0(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d42129(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d4252d(%rip), %xmm1 # 0x1eebe20
je 0x1a99e6
movq 0x128(%rsp), %rax
movq 0x130(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1a99e6
pcmpeqd 0x1d42032(%rip), %xmm1 # 0x1eeba10
pxor 0x1d4243a(%rip), %xmm1 # 0x1eebe20
ptest 0x1d43131(%rip), %xmm1 # 0x1eecb20
jne 0x1a9a01
movd 0x30(%rsp), %xmm0
movd %xmm0, 0x80(%rbx,%r15,4)
andl $0x0, 0xd0(%rsp,%rdx,4)
movss 0x80(%rbx,%r15,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm6, %xmm0
cmpleps %xmm1, %xmm0
andps 0xd0(%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1a9a6d
movaps 0x1d41fb4(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %ecx
testl %ecx, %ecx
je 0x1a9a66
movaps %xmm1, %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %rdx
testb %al, %al
jne 0x1a96df
jmp 0x1a8d34
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x40(%rsp)
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x90(%rsp)
movq 0x208(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x30(%rsp)
movq %r8, 0x80(%rsp)
movaps %xmm6, 0x70(%rsp)
movq %r11, 0x28(%rsp)
movss 0x80(%rbx,%r15,4), %xmm0
movss %xmm0, 0x10(%rsp)
movss 0x1b0(%rsp,%rsi,4), %xmm0
movss 0x190(%rsp,%rsi,4), %xmm1
movss 0x1a0(%rsp,%rsi,4), %xmm2
movss %xmm0, 0x80(%rbx,%r15,4)
movq 0x8(%rdi), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x1c0(%rsp,%rsi,4), %xmm0
movss 0x1d0(%rsp,%rsi,4), %xmm3
movss 0x1e0(%rsp,%rsi,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x2b0(%rsp)
movaps %xmm3, 0x2c0(%rsp)
movaps %xmm4, 0x2d0(%rsp)
movaps %xmm1, 0x2e0(%rsp)
movaps %xmm2, 0x2f0(%rsp)
movaps 0x90(%rsp), %xmm0
movaps %xmm0, 0x300(%rsp)
movdqa 0x40(%rsp), %xmm0
movdqa %xmm0, 0x310(%rsp)
leaq 0x320(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x320(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x330(%rsp)
movdqa 0x30(%rsp), %xmm0
movdqa %xmm0, 0xe0(%rsp)
leaq 0xe0(%rsp), %rcx
movq %rcx, 0x110(%rsp)
movq 0x18(%r8), %rcx
movq %rcx, 0x118(%rsp)
movq %rax, 0x120(%rsp)
movq %rbx, 0x128(%rsp)
leaq 0x2b0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x4, 0x138(%rsp)
movq 0x40(%r8), %rax
testq %rax, %rax
movq %rsi, 0x50(%rsp)
je 0x1a9c38
leaq 0x110(%rsp), %rdi
callq *%rax
movq 0x50(%rsp), %rsi
movaps 0x70(%rsp), %xmm6
movq 0x80(%rsp), %r8
movq 0x8(%rsp), %r10
movq 0x1338(%rsp), %rdi
movq 0x28(%rsp), %r11
movdqa 0xe0(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1a9d98
movq 0x10(%rdi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1a9c93
testb $0x2, (%rcx)
jne 0x1a9c65
testb $0x40, 0x3e(%r8)
je 0x1a9c93
leaq 0x110(%rsp), %rdi
callq *%rax
movq 0x50(%rsp), %rsi
movaps 0x70(%rsp), %xmm6
movq 0x80(%rsp), %r8
movq 0x8(%rsp), %r10
movq 0x1338(%rsp), %rdi
movq 0x28(%rsp), %r11
movdqa 0xe0(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d41d67(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d4216b(%rip), %xmm1 # 0x1eebe20
je 0x1a9da8
movq 0x128(%rsp), %rax
movq 0x130(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1a9da8
pcmpeqd 0x1d41c70(%rip), %xmm1 # 0x1eeba10
pxor 0x1d42078(%rip), %xmm1 # 0x1eebe20
ptest 0x1d42d6f(%rip), %xmm1 # 0x1eecb20
jne 0x1a9dc3
movd 0x10(%rsp), %xmm0
movd %xmm0, 0x80(%rbx,%r15,4)
andl $0x0, 0xd0(%rsp,%rsi,4)
movss 0x80(%rbx,%r15,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm6, %xmm0
cmpleps %xmm1, %xmm0
andps 0xd0(%rsp), %xmm0
movaps %xmm0, 0xd0(%rsp)
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1a9e2f
movaps 0x1d41bf2(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm6, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %ecx
testl %ecx, %ecx
je 0x1a9e28
movaps %xmm1, %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %rsi
testb %al, %al
jne 0x1a9abd
jmp 0x1a9624
andq $-0x10, %rax
movq (%rax), %rcx
movq (%rcx), %rax
movq 0xf0(%rsp), %rdx
movq %rcx, (%rdx)
jmp 0x1a966a
shufps $0x54, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1a83bb
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
shufps $0x54, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
jmp 0x1a83e9
shufps $0x54, %xmm11, %xmm11 # xmm11 = xmm11[0,1,1,1]
shufps $0x54, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,1]
jmp 0x1a8411
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a843e
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
jmp 0x1a8461
shufps $0x54, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a8509
shufps $0x54, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1a8d60
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
jmp 0x1a8d8d
shufps $0x54, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,1]
shufps $0x54, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,1]
jmp 0x1a8da7
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a8dd3
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
jmp 0x1a8df4
shufps $0x54, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
shufps $0x54, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,1]
jmp 0x1a8e8a
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a8b7a
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1a9479
addq $0x12f8, %rsp # imm = 0x12F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::sse42::SubdivPatch1MBIntersectorK<4>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubdivPatch1PrecalculationsK<4, embree::sse42::GridSOAIntersectorK<4>::Precalculations>&, embree::RayK<4>&, embree::sse42::TravRayK<4, false> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb68, %rsp # imm = 0xB68
movq %r8, 0x168(%rsp)
movq %rcx, %r10
movq 0xba0(%rsp), %rax
leaq 0x3c8(%rsp), %r11
movq %rdx, -0x8(%r11)
movss (%rax,%rcx,4), %xmm5
movss 0x10(%rax,%rcx,4), %xmm6
movss 0x20(%rax,%rcx,4), %xmm7
movss 0x60(%rax,%rcx,4), %xmm8
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
movss 0x70(%rax,%rcx,4), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x80(%rax,%rcx,4), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %rsi
movslq 0xa0(%rax,%rcx,4), %rbx
movslq 0xb0(%rax,%rcx,4), %r15
movq %rsi, %r12
xorq $0x10, %r12
movq %rbx, %r13
xorq $0x10, %r13
movq %r15, %r14
xorq $0x10, %r14
movss 0xc0(%rax,%rcx,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
pushq $0x1
popq %rax
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1f7a739(%rip), %rax # 0x2124730
movq %rax, 0x1b8(%rsp)
movq %rcx, 0x48(%rsp)
movaps %xmm5, 0x130(%rsp)
movaps %xmm6, 0x120(%rsp)
movaps %xmm7, 0x110(%rsp)
movaps %xmm8, 0xf0(%rsp)
movaps %xmm9, 0xe0(%rsp)
movaps %xmm10, 0xd0(%rsp)
movq %rsi, 0x8(%rsp)
movaps %xmm11, 0xc0(%rsp)
movaps %xmm12, 0xb0(%rsp)
movq %r11, %rcx
leaq 0x3c0(%rsp), %rax
cmpq %rax, %r11
je 0x1abc55
leaq -0x8(%rcx), %r11
movq %rcx, 0x2f8(%rsp)
movq -0x8(%rcx), %rax
testb $0x8, %al
jne 0x1aa14e
movq %rax, %rcx
andq $-0x10, %rcx
movss 0x70(%r9,%r10,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rcx,%rsi), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rcx,%rsi), %xmm2
subps %xmm5, %xmm2
mulps %xmm8, %xmm2
movaps %xmm11, %xmm1
movaps 0x80(%rcx,%rbx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rcx,%rbx), %xmm3
maxps %xmm2, %xmm1
subps %xmm6, %xmm3
mulps %xmm9, %xmm3
movaps 0x80(%rcx,%r15), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rcx,%r15), %xmm2
subps %xmm7, %xmm2
mulps %xmm10, %xmm2
maxps %xmm2, %xmm3
maxps %xmm3, %xmm1
movaps 0x80(%rcx,%r12), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rcx,%r12), %xmm3
subps %xmm5, %xmm3
mulps %xmm8, %xmm3
movaps %xmm12, %xmm2
minps %xmm3, %xmm2
movaps 0x80(%rcx,%r13), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rcx,%r13), %xmm3
subps %xmm6, %xmm3
movaps 0x80(%rcx,%r14), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rcx,%r14), %xmm4
mulps %xmm9, %xmm3
subps %xmm7, %xmm4
mulps %xmm10, %xmm4
minps %xmm4, %xmm3
minps %xmm3, %xmm2
movl %eax, %edx
andl $0x7, %edx
cmpleps %xmm2, %xmm1
cmpl $0x6, %edx
je 0x1aa194
pslld $0x1f, %xmm1
movmskps %xmm1, %ebp
testb $0x8, %al
jne 0x1aa190
testq %rbp, %rbp
je 0x1aa1af
andq $-0x10, %rax
bsfq %rbp, %rdx
leaq -0x1(%rbp), %rdi
xorl %ecx, %ecx
movq (%rax,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rbp, %rdi
jne 0x1aa1b4
movq %rdx, %rax
testl %ecx, %ecx
je 0x1aa072
jmp 0x1aa1fc
pushq $0x6
jmp 0x1aa1b1
movaps 0xe0(%rcx), %xmm2
cmpleps %xmm0, %xmm2
cmpltps 0xf0(%rcx), %xmm0
andps %xmm2, %xmm0
andps %xmm0, %xmm1
jmp 0x1aa146
pushq $0x4
popq %rcx
jmp 0x1aa186
movq %rdx, (%r11)
addq $0x8, %r11
bsfq %rdi, %rsi
leaq -0x1(%rdi), %rdx
movq (%rax,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
andq %rdi, %rdx
je 0x1aa1f2
movq %rsi, (%r11)
addq $0x8, %r11
bsfq %rdx, %rsi
leaq -0x1(%rdx), %rdi
jmp 0x1aa1c3
movq %rsi, %rax
movq 0x8(%rsp), %rsi
jmp 0x1aa186
cmpl $0x6, %ecx
jne 0x1ab635
movl %eax, %ecx
andl $0xf, %ecx
cmpl $0x8, %ecx
jne 0x1ab699
movq %r11, 0x68(%rsp)
movq 0x168(%rsp), %rcx
movq (%rcx), %rdx
movl 0x8(%rdx), %ecx
decl %ecx
cvtsi2ss %rcx, %xmm0
movss 0x70(%r9,%r10,4), %xmm15
mulss %xmm0, %xmm15
roundss $0x9, %xmm15, %xmm1
addss 0x1d46784(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm3, %xmm3
maxss %xmm1, %xmm3
cvttss2si %xmm3, %ecx
movl 0xc(%rdx), %edi
movslq %ecx, %rcx
movl 0x24(%rdx), %r11d
movl 0x28(%rdx), %esi
imulq %rsi, %rcx
addq %rdx, %r11
addq %rcx, %r11
shrq $0x4, %rax
leaq (%r11,%rax,4), %rcx
movups 0x2c(%r11,%rax,4), %xmm1
movups 0x2c(%rcx,%rdi,4), %xmm0
movq %rdi, 0x10(%rsp)
cmpq $0x2, %rdi
je 0x1ab6b0
movl 0x14(%rdx), %r8d
leaq (%rcx,%r8,4), %r10
addq $0x2c, %r10
movups (%r10), %xmm2
movq 0x10(%rsp), %rdi
movups (%r10,%rdi,4), %xmm10
cmpq $0x2, %rdi
je 0x1ab6bd
movq %rcx, 0x170(%rsp)
leaq (%rcx,%r8,8), %rdi
addq $0x2c, %rdi
movups (%rdi), %xmm11
movq 0x10(%rsp), %rcx
movups (%rdi,%rcx,4), %xmm7
cmpq $0x2, %rcx
je 0x1ab6cb
shrl $0x2, %esi
leaq (%r11,%rax,4), %rax
addq $0x2c, %rax
shll $0x2, %esi
leaq (%rax,%rsi), %r11
movups (%rax,%rsi), %xmm5
movq 0x10(%rsp), %rax
movups (%r11,%rax,4), %xmm4
cmpq $0x2, %rax
movq 0x170(%rsp), %rcx
je 0x1ab6d9
movq 0x68(%rsp), %r11
addq %rsi, %r10
movups (%r10), %xmm8
movq 0x10(%rsp), %rax
movups (%r10,%rax,4), %xmm6
cmpq $0x2, %rax
je 0x1ab6e6
movq 0x48(%rsp), %r10
subss %xmm3, %xmm15
movaps %xmm1, %xmm3
unpcklps %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
shufps $0xa5, %xmm1, %xmm1 # xmm1 = xmm1[1,1,2,2]
movaps %xmm1, 0x50(%rsp)
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm2, %xmm13
unpcklps %xmm10, %xmm13 # xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1]
shufps $0xa5, %xmm2, %xmm2 # xmm2 = xmm2[1,1,2,2]
movaps %xmm2, 0x30(%rsp)
shufps $0x94, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,2]
movaps %xmm10, 0x140(%rsp)
addq %rsi, %rdi
movaps %xmm11, %xmm14
unpcklps %xmm7, %xmm14 # xmm14 = xmm14[0],xmm7[0],xmm14[1],xmm7[1]
shufps $0xa5, %xmm11, %xmm11 # xmm11 = xmm11[1,1,2,2]
movaps %xmm11, 0x20(%rsp)
shufps $0x94, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,2]
movaps %xmm5, %xmm11
unpcklps %xmm4, %xmm11 # xmm11 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm4, %xmm9
movaps %xmm8, %xmm12
unpcklps %xmm6, %xmm12 # xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,2]
movups (%rdi), %xmm10
movq 0x10(%rsp), %rax
movups (%rdi,%rax,4), %xmm4
cmpq $0x2, %rax
movaps %xmm13, 0x70(%rsp)
movaps %xmm14, 0x80(%rsp)
movaps %xmm0, 0xa0(%rsp)
movaps %xmm7, 0x100(%rsp)
je 0x1ab6f4
movq 0x8(%rsp), %rsi
imulq $0xc, %r8, %rax
movl 0x10(%rdx), %edi
addq %rcx, %rax
addq $0x2c, %rax
movaps %xmm10, %xmm13
unpcklps %xmm4, %xmm13 # xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
shufps $0xa5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movss 0x1d42309(%rip), %xmm14 # 0x1eec714
subss %xmm15, %xmm14
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
mulps %xmm15, %xmm11
mulps %xmm15, %xmm12
mulps %xmm15, %xmm13
movaps %xmm3, %xmm2
mulps %xmm14, %xmm2
addps %xmm11, %xmm2
movaps 0x70(%rsp), %xmm1
mulps %xmm14, %xmm1
addps %xmm12, %xmm1
movaps 0x80(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm13, %xmm0
mulps %xmm15, %xmm5
mulps %xmm15, %xmm8
mulps %xmm15, %xmm10
movaps 0x50(%rsp), %xmm7
mulps %xmm14, %xmm7
addps %xmm5, %xmm7
movaps %xmm7, %xmm13
movaps 0x30(%rsp), %xmm3
mulps %xmm14, %xmm3
addps %xmm8, %xmm3
movaps 0x20(%rsp), %xmm5
mulps %xmm14, %xmm5
addps %xmm10, %xmm5
movaps %xmm5, %xmm11
mulps %xmm15, %xmm9
mulps %xmm15, %xmm6
movaps %xmm15, 0x1a0(%rsp)
mulps %xmm15, %xmm4
movaps 0xa0(%rsp), %xmm10
mulps %xmm14, %xmm10
addps %xmm9, %xmm10
movaps 0x140(%rsp), %xmm9
mulps %xmm14, %xmm9
addps %xmm6, %xmm9
movaps %xmm14, 0x390(%rsp)
movaps 0x100(%rsp), %xmm12
mulps %xmm14, %xmm12
addps %xmm4, %xmm12
movq %rax, 0x188(%rsp)
movq 0x10(%rsp), %rax
movq %rax, 0x190(%rsp)
movq %rdi, 0x198(%rsp)
movl 0x18(%rdx), %ecx
movl 0x1c(%rdx), %eax
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movss (%r9,%r10,4), %xmm5
movss 0x10(%r9,%r10,4), %xmm6
movss 0x20(%r9,%r10,4), %xmm4
movss 0x40(%r9,%r10,4), %xmm7
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movaps %xmm7, 0xa0(%rsp)
subps %xmm5, %xmm2
subps %xmm6, %xmm1
subps %xmm4, %xmm0
subps %xmm5, %xmm13
movaps %xmm13, 0x50(%rsp)
subps %xmm6, %xmm3
movaps %xmm3, 0x30(%rsp)
subps %xmm4, %xmm11
movaps %xmm11, 0x20(%rsp)
subps %xmm5, %xmm10
subps %xmm6, %xmm9
subps %xmm4, %xmm12
movaps %xmm10, %xmm13
subps %xmm2, %xmm13
movaps %xmm9, %xmm15
subps %xmm1, %xmm15
movaps %xmm12, %xmm14
subps %xmm0, %xmm14
movaps %xmm10, %xmm4
addps %xmm2, %xmm4
movaps %xmm9, %xmm5
addps %xmm1, %xmm5
movaps %xmm12, %xmm6
addps %xmm0, %xmm6
movaps %xmm13, %xmm11
mulps %xmm5, %xmm11
mulps %xmm14, %xmm5
movaps %xmm15, %xmm8
mulps %xmm6, %xmm8
subps %xmm5, %xmm8
movaps %xmm13, 0x100(%rsp)
mulps %xmm13, %xmm6
movaps %xmm4, %xmm5
movaps %xmm14, 0x2e0(%rsp)
mulps %xmm14, %xmm5
subps %xmm6, %xmm5
movss 0x50(%r9,%r10,4), %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm15, 0x2d0(%rsp)
mulps %xmm15, %xmm4
subps %xmm4, %xmm11
movss 0x60(%r9,%r10,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm4, %xmm11
mulps %xmm14, %xmm5
addps %xmm11, %xmm5
movaps %xmm2, %xmm3
movaps 0x50(%rsp), %xmm15
subps %xmm15, %xmm3
mulps %xmm7, %xmm8
addps %xmm5, %xmm8
movaps %xmm1, %xmm11
movaps 0x30(%rsp), %xmm6
subps %xmm6, %xmm11
movaps %xmm0, %xmm7
movaps 0x20(%rsp), %xmm13
subps %xmm13, %xmm7
movaps %xmm1, 0x70(%rsp)
addps %xmm6, %xmm1
movaps %xmm0, 0x80(%rsp)
movaps %xmm0, %xmm6
addps %xmm13, %xmm6
movaps %xmm3, %xmm13
mulps %xmm1, %xmm13
mulps %xmm7, %xmm1
movaps %xmm11, %xmm0
mulps %xmm6, %xmm0
subps %xmm1, %xmm0
movaps %xmm2, 0x140(%rsp)
addps %xmm15, %xmm2
movaps %xmm15, %xmm1
mulps %xmm3, %xmm6
movaps %xmm2, %xmm15
movaps %xmm7, 0x2c0(%rsp)
mulps %xmm7, %xmm15
subps %xmm6, %xmm15
mulps %xmm11, %xmm2
subps %xmm2, %xmm13
mulps %xmm4, %xmm13
mulps %xmm14, %xmm15
addps %xmm13, %xmm15
movaps 0xa0(%rsp), %xmm6
mulps %xmm6, %xmm0
addps %xmm15, %xmm0
movaps %xmm1, %xmm13
subps %xmm10, %xmm13
addps %xmm1, %xmm10
movaps 0x30(%rsp), %xmm1
movaps %xmm1, %xmm7
subps %xmm9, %xmm7
addps %xmm1, %xmm9
movaps 0x20(%rsp), %xmm1
movaps %xmm1, %xmm15
subps %xmm12, %xmm15
addps %xmm1, %xmm12
movaps %xmm13, %xmm5
mulps %xmm9, %xmm5
mulps %xmm15, %xmm9
movaps %xmm7, %xmm1
mulps %xmm12, %xmm1
subps %xmm9, %xmm1
movaps %xmm6, %xmm9
mulps %xmm13, %xmm12
movaps %xmm10, %xmm2
mulps %xmm15, %xmm2
subps %xmm12, %xmm2
mulps %xmm7, %xmm10
subps %xmm10, %xmm5
movaps %xmm4, 0x30(%rsp)
mulps %xmm4, %xmm5
movaps %xmm0, %xmm4
movaps %xmm14, 0x20(%rsp)
mulps %xmm14, %xmm2
addps %xmm5, %xmm2
mulps %xmm6, %xmm1
addps %xmm2, %xmm1
movaps %xmm8, %xmm2
addps %xmm0, %xmm2
addps %xmm1, %xmm2
movaps %xmm8, %xmm0
minps %xmm4, %xmm0
minps %xmm1, %xmm0
movaps %xmm8, 0x2a0(%rsp)
movaps %xmm8, %xmm6
movaps %xmm4, 0x3a0(%rsp)
maxps %xmm4, %xmm6
maxps %xmm1, %xmm6
movaps %xmm2, 0x3b0(%rsp)
movaps %xmm2, %xmm1
andps 0x1d41f53(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d475fc(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d41f51(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
movmskps %xmm6, %edx
testl %edx, %edx
je 0x1aae71
movaps %xmm3, %xmm5
movaps %xmm11, %xmm0
movaps 0x2e0(%rsp), %xmm8
mulps %xmm8, %xmm0
movaps %xmm3, %xmm1
movaps %xmm15, %xmm3
movaps %xmm15, 0x2b0(%rsp)
movaps 0x2d0(%rsp), %xmm15
mulps %xmm15, %xmm1
movaps %xmm7, 0x50(%rsp)
movaps 0x2c0(%rsp), %xmm14
mulps %xmm14, %xmm7
movaps %xmm5, %xmm4
movaps %xmm13, %xmm5
mulps %xmm11, %xmm5
mulps %xmm14, %xmm15
subps %xmm0, %xmm15
movaps 0x100(%rsp), %xmm12
movaps %xmm12, %xmm2
mulps %xmm11, %xmm12
mulps %xmm3, %xmm11
subps %xmm7, %xmm11
movaps 0x140(%rsp), %xmm10
movaps 0x1d41eb0(%rip), %xmm3 # 0x1eec6c0
andps %xmm3, %xmm0
andps %xmm3, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm15, %xmm11
movaps %xmm4, %xmm0
mulps 0x2b0(%rsp), %xmm0
mulps %xmm14, %xmm2
mulps %xmm14, %xmm13
mulps %xmm4, %xmm8
subps %xmm2, %xmm8
subps %xmm0, %xmm13
andps %xmm3, %xmm2
andps %xmm3, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm8, %xmm13
mulps 0x50(%rsp), %xmm4
subps %xmm1, %xmm12
subps %xmm5, %xmm4
andps %xmm3, %xmm1
andps %xmm3, %xmm5
cmpltps %xmm5, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm12, %xmm4
movaps 0x30(%rsp), %xmm1
mulps %xmm4, %xmm1
movaps 0x20(%rsp), %xmm0
mulps %xmm13, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm9
addps %xmm0, %xmm9
addps %xmm9, %xmm9
movaps 0x80(%rsp), %xmm1
mulps %xmm4, %xmm1
movaps 0x70(%rsp), %xmm0
mulps %xmm13, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm10
addps %xmm0, %xmm10
rcpps %xmm9, %xmm1
movaps %xmm9, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d4214e(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm10, %xmm10
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm10, %xmm0
movss 0x80(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1aae71
cmpneqps 0x1d410ff(%rip), %xmm9 # 0x1eeba10
andps %xmm9, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1aae71
movaps 0x2a0(%rsp), %xmm2
movaps %xmm2, 0x1f0(%rsp)
movaps 0x3a0(%rsp), %xmm2
movaps %xmm2, 0x200(%rsp)
movaps 0x3b0(%rsp), %xmm2
movaps %xmm2, 0x210(%rsp)
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movaps %xmm1, 0x230(%rsp)
movaps %xmm0, 0x260(%rsp)
movaps %xmm11, 0x270(%rsp)
movaps %xmm13, 0x280(%rsp)
movaps %xmm4, 0x290(%rsp)
movq 0xba8(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %r8
movl 0x90(%r9,%r10,4), %edx
testl %edx, 0x34(%r8)
je 0x1aae71
movq 0xba8(%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1aa9d0
cmpq $0x0, 0x48(%r8)
je 0x1ab5cc
movaps 0x210(%rsp), %xmm1
movaps %xmm1, %xmm0
andps 0x1d41cde(%rip), %xmm0 # 0x1eec6c0
rcpps %xmm1, %xmm2
mulps %xmm2, %xmm1
movaps 0x1d42021(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm1, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
cmpnltps 0x1d4733d(%rip), %xmm0 # 0x1ef1d40
andps %xmm3, %xmm0
movaps 0x1f0(%rsp), %xmm1
mulps %xmm0, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x240(%rsp)
mulps 0x200(%rsp), %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x250(%rsp)
movq 0x220(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm5
movups (%rsi,%rdx,4), %xmm3
cmpq $0x2, %rdx
movaps 0xf0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
je 0x1abc3b
movaps 0x1a0(%rsp), %xmm14
movaps %xmm5, %xmm2
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,2]
movaps %xmm2, %xmm4
psrld $0x10, %xmm4
xorps %xmm13, %xmm13
pblendw $0xaa, %xmm13, %xmm2 # xmm2 = xmm2[0],xmm13[1],xmm2[2],xmm13[3],xmm2[4],xmm13[5],xmm2[6],xmm13[7]
cvtdq2ps %xmm2, %xmm2
movaps 0x1d71ebf(%rip), %xmm15 # 0x1f1c970
mulps %xmm15, %xmm2
cvtdq2ps %xmm4, %xmm4
mulps %xmm15, %xmm4
movaps %xmm5, %xmm6
psrld $0x10, %xmm6
pblendw $0xaa, %xmm13, %xmm5 # xmm5 = xmm5[0],xmm13[1],xmm5[2],xmm13[3],xmm5[4],xmm13[5],xmm5[6],xmm13[7]
cvtdq2ps %xmm5, %xmm5
mulps %xmm15, %xmm5
cvtdq2ps %xmm6, %xmm6
mulps %xmm15, %xmm6
movaps %xmm3, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm13, %xmm3 # xmm3 = xmm3[0],xmm13[1],xmm3[2],xmm13[3],xmm3[4],xmm13[5],xmm3[6],xmm13[7]
cvtdq2ps %xmm3, %xmm3
mulps %xmm15, %xmm3
cvtdq2ps %xmm7, %xmm7
mulps %xmm15, %xmm7
mulps %xmm1, %xmm5
mulps %xmm1, %xmm6
mulps %xmm0, %xmm3
addps %xmm5, %xmm3
mulps %xmm0, %xmm7
addps %xmm6, %xmm7
movaps 0x1d41f01(%rip), %xmm5 # 0x1eeca10
subps %xmm1, %xmm5
subps %xmm0, %xmm5
mulps %xmm5, %xmm2
addps %xmm3, %xmm2
mulps %xmm4, %xmm5
addps %xmm7, %xmm5
movaps %xmm2, 0x240(%rsp)
movaps %xmm5, 0x250(%rsp)
movaps 0x230(%rsp), %xmm0
movmskps %xmm0, %esi
bsfq %rsi, %rdx
movq %rsi, 0x20(%rsp)
testl %esi, %esi
sete 0x70(%rsp)
movq 0x8(%rsp), %rsi
je 0x1aaea7
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x80(%rsp)
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0xa0(%rsp)
movq 0x1b8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x140(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0x90(%rsp)
movq %rdi, 0x98(%rsp)
movss 0x80(%r9,%r10,4), %xmm5
movss 0x260(%rsp,%rdx,4), %xmm0
movss 0x240(%rsp,%rdx,4), %xmm1
movss 0x250(%rsp,%rdx,4), %xmm2
movss %xmm0, 0x80(%r9,%r10,4)
movq 0xba8(%rsp), %rax
movq 0x8(%rax), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x270(%rsp,%rdx,4), %xmm0
movss 0x280(%rsp,%rdx,4), %xmm3
movq %rdx, 0x30(%rsp)
movss 0x290(%rsp,%rdx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x300(%rsp)
movaps %xmm3, 0x310(%rsp)
movaps %xmm4, 0x320(%rsp)
movaps %xmm1, 0x330(%rsp)
movaps %xmm2, 0x340(%rsp)
movaps 0xa0(%rsp), %xmm0
movaps %xmm0, 0x350(%rsp)
movdqa 0x80(%rsp), %xmm0
movdqa %xmm0, 0x360(%rsp)
leaq 0x370(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x370(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x380(%rsp)
movdqa 0x140(%rsp), %xmm0
movdqa %xmm0, 0x150(%rsp)
leaq 0x150(%rsp), %rcx
movq %rcx, 0x1c0(%rsp)
movq 0x18(%r8), %rcx
movq %rcx, 0x1c8(%rsp)
movq %rax, 0x1d0(%rsp)
movq %r9, 0x1d8(%rsp)
leaq 0x300(%rsp), %rax
movq %rax, 0x1e0(%rsp)
movl $0x4, 0x1e8(%rsp)
movq 0x48(%r8), %rax
testq %rax, %rax
movss %xmm5, 0x50(%rsp)
je 0x1aad41
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x50(%rsp), %xmm5
movq 0x100(%rsp), %r8
movq 0x98(%rsp), %rdi
movaps 0x1a0(%rsp), %xmm14
movq 0x8(%rsp), %rsi
movq 0x68(%rsp), %r11
movq 0x48(%rsp), %r10
movq 0x90(%rsp), %r9
movdqa 0x150(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1aadf4
movq 0xba8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1aadb6
testb $0x2, (%rcx)
jne 0x1aad76
testb $0x40, 0x3e(%r8)
je 0x1aadb6
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x50(%rsp), %xmm5
movq 0x100(%rsp), %r8
movq 0x98(%rsp), %rdi
movaps 0x1a0(%rsp), %xmm14
movq 0x8(%rsp), %rsi
movq 0x68(%rsp), %r11
movq 0x48(%rsp), %r10
movq 0x90(%rsp), %r9
movdqa 0x150(%rsp), %xmm0
pcmpeqd 0x1d40c49(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d4104d(%rip), %xmm1 # 0x1eebe20
movq 0x1d8(%rsp), %rax
movaps 0x1d40c1e(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1aae04
pcmpeqd 0x1d40c14(%rip), %xmm1 # 0x1eeba10
pxor 0x1d4101c(%rip), %xmm1 # 0x1eebe20
ptest 0x1d41d13(%rip), %xmm1 # 0x1eecb20
movaps 0xf0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
movq 0x30(%rsp), %rax
jne 0x1abb91
movss %xmm5, 0x80(%r9,%r10,4)
movq 0x20(%rsp), %rcx
btcq %rax, %rcx
bsfq %rcx, %rdx
movq %rcx, 0x20(%rsp)
testq %rcx, %rcx
sete 0x70(%rsp)
jne 0x1aaba6
jmp 0x1aaea7
movaps 0xf0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
movaps 0x1a0(%rsp), %xmm14
cmpl $0x3, %edi
movaps 0x130(%rsp), %xmm5
movaps 0x120(%rsp), %xmm6
movaps 0x110(%rsp), %xmm7
jb 0x1ab690
movq 0x10(%rsp), %rcx
movq 0x170(%rsp), %rax
leaq (%rax,%rcx,4), %rax
addq $0x2c, %rax
movups (%rax), %xmm1
movups (%rax,%rcx,4), %xmm0
cmpq $0x2, %rcx
movq %rdi, 0x98(%rsp)
je 0x1abbbd
movq 0x168(%rsp), %rcx
movq (%rcx), %rdx
movl 0x14(%rdx), %ecx
leaq (%rax,%rcx,4), %rdi
movups (%rdi), %xmm3
movq 0x10(%rsp), %rsi
movups (%rdi,%rsi,4), %xmm2
cmpq $0x2, %rsi
je 0x1abbca
leaq (%rax,%rcx,8), %rsi
movups (%rsi), %xmm9
movq 0x10(%rsp), %r8
movups (%rsi,%r8,4), %xmm7
cmpq $0x2, %r8
je 0x1abbd7
movl 0x28(%rdx), %r11d
shrl $0x2, %r11d
shll $0x2, %r11d
leaq (%rax,%r11), %r10
movups (%rax,%r11), %xmm5
movq 0x10(%rsp), %r8
movups (%r10,%r8,4), %xmm4
cmpq $0x2, %r8
je 0x1abbe5
movq 0x48(%rsp), %r10
addq %r11, %rdi
movups (%rdi), %xmm8
movq 0x10(%rsp), %r8
movups (%rdi,%r8,4), %xmm6
cmpq $0x2, %r8
je 0x1abbf2
movaps %xmm1, %xmm13
unpcklps %xmm0, %xmm13 # xmm13 = xmm13[0],xmm0[0],xmm13[1],xmm0[1]
shufps $0xa5, %xmm1, %xmm1 # xmm1 = xmm1[1,1,2,2]
shufps $0x94, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,2]
movaps %xmm3, %xmm15
unpcklps %xmm2, %xmm15 # xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
shufps $0xa5, %xmm3, %xmm3 # xmm3 = xmm3[1,1,2,2]
movaps %xmm3, 0x30(%rsp)
shufps $0x94, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,2]
addq %r11, %rsi
movaps %xmm9, %xmm3
unpcklps %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
movaps %xmm3, 0x50(%rsp)
shufps $0xa5, %xmm9, %xmm9 # xmm9 = xmm9[1,1,2,2]
movaps %xmm9, 0x20(%rsp)
shufps $0x94, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,2]
movaps %xmm7, 0x80(%rsp)
movaps %xmm5, %xmm12
unpcklps %xmm4, %xmm12 # xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,2]
movaps %xmm8, %xmm11
unpcklps %xmm6, %xmm11 # xmm11 = xmm11[0],xmm6[0],xmm11[1],xmm6[1]
shufps $0xa5, %xmm8, %xmm8 # xmm8 = xmm8[1,1,2,2]
shufps $0x94, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,2]
movups (%rsi), %xmm10
movq 0x10(%rsp), %rdi
movups (%rsi,%rdi,4), %xmm9
cmpq $0x2, %rdi
movaps %xmm0, 0x70(%rsp)
movaps %xmm2, %xmm7
movaps %xmm13, %xmm3
je 0x1abc00
movq 0x68(%rsp), %r11
movq 0x8(%rsp), %rsi
movq 0x98(%rsp), %rdi
imulq $0xc, %rcx, %rcx
addq %rcx, %rax
mulps %xmm14, %xmm12
movaps 0x390(%rsp), %xmm13
mulps %xmm13, %xmm3
addps %xmm12, %xmm3
movaps %xmm10, %xmm12
unpcklps %xmm9, %xmm12 # xmm12 = xmm12[0],xmm9[0],xmm12[1],xmm9[1]
shufps $0xa5, %xmm10, %xmm10 # xmm10 = xmm10[1,1,2,2]
shufps $0x94, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,2]
mulps %xmm14, %xmm11
mulps %xmm14, %xmm12
mulps %xmm13, %xmm15
addps %xmm11, %xmm15
movaps %xmm15, %xmm11
movaps 0x50(%rsp), %xmm15
mulps %xmm13, %xmm15
addps %xmm12, %xmm15
mulps %xmm14, %xmm5
mulps %xmm14, %xmm8
mulps %xmm14, %xmm10
mulps %xmm13, %xmm1
addps %xmm5, %xmm1
movaps 0x30(%rsp), %xmm0
mulps %xmm13, %xmm0
addps %xmm8, %xmm0
movaps %xmm14, %xmm2
movaps %xmm0, %xmm14
movaps 0x20(%rsp), %xmm0
mulps %xmm13, %xmm0
addps %xmm10, %xmm0
movaps %xmm0, %xmm10
mulps %xmm2, %xmm4
mulps %xmm2, %xmm6
mulps %xmm2, %xmm9
movaps 0x70(%rsp), %xmm8
mulps %xmm13, %xmm8
addps %xmm4, %xmm8
mulps %xmm13, %xmm7
addps %xmm6, %xmm7
movaps 0x80(%rsp), %xmm12
mulps %xmm13, %xmm12
addps %xmm9, %xmm12
movq %rax, 0x188(%rsp)
movq 0x10(%rsp), %rax
movq %rax, 0x190(%rsp)
movq %rdi, 0x198(%rsp)
movl 0x18(%rdx), %ecx
movl 0x1c(%rdx), %eax
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movss (%r9,%r10,4), %xmm5
movss 0x10(%r9,%r10,4), %xmm6
movss 0x20(%r9,%r10,4), %xmm4
movss 0x40(%r9,%r10,4), %xmm0
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x70(%rsp)
movss 0x50(%r9,%r10,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x10(%rsp)
subps %xmm5, %xmm3
movaps %xmm11, %xmm2
subps %xmm6, %xmm2
movaps %xmm2, 0x2e0(%rsp)
subps %xmm4, %xmm15
subps %xmm5, %xmm1
subps %xmm6, %xmm14
movaps %xmm14, 0x30(%rsp)
subps %xmm4, %xmm10
movaps %xmm10, 0x20(%rsp)
subps %xmm5, %xmm8
subps %xmm6, %xmm7
subps %xmm4, %xmm12
movaps %xmm8, %xmm13
subps %xmm3, %xmm13
movaps %xmm7, %xmm10
subps %xmm2, %xmm10
movaps %xmm12, %xmm14
subps %xmm15, %xmm14
movaps %xmm8, %xmm4
addps %xmm3, %xmm4
movaps %xmm7, %xmm5
addps %xmm2, %xmm5
movaps %xmm12, %xmm6
addps %xmm15, %xmm6
movaps %xmm13, %xmm9
mulps %xmm5, %xmm9
mulps %xmm14, %xmm5
movaps %xmm10, %xmm0
mulps %xmm6, %xmm0
subps %xmm5, %xmm0
movaps %xmm13, 0x170(%rsp)
mulps %xmm13, %xmm6
movaps %xmm4, %xmm5
movaps %xmm14, 0xa0(%rsp)
mulps %xmm14, %xmm5
subps %xmm6, %xmm5
movss 0x60(%r9,%r10,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps %xmm10, 0x140(%rsp)
mulps %xmm10, %xmm4
subps %xmm4, %xmm9
movaps %xmm3, %xmm14
subps %xmm1, %xmm14
mulps %xmm11, %xmm9
mulps 0x10(%rsp), %xmm5
addps %xmm9, %xmm5
movaps %xmm2, %xmm4
movaps %xmm2, %xmm13
movaps 0x30(%rsp), %xmm10
subps %xmm10, %xmm13
mulps 0x70(%rsp), %xmm0
addps %xmm5, %xmm0
movaps %xmm15, %xmm2
movaps 0x20(%rsp), %xmm6
subps %xmm6, %xmm2
addps %xmm10, %xmm4
movaps %xmm15, 0x50(%rsp)
movaps %xmm15, %xmm5
addps %xmm6, %xmm5
movaps %xmm14, %xmm6
mulps %xmm4, %xmm6
mulps %xmm2, %xmm4
movaps %xmm13, %xmm9
mulps %xmm5, %xmm9
subps %xmm4, %xmm9
movaps %xmm3, 0x80(%rsp)
movaps %xmm3, %xmm4
movaps 0x10(%rsp), %xmm3
addps %xmm1, %xmm4
mulps %xmm14, %xmm5
movaps %xmm4, %xmm15
movaps %xmm2, 0x100(%rsp)
mulps %xmm2, %xmm15
subps %xmm5, %xmm15
mulps %xmm13, %xmm4
subps %xmm4, %xmm6
mulps %xmm11, %xmm6
mulps %xmm3, %xmm15
addps %xmm6, %xmm15
movaps 0x70(%rsp), %xmm4
mulps %xmm4, %xmm9
addps %xmm15, %xmm9
movaps %xmm1, %xmm15
subps %xmm8, %xmm15
addps %xmm1, %xmm8
movaps %xmm10, %xmm6
subps %xmm7, %xmm6
addps %xmm10, %xmm7
movaps 0x20(%rsp), %xmm1
movaps %xmm1, %xmm10
subps %xmm12, %xmm10
addps %xmm1, %xmm12
movaps %xmm15, %xmm5
mulps %xmm7, %xmm5
mulps %xmm10, %xmm7
movaps %xmm6, %xmm1
mulps %xmm12, %xmm1
subps %xmm7, %xmm1
mulps %xmm15, %xmm12
movaps %xmm8, %xmm2
mulps %xmm10, %xmm2
subps %xmm12, %xmm2
movaps %xmm6, %xmm7
mulps %xmm6, %xmm8
subps %xmm8, %xmm5
movaps %xmm0, %xmm6
movaps %xmm11, 0x20(%rsp)
mulps %xmm11, %xmm5
mulps %xmm3, %xmm2
addps %xmm5, %xmm2
mulps %xmm4, %xmm1
addps %xmm2, %xmm1
movaps %xmm0, %xmm2
addps %xmm9, %xmm2
addps %xmm1, %xmm2
minps %xmm9, %xmm0
minps %xmm1, %xmm0
movaps %xmm6, 0x2c0(%rsp)
movaps %xmm9, 0x2a0(%rsp)
maxps %xmm9, %xmm6
maxps %xmm1, %xmm6
movaps %xmm2, 0x2b0(%rsp)
movaps %xmm2, %xmm1
andps 0x1d4135f(%rip), %xmm1 # 0x1eec6c0
mulps 0x1d46a08(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d4135d(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
movmskps %xmm6, %edx
testl %edx, %edx
je 0x1ab64b
movaps %xmm4, %xmm8
movaps %xmm13, %xmm0
movaps 0x2e0(%rsp), %xmm4
movaps %xmm10, %xmm11
movaps %xmm10, 0x30(%rsp)
movaps 0xa0(%rsp), %xmm10
mulps %xmm10, %xmm0
movaps %xmm14, %xmm1
movaps 0x140(%rsp), %xmm3
mulps %xmm3, %xmm1
movaps %xmm7, 0x2d0(%rsp)
movaps 0x100(%rsp), %xmm9
mulps %xmm9, %xmm7
movaps %xmm15, %xmm5
mulps %xmm13, %xmm5
mulps %xmm9, %xmm3
subps %xmm0, %xmm3
movaps 0x170(%rsp), %xmm12
movaps %xmm12, %xmm2
mulps %xmm13, %xmm12
mulps %xmm11, %xmm13
subps %xmm7, %xmm13
movaps 0x1d412c0(%rip), %xmm11 # 0x1eec6c0
andps %xmm11, %xmm0
andps %xmm11, %xmm7
cmpltps %xmm7, %xmm0
blendvps %xmm0, %xmm3, %xmm13
movaps %xmm14, %xmm0
mulps 0x30(%rsp), %xmm0
mulps %xmm9, %xmm2
mulps %xmm9, %xmm15
mulps %xmm14, %xmm10
subps %xmm2, %xmm10
subps %xmm0, %xmm15
andps %xmm11, %xmm2
andps %xmm11, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm10, %xmm15
mulps 0x2d0(%rsp), %xmm14
subps %xmm1, %xmm12
subps %xmm5, %xmm14
andps %xmm11, %xmm1
andps %xmm11, %xmm5
cmpltps %xmm5, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm12, %xmm14
movaps 0x20(%rsp), %xmm1
mulps %xmm14, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm15, %xmm0
addps %xmm1, %xmm0
mulps %xmm13, %xmm8
addps %xmm0, %xmm8
addps %xmm8, %xmm8
movaps 0x50(%rsp), %xmm0
mulps %xmm14, %xmm0
mulps %xmm15, %xmm4
addps %xmm0, %xmm4
movaps 0x80(%rsp), %xmm3
mulps %xmm13, %xmm3
addps %xmm4, %xmm3
rcpps %xmm8, %xmm1
movaps %xmm8, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d41554(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movss 0x80(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r9,%r10,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1ab64b
cmpneqps 0x1d40507(%rip), %xmm8 # 0x1eeba10
andps %xmm8, %xmm1
movmskps %xmm1, %edx
testl %edx, %edx
je 0x1ab64b
movaps 0x2c0(%rsp), %xmm2
movaps %xmm2, 0x1f0(%rsp)
movaps 0x2a0(%rsp), %xmm2
movaps %xmm2, 0x200(%rsp)
movaps 0x2b0(%rsp), %xmm2
movaps %xmm2, 0x210(%rsp)
leaq 0x188(%rsp), %rdx
movq %rdx, 0x220(%rsp)
movaps %xmm1, 0x230(%rsp)
movaps %xmm0, 0x260(%rsp)
movaps %xmm13, 0x270(%rsp)
movaps %xmm15, 0x280(%rsp)
movaps %xmm14, 0x290(%rsp)
movq 0xba8(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rdi
movl 0x90(%r9,%r10,4), %edx
testl %edx, 0x34(%rdi)
je 0x1ab64b
movq 0xba8(%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x1ab70e
cmpq $0x0, 0x48(%rdi)
jne 0x1ab70e
movb $0x1, %cl
xorl %eax, %eax
movaps 0x130(%rsp), %xmm5
movaps 0x120(%rsp), %xmm6
movaps 0x110(%rsp), %xmm7
movaps 0xf0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
testb %cl, %cl
je 0x1ab62a
movl $0xff800000, 0x80(%r9,%r10,4) # imm = 0xFF800000
pushq $0x1
popq %rcx
jmp 0x1ab635
xorl %ecx, %ecx
testq %rax, %rax
jne 0x1ab702
testb $0x3, %cl
movq 0x2f8(%rsp), %rcx
je 0x1aa04e
jmp 0x1abc55
movaps 0x130(%rsp), %xmm5
movaps 0x120(%rsp), %xmm6
movaps 0x110(%rsp), %xmm7
movaps 0xf0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
xorl %eax, %eax
xorl %ecx, %ecx
jmp 0x1ab615
andq $-0x10, %rax
movq (%rax), %rcx
movq (%rcx), %rax
movq 0x168(%rsp), %rdx
movq %rcx, (%rdx)
jmp 0x1ab692
shufps $0x54, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1aa290
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
shufps $0x54, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
jmp 0x1aa2b4
shufps $0x54, %xmm11, %xmm11 # xmm11 = xmm11[0,1,1,1]
shufps $0x54, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,1]
jmp 0x1aa2db
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1aa30d
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
jmp 0x1aa32d
shufps $0x54, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1aa3de
movq %rax, (%r11)
addq $0x8, %r11
jmp 0x1ab635
movaps 0x210(%rsp), %xmm1
movaps %xmm1, %xmm0
andps 0x1d40fa0(%rip), %xmm0 # 0x1eec6c0
rcpps %xmm1, %xmm2
mulps %xmm2, %xmm1
movaps 0x1d412e3(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm1, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
cmpnltps 0x1d465ff(%rip), %xmm0 # 0x1ef1d40
andps %xmm3, %xmm0
movaps 0x1f0(%rsp), %xmm1
mulps %xmm0, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x240(%rsp)
mulps 0x200(%rsp), %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x250(%rsp)
movq 0x220(%rsp), %rdx
movq (%rdx), %rsi
movq 0x8(%rdx), %rdx
movups (%rsi), %xmm5
movups (%rsi,%rdx,4), %xmm3
cmpq $0x2, %rdx
movaps 0xf0(%rsp), %xmm8
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
je 0x1abc48
movaps %xmm5, %xmm2
unpcklps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
shufps $0xa5, %xmm5, %xmm5 # xmm5 = xmm5[1,1,2,2]
shufps $0x94, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,2]
movaps %xmm2, %xmm4
psrld $0x10, %xmm4
xorps %xmm13, %xmm13
pblendw $0xaa, %xmm13, %xmm2 # xmm2 = xmm2[0],xmm13[1],xmm2[2],xmm13[3],xmm2[4],xmm13[5],xmm2[6],xmm13[7]
cvtdq2ps %xmm2, %xmm2
movaps 0x1d7118a(%rip), %xmm14 # 0x1f1c970
mulps %xmm14, %xmm2
cvtdq2ps %xmm4, %xmm4
mulps %xmm14, %xmm4
movaps %xmm5, %xmm6
psrld $0x10, %xmm6
pblendw $0xaa, %xmm13, %xmm5 # xmm5 = xmm5[0],xmm13[1],xmm5[2],xmm13[3],xmm5[4],xmm13[5],xmm5[6],xmm13[7]
cvtdq2ps %xmm5, %xmm5
mulps %xmm14, %xmm5
cvtdq2ps %xmm6, %xmm6
mulps %xmm14, %xmm6
movaps %xmm3, %xmm7
psrld $0x10, %xmm7
pblendw $0xaa, %xmm13, %xmm3 # xmm3 = xmm3[0],xmm13[1],xmm3[2],xmm13[3],xmm3[4],xmm13[5],xmm3[6],xmm13[7]
cvtdq2ps %xmm3, %xmm3
mulps %xmm14, %xmm3
cvtdq2ps %xmm7, %xmm7
mulps %xmm14, %xmm7
mulps %xmm1, %xmm5
mulps %xmm1, %xmm6
mulps %xmm0, %xmm3
addps %xmm5, %xmm3
mulps %xmm0, %xmm7
addps %xmm6, %xmm7
movaps 0x1d411cc(%rip), %xmm5 # 0x1eeca10
subps %xmm1, %xmm5
subps %xmm0, %xmm5
mulps %xmm5, %xmm2
addps %xmm3, %xmm2
mulps %xmm4, %xmm5
addps %xmm7, %xmm5
movaps %xmm2, 0x240(%rsp)
movaps %xmm5, 0x250(%rsp)
movaps 0x230(%rsp), %xmm0
movmskps %xmm0, %edx
bsfq %rdx, %r8
testl %edx, %edx
sete 0x50(%rsp)
movq 0x8(%rsp), %rsi
je 0x1abb74
movd %ecx, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x70(%rsp)
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x80(%rsp)
movq 0x1b8(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x170(%rsp)
movq %rdi, 0xa0(%rsp)
movq %r9, 0x90(%rsp)
movss 0x80(%r9,%r10,4), %xmm5
movss 0x260(%rsp,%r8,4), %xmm0
movss 0x240(%rsp,%r8,4), %xmm1
movss 0x250(%rsp,%r8,4), %xmm2
movss %xmm0, 0x80(%r9,%r10,4)
movq 0xba8(%rsp), %rax
movq 0x8(%rax), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x270(%rsp,%r8,4), %xmm0
movss 0x280(%rsp,%r8,4), %xmm3
movss 0x290(%rsp,%r8,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x300(%rsp)
movaps %xmm3, 0x310(%rsp)
movaps %xmm4, 0x320(%rsp)
movaps %xmm1, 0x330(%rsp)
movaps %xmm2, 0x340(%rsp)
movaps 0x80(%rsp), %xmm0
movaps %xmm0, 0x350(%rsp)
movdqa 0x70(%rsp), %xmm0
movdqa %xmm0, 0x360(%rsp)
leaq 0x370(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x370(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x380(%rsp)
movdqa 0x170(%rsp), %xmm0
movdqa %xmm0, 0x150(%rsp)
leaq 0x150(%rsp), %rcx
movq %rcx, 0x1c0(%rsp)
movq 0x18(%rdi), %rcx
movq %rcx, 0x1c8(%rsp)
movq %rax, 0x1d0(%rsp)
movq %r9, 0x1d8(%rsp)
leaq 0x300(%rsp), %rax
movq %rax, 0x1e0(%rsp)
movl $0x4, 0x1e8(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
movq %rdx, 0x10(%rsp)
movq %r8, 0x20(%rsp)
movss %xmm5, 0x30(%rsp)
je 0x1aba5d
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x30(%rsp), %xmm5
movq 0x20(%rsp), %r8
movq 0x10(%rsp), %rdx
movq 0xa0(%rsp), %rdi
movq 0x48(%rsp), %r10
movq 0x90(%rsp), %r9
movdqa 0x150(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1abafe
movq 0xba8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1abac0
testb $0x2, (%rcx)
jne 0x1aba91
testb $0x40, 0x3e(%rdi)
je 0x1abac0
leaq 0x1c0(%rsp), %rdi
callq *%rax
movss 0x30(%rsp), %xmm5
movq 0x20(%rsp), %r8
movq 0x10(%rsp), %rdx
movq 0xa0(%rsp), %rdi
movq 0x48(%rsp), %r10
movq 0x90(%rsp), %r9
movdqa 0x150(%rsp), %xmm0
pcmpeqd 0x1d3ff3f(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d40343(%rip), %xmm1 # 0x1eebe20
movq 0x1d8(%rsp), %rax
movaps 0x1d3ff14(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1abb0e
pcmpeqd 0x1d3ff0a(%rip), %xmm1 # 0x1eeba10
pxor 0x1d40312(%rip), %xmm1 # 0x1eebe20
ptest 0x1d41009(%rip), %xmm1 # 0x1eecb20
movaps 0xf0(%rsp), %xmm8
movq 0x68(%rsp), %r11
movaps 0xe0(%rsp), %xmm9
movaps 0xd0(%rsp), %xmm10
movq 0x8(%rsp), %rsi
movaps 0xc0(%rsp), %xmm11
movaps 0xb0(%rsp), %xmm12
jne 0x1abc0f
movss %xmm5, 0x80(%r9,%r10,4)
btcq %r8, %rdx
bsfq %rdx, %r8
testq %rdx, %rdx
sete 0x50(%rsp)
jne 0x1ab8cb
movaps 0x130(%rsp), %xmm5
movaps 0x120(%rsp), %xmm6
movaps 0x110(%rsp), %xmm7
jmp 0x1ab690
testb $0x1, 0x70(%rsp)
jne 0x1aaea7
movb $0x1, %cl
xorl %eax, %eax
movaps 0x130(%rsp), %xmm5
movaps 0x120(%rsp), %xmm6
movaps 0x110(%rsp), %xmm7
jmp 0x1ab615
shufps $0x54, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
jmp 0x1aaef6
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
shufps $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
jmp 0x1aaf1e
shufps $0x54, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,1]
shufps $0x54, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,1]
jmp 0x1aaf3a
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
jmp 0x1aaf63
shufps $0x54, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
shufps $0x54, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
jmp 0x1aaf83
shufps $0x54, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
shufps $0x54, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,1]
jmp 0x1ab013
testb $0x1, 0x50(%rsp)
movaps 0x130(%rsp), %xmm5
movaps 0x120(%rsp), %xmm6
movaps 0x110(%rsp), %xmm7
jne 0x1ab690
movb $0x1, %cl
xorl %eax, %eax
jmp 0x1ab615
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
jmp 0x1aaa7c
shufps $0x54, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
shufps $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
jmp 0x1ab7ba
leaq 0x3c0(%rsp), %rax
cmpq %rax, %rcx
setne %al
addq $0xb68, %rsp # imm = 0xB68
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 1, false, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::ObjectIntersectorK<4, false>>, false>::intersectCoherent(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
__forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
|
pcmpeqd %xmm3, %xmm3
movdqa (%rdi), %xmm2
pcmpeqd %xmm3, %xmm2
movmskps %xmm2, %eax
testl %eax, %eax
je 0x1ac6ca
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1188, %rsp # imm = 0x1188
movq (%rsi), %rsi
movq %rsi, 0xa0(%rsp)
movzbl %al, %eax
movaps (%rdx), %xmm9
movaps 0x10(%rdx), %xmm10
movaps 0x20(%rdx), %xmm11
movaps 0x40(%rdx), %xmm4
movaps 0x1d409fb(%rip), %xmm1 # 0x1eec6c0
movaps %xmm4, %xmm0
andps %xmm1, %xmm0
movaps 0x1d4606e(%rip), %xmm5 # 0x1ef1d40
cmpltps %xmm5, %xmm0
movaps %xmm4, %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps 0x30(%rdx), %xmm12
movaps 0x50(%rdx), %xmm7
movaps %xmm7, %xmm0
andps %xmm1, %xmm0
cmpltps %xmm5, %xmm0
blendvps %xmm0, %xmm5, %xmm7
movaps 0x60(%rdx), %xmm8
andps %xmm8, %xmm1
cmpltps %xmm5, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm5, %xmm8
rcpps %xmm6, %xmm0
mulps %xmm0, %xmm6
movaps 0x1d40cf6(%rip), %xmm13 # 0x1eeca10
movaps %xmm13, %xmm14
subps %xmm6, %xmm14
mulps %xmm0, %xmm14
addps %xmm0, %xmm14
rcpps %xmm7, %xmm0
mulps %xmm0, %xmm7
movaps %xmm13, %xmm15
subps %xmm7, %xmm15
mulps %xmm0, %xmm15
addps %xmm0, %xmm15
rcpps %xmm8, %xmm0
mulps %xmm0, %xmm8
subps %xmm8, %xmm13
mulps %xmm0, %xmm13
addps %xmm0, %xmm13
xorps %xmm5, %xmm5
maxps %xmm5, %xmm12
movaps %xmm12, 0xe0(%rsp)
movaps 0x80(%rdx), %xmm0
maxps %xmm5, %xmm0
movaps %xmm0, 0x100(%rsp)
cmpltps %xmm5, %xmm4
andps 0x1d40d9f(%rip), %xmm4 # 0x1eecb20
movaps 0x50(%rdx), %xmm0
movaps 0x60(%rdx), %xmm1
cmpltps %xmm5, %xmm0
andps 0x1d7416c(%rip), %xmm0 # 0x1f1ff00
orps %xmm4, %xmm0
cmpltps %xmm5, %xmm1
andps 0x1d4607e(%rip), %xmm1 # 0x1ef1e20
pxor %xmm3, %xmm2
por %xmm1, %xmm2
por %xmm0, %xmm2
movdqa %xmm2, 0xf0(%rsp)
leaq 0x240(%rsp), %r11
movq %rcx, 0xb8(%rsp)
movq %rdx, 0xb0(%rsp)
movaps %xmm9, 0x160(%rsp)
movaps %xmm10, 0x150(%rsp)
movaps %xmm11, 0x140(%rsp)
movaps %xmm13, 0x130(%rsp)
movaps %xmm14, 0x120(%rsp)
movaps %xmm15, 0x110(%rsp)
bsfq %rax, %rsi
movd 0xf0(%rsp,%rsi,4), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd 0xf0(%rsp), %xmm0
movmskps %xmm0, %esi
notq %rsi
andq %rax, %rsi
movq %rsi, 0xa8(%rsp)
movaps 0x1d3fbb7(%rip), %xmm12 # 0x1eeb9f0
movaps %xmm12, %xmm2
blendvps %xmm0, %xmm14, %xmm2
movaps %xmm2, %xmm1
shufps $0xb1, %xmm2, %xmm1 # xmm1 = xmm1[1,0],xmm2[3,2]
minps %xmm2, %xmm1
movaps %xmm1, %xmm8
shufps $0x4e, %xmm1, %xmm8 # xmm8 = xmm8[2,3],xmm1[0,1]
minps %xmm1, %xmm8
movaps %xmm12, %xmm2
blendvps %xmm0, %xmm15, %xmm2
movaps %xmm2, %xmm1
shufps $0xb1, %xmm2, %xmm1 # xmm1 = xmm1[1,0],xmm2[3,2]
minps %xmm2, %xmm1
movaps %xmm1, %xmm2
shufps $0x4e, %xmm1, %xmm2 # xmm2 = xmm2[2,3],xmm1[0,1]
minps %xmm1, %xmm2
movaps %xmm12, %xmm3
blendvps %xmm0, %xmm13, %xmm3
insertps $0x1c, %xmm2, %xmm8 # xmm8 = xmm8[0],xmm2[0],zero,zero
movaps %xmm3, %xmm1
shufps $0xb1, %xmm3, %xmm1 # xmm1 = xmm1[1,0],xmm3[3,2]
minps %xmm3, %xmm1
movaps %xmm1, %xmm2
shufps $0x4e, %xmm1, %xmm2 # xmm2 = xmm2[2,3],xmm1[0,1]
minps %xmm1, %xmm2
insertps $0x20, %xmm2, %xmm8 # xmm8 = xmm8[0,1],xmm2[0],xmm8[3]
movaps 0x1d3fb55(%rip), %xmm7 # 0x1eeba00
movaps %xmm7, %xmm2
movaps %xmm7, %xmm6
blendvps %xmm0, %xmm14, %xmm2
movaps %xmm2, %xmm1
shufps $0xb1, %xmm2, %xmm1 # xmm1 = xmm1[1,0],xmm2[3,2]
maxps %xmm2, %xmm1
movaps %xmm1, %xmm7
shufps $0x4e, %xmm1, %xmm7 # xmm7 = xmm7[2,3],xmm1[0,1]
maxps %xmm1, %xmm7
movaps %xmm6, %xmm2
movaps %xmm0, %xmm1
blendvps %xmm0, %xmm15, %xmm2
movaps %xmm2, %xmm0
shufps $0xb1, %xmm2, %xmm0 # xmm0 = xmm0[1,0],xmm2[3,2]
maxps %xmm2, %xmm0
movaps %xmm0, %xmm2
shufps $0x4e, %xmm0, %xmm2 # xmm2 = xmm2[2,3],xmm0[0,1]
maxps %xmm0, %xmm2
movaps %xmm6, %xmm4
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm13, %xmm4
insertps $0x1c, %xmm2, %xmm7 # xmm7 = xmm7[0],xmm2[0],zero,zero
movaps %xmm4, %xmm0
shufps $0xb1, %xmm4, %xmm0 # xmm0 = xmm0[1,0],xmm4[3,2]
maxps %xmm4, %xmm0
movaps %xmm0, %xmm2
shufps $0x4e, %xmm0, %xmm2 # xmm2 = xmm2[2,3],xmm0[0,1]
maxps %xmm0, %xmm2
insertps $0x20, %xmm2, %xmm7 # xmm7 = xmm7[0,1],xmm2[0],xmm7[3]
movaps %xmm7, 0x80(%rsp)
movaps %xmm8, %xmm0
cmpnltps 0x1d3fae5(%rip), %xmm0 # 0x1eeba10
movaps %xmm0, 0x10(%rsp)
blendvps %xmm0, %xmm8, %xmm7
movshdup %xmm7, %xmm0 # xmm0 = xmm7[1,1,3,3]
xorl %r13d, %r13d
xorps %xmm4, %xmm4
ucomiss %xmm0, %xmm4
seta %r13b
shll $0x4, %r13d
orq $0x20, %r13
movaps %xmm7, %xmm0
unpckhpd %xmm7, %xmm0 # xmm0 = xmm0[1],xmm7[1]
xorl %r15d, %r15d
ucomiss %xmm0, %xmm4
seta %r15b
shll $0x4, %r15d
orq $0x40, %r15
movq %r13, 0xd8(%rsp)
xorq $0x10, %r13
movq %r13, 0xd0(%rsp)
xorl %ebx, %ebx
ucomiss %xmm7, %xmm4
seta %bl
movaps %xmm12, %xmm4
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm9, %xmm4
movaps %xmm4, %xmm0
shufps $0xb1, %xmm4, %xmm0 # xmm0 = xmm0[1,0],xmm4[3,2]
minps %xmm4, %xmm0
movaps %xmm0, %xmm2
shufps $0x4e, %xmm0, %xmm2 # xmm2 = xmm2[2,3],xmm0[0,1]
minps %xmm0, %xmm2
movaps %xmm12, %xmm4
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm10, %xmm4
movaps %xmm4, %xmm0
shufps $0xb1, %xmm4, %xmm0 # xmm0 = xmm0[1,0],xmm4[3,2]
minps %xmm4, %xmm0
movaps %xmm0, %xmm4
shufps $0x4e, %xmm0, %xmm4 # xmm4 = xmm4[2,3],xmm0[0,1]
minps %xmm0, %xmm4
movaps %xmm12, %xmm5
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm11, %xmm5
insertps $0x1c, %xmm4, %xmm2 # xmm2 = xmm2[0],xmm4[0],zero,zero
movaps %xmm5, %xmm0
shufps $0xb1, %xmm5, %xmm0 # xmm0 = xmm0[1,0],xmm5[3,2]
minps %xmm5, %xmm0
movaps %xmm0, %xmm4
shufps $0x4e, %xmm0, %xmm4 # xmm4 = xmm4[2,3],xmm0[0,1]
minps %xmm0, %xmm4
insertps $0x20, %xmm4, %xmm2 # xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
movaps %xmm6, %xmm4
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm9, %xmm4
movaps %xmm4, %xmm0
shufps $0xb1, %xmm4, %xmm0 # xmm0 = xmm0[1,0],xmm4[3,2]
maxps %xmm4, %xmm0
movaps %xmm0, %xmm4
shufps $0x4e, %xmm0, %xmm4 # xmm4 = xmm4[2,3],xmm0[0,1]
maxps %xmm0, %xmm4
movaps %xmm6, %xmm5
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm10, %xmm5
movaps %xmm5, %xmm0
shufps $0xb1, %xmm5, %xmm0 # xmm0 = xmm0[1,0],xmm5[3,2]
maxps %xmm5, %xmm0
movaps %xmm0, %xmm5
shufps $0x4e, %xmm0, %xmm5 # xmm5 = xmm5[2,3],xmm0[0,1]
maxps %xmm0, %xmm5
movaps %xmm1, %xmm0
movaps %xmm1, 0x70(%rsp)
blendvps %xmm0, %xmm11, %xmm6
insertps $0x1c, %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],zero,zero
movaps %xmm6, %xmm0
shufps $0xb1, %xmm6, %xmm0 # xmm0 = xmm0[1,0],xmm6[3,2]
maxps %xmm6, %xmm0
movaps %xmm0, %xmm5
shufps $0x4e, %xmm0, %xmm5 # xmm5 = xmm5[2,3],xmm0[0,1]
maxps %xmm0, %xmm5
insertps $0x20, %xmm5, %xmm4 # xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
movaps %xmm4, %xmm3
movaps 0x10(%rsp), %xmm0
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm4, %xmm2
movaps 0x1d3f986(%rip), %xmm5 # 0x1eeba00
movaps %xmm1, %xmm0
blendvps %xmm0, 0x100(%rsp), %xmm5
movaps %xmm5, %xmm4
shufps $0xb1, %xmm5, %xmm4 # xmm4 = xmm4[1,0],xmm5[3,2]
movaps %xmm5, (%rsp)
maxps %xmm5, %xmm4
movaps %xmm4, %xmm6
unpckhpd %xmm4, %xmm6 # xmm6 = xmm6[1],xmm4[1]
movaps %xmm12, %xmm5
blendvps %xmm0, 0xe0(%rsp), %xmm5
movaps %xmm5, %xmm1
shufps $0xb1, %xmm5, %xmm1 # xmm1 = xmm1[1,0],xmm5[3,2]
minps %xmm5, %xmm1
maxss %xmm4, %xmm6
movaps 0x10(%rsp), %xmm0
blendvps %xmm0, 0x80(%rsp), %xmm8
mulps %xmm7, %xmm2
mulps %xmm8, %xmm3
shll $0x4, %ebx
movq %rbx, %rbp
xorq $0x10, %rbp
movq %r15, %r13
xorq $0x10, %r15
movq 0xa0(%rsp), %rax
movq 0x70(%rax), %rax
movq %rax, 0x240(%rsp)
andl $0x0, 0x248(%rsp)
movaps %xmm7, %xmm0
shufps $0x0, %xmm7, %xmm0 # xmm0 = xmm0[0,0],xmm7[0,0]
movaps %xmm0, 0x1f0(%rsp)
movaps %xmm2, %xmm0
shufps $0x0, %xmm2, %xmm0 # xmm0 = xmm0[0,0],xmm2[0,0]
movaps %xmm0, 0x1e0(%rsp)
movaps %xmm7, %xmm0
shufps $0x55, %xmm7, %xmm0 # xmm0 = xmm0[1,1],xmm7[1,1]
movaps %xmm0, 0x1d0(%rsp)
movaps %xmm2, %xmm0
shufps $0x55, %xmm2, %xmm0 # xmm0 = xmm0[1,1],xmm2[1,1]
movaps %xmm0, 0x1c0(%rsp)
shufps $0xaa, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2]
movaps %xmm7, 0x230(%rsp)
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
movaps %xmm2, 0x220(%rsp)
movaps %xmm8, %xmm0
shufps $0x0, %xmm8, %xmm0 # xmm0 = xmm0[0,0],xmm8[0,0]
movaps %xmm0, 0x1b0(%rsp)
movaps %xmm3, %xmm0
shufps $0x0, %xmm3, %xmm0 # xmm0 = xmm0[0,0],xmm3[0,0]
movaps %xmm0, 0x1a0(%rsp)
movaps %xmm8, %xmm0
shufps $0x55, %xmm8, %xmm0 # xmm0 = xmm0[1,1],xmm8[1,1]
movaps %xmm0, 0x190(%rsp)
movaps %xmm1, %xmm0
shufps $0x0, %xmm1, %xmm0 # xmm0 = xmm0[0,0],xmm1[0,0]
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
minps %xmm0, %xmm1
movaps %xmm1, 0x200(%rsp)
movaps %xmm12, %xmm1
movaps 0x70(%rsp), %xmm0
blendvps %xmm0, 0xe0(%rsp), %xmm1
movaps %xmm1, 0x10(%rsp)
movaps %xmm3, %xmm0
shufps $0x55, %xmm3, %xmm0 # xmm0 = xmm0[1,1],xmm3[1,1]
movaps %xmm0, 0x180(%rsp)
shufps $0xaa, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
movaps %xmm8, 0x70(%rsp)
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
movaps %xmm3, 0x210(%rsp)
leaq 0x250(%rsp), %r14
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps %xmm6, 0x80(%rsp)
cmpq %r11, %r14
je 0x1ac6a8
movss -0x8(%r14), %xmm12
addq $-0x10, %r14
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movaps %xmm12, %xmm1
cmpltps (%rsp), %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1ac1e9
movq (%r14), %r12
testb $0x8, %r12b
jne 0x1ac50d
movaps 0x20(%r12,%rbx), %xmm1
mulps 0x1f0(%rsp), %xmm1
subps 0x1e0(%rsp), %xmm1
movq 0xd8(%rsp), %rax
movaps 0x20(%r12,%rax), %xmm2
mulps 0x1d0(%rsp), %xmm2
subps 0x1c0(%rsp), %xmm2
pmaxsd %xmm1, %xmm2
movaps 0x20(%r12,%r13), %xmm1
mulps 0x230(%rsp), %xmm1
subps 0x220(%rsp), %xmm1
pmaxsd 0x200(%rsp), %xmm1
pmaxsd %xmm2, %xmm1
movaps 0x20(%r12,%rbp), %xmm2
mulps 0x1b0(%rsp), %xmm2
subps 0x1a0(%rsp), %xmm2
movq 0xd0(%rsp), %rax
movaps 0x20(%r12,%rax), %xmm3
mulps 0x190(%rsp), %xmm3
subps 0x180(%rsp), %xmm3
pminsd %xmm2, %xmm3
movaps 0x20(%r12,%r15), %xmm2
mulps 0x70(%rsp), %xmm2
subps 0x210(%rsp), %xmm2
pminsd 0x80(%rsp), %xmm2
pminsd %xmm3, %xmm2
movdqa %xmm1, 0x90(%rsp)
cmpleps %xmm2, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1ac456
movzbl %al, %r8d
movaps 0x1d3f6f5(%rip), %xmm12 # 0x1eeb9f0
pushq $0x8
popq %rax
xorl %r9d, %r9d
bsfq %r8, %rsi
movss 0x20(%r12,%rsi,4), %xmm3
movss 0x30(%r12,%rsi,4), %xmm2
movss 0x40(%r12,%rsi,4), %xmm4
movss 0x50(%r12,%rsi,4), %xmm1
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
subps %xmm9, %xmm3
mulps %xmm14, %xmm3
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
subps %xmm10, %xmm4
mulps %xmm15, %xmm4
movss 0x60(%r12,%rsi,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
subps %xmm11, %xmm5
mulps %xmm13, %xmm5
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm9, %xmm2
mulps %xmm14, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
subps %xmm10, %xmm1
mulps %xmm15, %xmm1
movss 0x70(%r12,%rsi,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
subps %xmm11, %xmm6
mulps %xmm13, %xmm6
movaps %xmm3, %xmm7
pminsd %xmm2, %xmm7
movaps %xmm4, %xmm8
pminsd %xmm1, %xmm8
pmaxsd %xmm7, %xmm8
movaps %xmm5, %xmm7
pminsd %xmm6, %xmm7
pmaxsd 0x10(%rsp), %xmm7
pmaxsd %xmm8, %xmm7
pmaxsd %xmm3, %xmm2
pmaxsd %xmm4, %xmm1
pminsd %xmm2, %xmm1
pmaxsd %xmm5, %xmm6
pminsd (%rsp), %xmm6
pminsd %xmm1, %xmm6
cmpleps %xmm6, %xmm7
movmskps %xmm7, %edi
testl %edi, %edi
je 0x1ac41e
movss 0x90(%rsp,%rsi,4), %xmm1
movaps %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm2 # xmm2 = xmm2[0,0],xmm1[0,0]
movq (%r12,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
movaps %xmm2, %xmm3
cmpltps %xmm12, %xmm3
movmskps %xmm3, %edi
testl %edi, %edi
je 0x1ac40e
cmpq $0x8, %rax
je 0x1ac42d
movq %rax, (%r14)
movaps %xmm12, %xmm1
movaps %xmm2, %xmm12
movq %rsi, %rax
jmp 0x1ac411
movq %rsi, (%r14)
incq %r9
movss %xmm1, 0x8(%r14)
addq $0x10, %r14
leaq -0x1(%r8), %rsi
andq %rsi, %r8
jne 0x1ac301
jmp 0x1ac436
movaps %xmm2, %xmm12
movq %rsi, %rax
jmp 0x1ac41e
cmpq $0x8, %rax
je 0x1ac45b
movb $0x1, %r10b
cmpq $0x2, %r9
jae 0x1ac460
movq %rax, %r12
testb %r10b, %r10b
jne 0x1ac214
jmp 0x1ac1e9
xorl %r10d, %r10d
jmp 0x1ac448
xorl %r10d, %r10d
jmp 0x1ac445
leaq -0x20(%r14), %rsi
movl -0x18(%r14), %r8d
leaq -0x10(%r14), %rdi
cmpl -0x8(%r14), %r8d
jae 0x1ac499
movaps (%rsi), %xmm1
movaps %xmm1, 0x20(%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%rsi)
movq (%rdi), %r8
movq %r8, (%rsi)
movq 0x20(%rsp), %r8
movq %r8, (%rdi)
movl 0x28(%rsp), %r8d
movl %r8d, 0x8(%rdi)
cmpq $0x2, %r9
je 0x1ac445
leaq -0x30(%r14), %r9
movl -0x28(%r14), %r8d
cmpl -0x8(%r14), %r8d
jae 0x1ac4d5
movaps (%r9), %xmm1
movaps %xmm1, 0x20(%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%r9)
movq (%rdi), %r8
movq %r8, (%r9)
movq 0x20(%rsp), %r8
movq %r8, (%rdi)
movl 0x28(%rsp), %r8d
movl %r8d, 0x8(%rdi)
movl -0x28(%r14), %edi
cmpl -0x18(%r14), %edi
jae 0x1ac445
movaps (%r9), %xmm1
movaps %xmm1, 0x20(%rsp)
movl 0x8(%rsi), %edi
movl %edi, 0x8(%r9)
movq (%rsi), %rdi
movq %rdi, (%r9)
movq 0x20(%rsp), %rdi
movq %rdi, (%rsi)
movl 0x28(%rsp), %edi
movl %edi, 0x8(%rsi)
jmp 0x1ac445
movaps (%rsp), %xmm0
cmpnleps %xmm12, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1ac1e9
movl %r12d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0xc0(%rsp)
je 0x1ac664
andq $-0x10, %r12
xorl %r8d, %r8d
movaps %xmm0, 0x170(%rsp)
movq (%rcx), %rax
movl (%r12,%r8,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
movd 0x34(%rax), %xmm1
pshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
pand 0x90(%rdx), %xmm1
pcmpeqd 0x1d3f49c(%rip), %xmm1 # 0x1eeba10
pandn %xmm0, %xmm1
movmskps %xmm1, %esi
testl %esi, %esi
je 0x1ac653
movq %r8, 0xc8(%rsp)
movl 0x4(%r12,%r8,8), %r8d
movaps %xmm1, 0x90(%rsp)
leaq 0x90(%rsp), %rsi
movq %rsi, 0x20(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x28(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x38(%rsp)
movq %rdx, 0x40(%rsp)
movl $0x4, 0x48(%rsp)
movl %edi, 0x4c(%rsp)
movl %r8d, 0x30(%rsp)
movq %rax, 0x50(%rsp)
andq $0x0, 0x58(%rsp)
movq 0x10(%rcx), %rcx
movq %rcx, 0x60(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x1ac5ee
movq 0x60(%rax), %rcx
leaq 0x20(%rsp), %rdi
callq *%rcx
movq 0xb8(%rsp), %rcx
movq 0xb0(%rsp), %rdx
movaps 0x160(%rsp), %xmm9
movaps 0x150(%rsp), %xmm10
movaps 0x140(%rsp), %xmm11
movaps 0x130(%rsp), %xmm13
movaps 0x120(%rsp), %xmm14
movaps 0x110(%rsp), %xmm15
leaq 0x240(%rsp), %r11
movaps 0x170(%rsp), %xmm0
movq 0xc8(%rsp), %r8
incq %r8
cmpq %r8, 0xc0(%rsp)
jne 0x1ac548
movaps 0x80(%rdx), %xmm1
movaps %xmm1, %xmm2
cmpltps (%rsp), %xmm2
andps %xmm0, %xmm2
movmskps %xmm2, %eax
testl %eax, %eax
je 0x1ac1e9
movaps (%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, %xmm0
shufps $0xb1, %xmm2, %xmm0 # xmm0 = xmm0[1,0],xmm2[3,2]
movaps %xmm2, (%rsp)
maxps %xmm2, %xmm0
movaps %xmm0, %xmm6
unpckhpd %xmm0, %xmm6 # xmm6 = xmm6[1],xmm0[1]
maxss %xmm0, %xmm6
jmp 0x1ac1dd
movq 0xa8(%rsp), %rax
testq %rax, %rax
jne 0x1abe05
addq $0x1188, %rsp # imm = 0x1188
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vint4_sse2.h
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 1, false, embree::sse42::ArrayIntersectorK_1<4, embree::sse42::ObjectIntersectorK<4, false>>, false>::occludedCoherent(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Lane-wise equality of two 4-wide 32-bit integer vectors: _mm_cmpeq_epi32 sets each
   32-bit lane to all-ones where a[i] == b[i] and to zero otherwise; the integer mask is
   then reinterpreted (bit-cast, no conversion) as a float vector to form the vboolf4 mask. */
__forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
|
movdqa (%rdi), %xmm0
pcmpeqd %xmm9, %xmm9
movdqa %xmm0, %xmm8
pcmpeqd %xmm9, %xmm8
movmskps %xmm8, %eax
testl %eax, %eax
je 0x1ad0b5
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x11c8, %rsp # imm = 0x11C8
movdqa %xmm0, 0x100(%rsp)
movq (%rsi), %rsi
movq %rsi, 0x50(%rsp)
movzbl %al, %esi
movaps (%rdx), %xmm10
movaps 0x10(%rdx), %xmm11
movaps 0x40(%rdx), %xmm2
movaps 0x1d3ff9c(%rip), %xmm1 # 0x1eec6c0
movaps %xmm2, %xmm0
andps %xmm1, %xmm0
movaps 0x1d4560f(%rip), %xmm3 # 0x1ef1d40
cmpltps %xmm3, %xmm0
movaps %xmm2, %xmm4
blendvps %xmm0, %xmm3, %xmm4
movaps 0x20(%rdx), %xmm12
movaps 0x50(%rdx), %xmm5
movaps %xmm5, %xmm0
andps %xmm1, %xmm0
cmpltps %xmm3, %xmm0
blendvps %xmm0, %xmm3, %xmm5
movaps 0x30(%rdx), %xmm7
movaps 0x60(%rdx), %xmm6
andps %xmm6, %xmm1
cmpltps %xmm3, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm3, %xmm6
rcpps %xmm4, %xmm0
mulps %xmm0, %xmm4
movaps 0x1d40296(%rip), %xmm13 # 0x1eeca10
movaps %xmm13, %xmm14
subps %xmm4, %xmm14
mulps %xmm0, %xmm14
addps %xmm0, %xmm14
rcpps %xmm5, %xmm0
mulps %xmm0, %xmm5
movaps %xmm13, %xmm15
subps %xmm5, %xmm15
mulps %xmm0, %xmm15
addps %xmm0, %xmm15
rcpps %xmm6, %xmm0
mulps %xmm0, %xmm6
subps %xmm6, %xmm13
mulps %xmm0, %xmm13
addps %xmm0, %xmm13
xorps %xmm3, %xmm3
maxps %xmm3, %xmm7
movaps %xmm7, 0x90(%rsp)
movaps 0x80(%rdx), %xmm0
maxps %xmm3, %xmm0
movaps %xmm0, 0x110(%rsp)
cmpltps %xmm3, %xmm2
andps 0x1d40343(%rip), %xmm2 # 0x1eecb20
pxor %xmm9, %xmm8
movaps 0x50(%rdx), %xmm0
movaps 0x60(%rdx), %xmm1
cmpltps %xmm3, %xmm0
andps 0x1d7370b(%rip), %xmm0 # 0x1f1ff00
orps %xmm2, %xmm0
cmpltps %xmm3, %xmm1
andps 0x1d4561d(%rip), %xmm1 # 0x1ef1e20
orps %xmm8, %xmm1
orps %xmm0, %xmm1
movaps %xmm1, 0xa0(%rsp)
leaq 0x1fa3767(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, 0x190(%rsp)
leaq 0x280(%rsp), %r10
pushq $0x8
popq %r11
movq %rdx, 0x68(%rsp)
movq %rcx, 0x60(%rsp)
movaps %xmm10, 0x170(%rsp)
movaps %xmm11, 0x160(%rsp)
movaps %xmm12, 0x150(%rsp)
movaps %xmm13, 0x140(%rsp)
movaps %xmm14, 0x130(%rsp)
movaps %xmm15, 0x120(%rsp)
bsfq %rsi, %rax
movd 0xa0(%rsp,%rax,4), %xmm0
pshufd $0x0, %xmm0, %xmm9 # xmm9 = xmm0[0,0,0,0]
pcmpeqd 0xa0(%rsp), %xmm9
movmskps %xmm9, %eax
movq %rax, %rdi
notq %rdi
andq %rsi, %rdi
movq %rdi, 0x58(%rsp)
movaps 0x1d3f14a(%rip), %xmm4 # 0x1eeb9f0
movaps %xmm4, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm14, %xmm1
movaps %xmm1, %xmm0
shufps $0xb1, %xmm1, %xmm0 # xmm0 = xmm0[1,0],xmm1[3,2]
minps %xmm1, %xmm0
movaps %xmm0, %xmm6
shufps $0x4e, %xmm0, %xmm6 # xmm6 = xmm6[2,3],xmm0[0,1]
minps %xmm0, %xmm6
movaps %xmm4, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm15, %xmm1
movaps %xmm1, %xmm0
shufps $0xb1, %xmm1, %xmm0 # xmm0 = xmm0[1,0],xmm1[3,2]
minps %xmm1, %xmm0
movaps %xmm0, %xmm1
shufps $0x4e, %xmm0, %xmm1 # xmm1 = xmm1[2,3],xmm0[0,1]
minps %xmm0, %xmm1
insertps $0x1c, %xmm1, %xmm6 # xmm6 = xmm6[0],xmm1[0],zero,zero
movaps %xmm4, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm13, %xmm1
movaps %xmm1, %xmm0
shufps $0xb1, %xmm1, %xmm0 # xmm0 = xmm0[1,0],xmm1[3,2]
minps %xmm1, %xmm0
movaps %xmm0, %xmm1
shufps $0x4e, %xmm0, %xmm1 # xmm1 = xmm1[2,3],xmm0[0,1]
minps %xmm0, %xmm1
insertps $0x20, %xmm1, %xmm6 # xmm6 = xmm6[0,1],xmm1[0],xmm6[3]
movaps 0x1d3f0e4(%rip), %xmm7 # 0x1eeba00
movaps %xmm7, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm14, %xmm1
movaps %xmm1, %xmm0
shufps $0xb1, %xmm1, %xmm0 # xmm0 = xmm0[1,0],xmm1[3,2]
maxps %xmm1, %xmm0
movaps %xmm0, %xmm1
shufps $0x4e, %xmm0, %xmm1 # xmm1 = xmm1[2,3],xmm0[0,1]
maxps %xmm0, %xmm1
movaps %xmm1, %xmm3
movaps %xmm7, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm15, %xmm1
movaps %xmm1, %xmm0
shufps $0xb1, %xmm1, %xmm0 # xmm0 = xmm0[1,0],xmm1[3,2]
maxps %xmm1, %xmm0
movaps %xmm0, %xmm1
shufps $0x4e, %xmm0, %xmm1 # xmm1 = xmm1[2,3],xmm0[0,1]
maxps %xmm0, %xmm1
insertps $0x1c, %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[0],zero,zero
movaps %xmm7, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm13, %xmm1
movaps %xmm1, %xmm0
shufps $0xb1, %xmm1, %xmm0 # xmm0 = xmm0[1,0],xmm1[3,2]
maxps %xmm1, %xmm0
movaps %xmm0, %xmm1
shufps $0x4e, %xmm0, %xmm1 # xmm1 = xmm1[2,3],xmm0[0,1]
maxps %xmm0, %xmm1
insertps $0x20, %xmm1, %xmm3 # xmm3 = xmm3[0,1],xmm1[0],xmm3[3]
movaps %xmm3, 0x30(%rsp)
movaps %xmm6, %xmm0
cmpnltps 0x1d3f072(%rip), %xmm0 # 0x1eeba10
movaps %xmm0, 0x10(%rsp)
movaps %xmm3, %xmm1
blendvps %xmm0, %xmm6, %xmm1
movshdup %xmm1, %xmm0 # xmm0 = xmm1[1,1,3,3]
xorl %r15d, %r15d
xorps %xmm3, %xmm3
ucomiss %xmm0, %xmm3
seta %r15b
shll $0x4, %r15d
orq $0x20, %r15
movaps %xmm1, %xmm0
movaps %xmm1, 0x20(%rsp)
unpckhpd %xmm1, %xmm0 # xmm0 = xmm0[1],xmm1[1]
xorl %r13d, %r13d
ucomiss %xmm0, %xmm3
movq 0x50(%rsp), %rsi
movq 0x70(%rsi), %rsi
movq %rsi, 0x280(%rsp)
movq %rax, 0x288(%rsp)
seta %r13b
shll $0x4, %r13d
orq $0x40, %r13
movq %r15, 0x88(%rsp)
xorq $0x10, %r15
movq %r15, 0x80(%rsp)
xorl %ebx, %ebx
ucomiss %xmm1, %xmm3
seta %bl
movaps %xmm4, %xmm3
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm10, %xmm3
movaps %xmm3, %xmm0
shufps $0xb1, %xmm3, %xmm0 # xmm0 = xmm0[1,0],xmm3[3,2]
minps %xmm3, %xmm0
movaps %xmm0, %xmm1
shufps $0x4e, %xmm0, %xmm1 # xmm1 = xmm1[2,3],xmm0[0,1]
minps %xmm0, %xmm1
movaps %xmm4, %xmm3
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm11, %xmm3
movaps %xmm3, %xmm0
shufps $0xb1, %xmm3, %xmm0 # xmm0 = xmm0[1,0],xmm3[3,2]
minps %xmm3, %xmm0
movaps %xmm0, %xmm3
shufps $0x4e, %xmm0, %xmm3 # xmm3 = xmm3[2,3],xmm0[0,1]
minps %xmm0, %xmm3
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm12, %xmm4
insertps $0x1c, %xmm3, %xmm1 # xmm1 = xmm1[0],xmm3[0],zero,zero
movaps %xmm4, %xmm0
shufps $0xb1, %xmm4, %xmm0 # xmm0 = xmm0[1,0],xmm4[3,2]
minps %xmm4, %xmm0
movaps %xmm0, %xmm3
shufps $0x4e, %xmm0, %xmm3 # xmm3 = xmm3[2,3],xmm0[0,1]
minps %xmm0, %xmm3
insertps $0x20, %xmm3, %xmm1 # xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
movaps %xmm7, %xmm3
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm10, %xmm3
movaps %xmm3, %xmm0
shufps $0xb1, %xmm3, %xmm0 # xmm0 = xmm0[1,0],xmm3[3,2]
maxps %xmm3, %xmm0
movaps %xmm0, %xmm3
shufps $0x4e, %xmm0, %xmm3 # xmm3 = xmm3[2,3],xmm0[0,1]
maxps %xmm0, %xmm3
movaps %xmm7, %xmm4
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm11, %xmm4
movaps %xmm4, %xmm0
shufps $0xb1, %xmm4, %xmm0 # xmm0 = xmm0[1,0],xmm4[3,2]
maxps %xmm4, %xmm0
movaps %xmm0, %xmm4
shufps $0x4e, %xmm0, %xmm4 # xmm4 = xmm4[2,3],xmm0[0,1]
maxps %xmm0, %xmm4
movaps %xmm7, %xmm5
movaps %xmm9, %xmm0
blendvps %xmm0, %xmm12, %xmm5
insertps $0x1c, %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],zero,zero
movaps %xmm5, %xmm0
shufps $0xb1, %xmm5, %xmm0 # xmm0 = xmm0[1,0],xmm5[3,2]
maxps %xmm5, %xmm0
movaps %xmm0, %xmm4
shufps $0x4e, %xmm0, %xmm4 # xmm4 = xmm4[2,3],xmm0[0,1]
maxps %xmm0, %xmm4
insertps $0x20, %xmm4, %xmm3 # xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
movaps %xmm3, %xmm5
movaps 0x10(%rsp), %xmm0
blendvps %xmm0, %xmm1, %xmm5
movaps %xmm0, %xmm2
blendvps %xmm0, %xmm3, %xmm1
movaps %xmm9, %xmm0
blendvps %xmm0, 0x110(%rsp), %xmm7
movaps %xmm7, %xmm4
shufps $0xb1, %xmm7, %xmm4 # xmm4 = xmm4[1,0],xmm7[3,2]
movaps %xmm7, 0x40(%rsp)
maxps %xmm7, %xmm4
movaps 0x1d3eec5(%rip), %xmm3 # 0x1eeb9f0
blendvps %xmm0, 0x90(%rsp), %xmm3
movaps %xmm3, %xmm7
shufps $0xb1, %xmm3, %xmm7 # xmm7 = xmm7[1,0],xmm3[3,2]
minps %xmm3, %xmm7
movaps %xmm2, %xmm0
blendvps %xmm0, 0x30(%rsp), %xmm6
movaps 0x20(%rsp), %xmm2
mulps %xmm2, %xmm1
mulps %xmm6, %xmm5
shll $0x4, %ebx
movq %rbx, %r14
xorq $0x10, %r14
movq %r13, %r15
xorq $0x10, %r13
movaps %xmm2, %xmm0
shufps $0x0, %xmm2, %xmm0 # xmm0 = xmm0[0,0],xmm2[0,0]
movaps %xmm0, 0x220(%rsp)
movaps %xmm1, %xmm0
shufps $0x0, %xmm1, %xmm0 # xmm0 = xmm0[0,0],xmm1[0,0]
movaps %xmm0, 0x210(%rsp)
movaps %xmm2, %xmm0
shufps $0x55, %xmm2, %xmm0 # xmm0 = xmm0[1,1],xmm2[1,1]
movaps %xmm0, 0x200(%rsp)
movaps %xmm1, %xmm0
shufps $0x55, %xmm1, %xmm0 # xmm0 = xmm0[1,1],xmm1[1,1]
movaps %xmm0, 0x1f0(%rsp)
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
movaps %xmm2, 0x20(%rsp)
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
movaps %xmm1, 0x260(%rsp)
movaps %xmm6, %xmm0
shufps $0x0, %xmm6, %xmm0 # xmm0 = xmm0[0,0],xmm6[0,0]
movaps %xmm0, 0x1e0(%rsp)
movaps %xmm5, %xmm1
movaps 0x1d3ee21(%rip), %xmm3 # 0x1eeb9f0
movaps %xmm9, (%rsp)
movaps %xmm9, %xmm0
pcmpeqd %xmm9, %xmm9
blendvps %xmm0, 0x90(%rsp), %xmm3
movaps %xmm3, 0x30(%rsp)
shufps $0x0, %xmm5, %xmm1 # xmm1 = xmm1[0,0],xmm5[0,0]
movaps %xmm1, 0x1d0(%rsp)
movaps %xmm7, %xmm0
shufps $0x0, %xmm7, %xmm0 # xmm0 = xmm0[0,0],xmm7[0,0]
shufps $0xaa, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2]
minps %xmm0, %xmm7
movaps %xmm7, 0x230(%rsp)
movaps %xmm4, %xmm0
shufps $0x0, %xmm4, %xmm0 # xmm0 = xmm0[0,0],xmm4[0,0]
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
maxps %xmm0, %xmm4
movaps %xmm4, 0x240(%rsp)
movaps %xmm6, %xmm0
shufps $0x55, %xmm6, %xmm0 # xmm0 = xmm0[1,1],xmm6[1,1]
movaps %xmm0, 0x1c0(%rsp)
movaps %xmm5, %xmm0
shufps $0x55, %xmm5, %xmm0 # xmm0 = xmm0[1,1],xmm5[1,1]
movaps %xmm0, 0x1b0(%rsp)
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
movaps %xmm6, 0x10(%rsp)
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
movaps %xmm5, 0x250(%rsp)
leaq 0x290(%rsp), %rsi
cmpq %r10, %rsi
je 0x1ad069
leaq -0x10(%rsi), %rbp
movmskps %xmm8, %eax
notq %rax
andq -0x8(%rsi), %rax
je 0x1ad056
movq (%rbp), %r12
testb $0x8, %r12b
jne 0x1ace71
movaps 0x20(%r12,%rbx), %xmm0
mulps 0x220(%rsp), %xmm0
subps 0x210(%rsp), %xmm0
movq 0x88(%rsp), %rsi
movaps 0x20(%r12,%rsi), %xmm1
mulps 0x200(%rsp), %xmm1
subps 0x1f0(%rsp), %xmm1
pmaxsd %xmm0, %xmm1
movaps 0x20(%r12,%r15), %xmm0
mulps 0x20(%rsp), %xmm0
subps 0x260(%rsp), %xmm0
pmaxsd 0x230(%rsp), %xmm0
pmaxsd %xmm1, %xmm0
movaps 0x20(%r12,%r14), %xmm1
mulps 0x1e0(%rsp), %xmm1
subps 0x1d0(%rsp), %xmm1
movq 0x80(%rsp), %rsi
movaps 0x20(%r12,%rsi), %xmm2
mulps 0x1c0(%rsp), %xmm2
subps 0x1b0(%rsp), %xmm2
pminsd %xmm1, %xmm2
movaps 0x20(%r12,%r13), %xmm1
mulps 0x10(%rsp), %xmm1
subps 0x250(%rsp), %xmm1
pminsd 0x240(%rsp), %xmm1
pminsd %xmm2, %xmm1
cmpleps %xmm1, %xmm0
movmskps %xmm0, %esi
testl %esi, %esi
je 0x1ace6d
movzbl %sil, %r9d
movq %r11, %r8
xorl %eax, %eax
bsfq %r9, %rsi
movss 0x20(%r12,%rsi,4), %xmm2
movss 0x30(%r12,%rsi,4), %xmm1
movss 0x40(%r12,%rsi,4), %xmm3
movss 0x50(%r12,%rsi,4), %xmm0
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm10, %xmm2
mulps %xmm14, %xmm2
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
subps %xmm11, %xmm3
mulps %xmm15, %xmm3
movss 0x60(%r12,%rsi,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
subps %xmm12, %xmm4
mulps %xmm13, %xmm4
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
subps %xmm10, %xmm1
mulps %xmm14, %xmm1
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm11, %xmm0
mulps %xmm15, %xmm0
movss 0x70(%r12,%rsi,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
subps %xmm12, %xmm5
mulps %xmm13, %xmm5
movaps %xmm2, %xmm6
pminsd %xmm1, %xmm6
movaps %xmm3, %xmm7
pminsd %xmm0, %xmm7
pmaxsd %xmm6, %xmm7
movaps %xmm4, %xmm6
pminsd %xmm5, %xmm6
pmaxsd 0x30(%rsp), %xmm6
pmaxsd %xmm7, %xmm6
pmaxsd %xmm2, %xmm1
pmaxsd %xmm3, %xmm0
pminsd %xmm1, %xmm0
pmaxsd %xmm4, %xmm5
pminsd 0x40(%rsp), %xmm5
pminsd %xmm0, %xmm5
cmpleps %xmm5, %xmm6
movmskps %xmm6, %edi
testl %edi, %edi
je 0x1ace47
movq (%r12,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
cmpq $0x8, %r8
je 0x1ace40
movq %r8, (%rbp)
movq %rax, 0x8(%rbp)
addq $0x10, %rbp
movzbl %dil, %eax
movq %rsi, %r8
leaq -0x1(%r9), %rsi
andq %rsi, %r9
jne 0x1acd59
cmpq $0x8, %r8
setne %sil
movq %r8, %r12
testb %sil, %sil
jne 0x1acc81
jmp 0x1ad056
xorl %esi, %esi
jmp 0x1ace5f
testq %rax, %rax
je 0x1ad056
movl %r12d, %eax
andl $0xf, %eax
movaps %xmm8, %xmm0
addq $-0x8, %rax
movq %rax, 0x70(%rsp)
je 0x1ad024
andq $-0x10, %r12
movdqa %xmm8, %xmm2
pxor %xmm9, %xmm2
pushq $0x1
popq %rdi
movaps %xmm8, 0x180(%rsp)
movq (%rcx), %rax
movl -0x8(%r12,%rdi,8), %r8d
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %rax
movd 0x34(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
pand 0x90(%rdx), %xmm1
pcmpeqd 0x1d3eb36(%rip), %xmm1 # 0x1eeba10
pandn %xmm2, %xmm1
movmskps %xmm1, %esi
movaps 0x190(%rsp), %xmm0
testl %esi, %esi
je 0x1ad001
movdqa %xmm2, 0x1a0(%rsp)
movq %rdi, 0x78(%rsp)
movl -0x4(%r12,%rdi,8), %edi
movaps %xmm1, 0x270(%rsp)
leaq 0x270(%rsp), %rsi
movq %rsi, 0xb8(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0xc0(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0xd0(%rsp)
movq %rdx, 0xd8(%rsp)
movl $0x4, 0xe0(%rsp)
movl %r8d, 0xe4(%rsp)
movl %edi, 0xc8(%rsp)
movq %rax, 0xe8(%rsp)
andq $0x0, 0xf0(%rsp)
movq 0x10(%rcx), %rcx
movq %rcx, 0xf8(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x1acf80
movq 0x68(%rax), %rcx
leaq 0xb8(%rsp), %rdi
callq *%rcx
movq 0x68(%rsp), %rdx
movaps 0x80(%rdx), %xmm0
cmpltps 0x1d3ea72(%rip), %xmm0 # 0x1eeba10
movq 0x60(%rsp), %rcx
movaps 0x180(%rsp), %xmm8
pcmpeqd %xmm9, %xmm9
movaps 0x170(%rsp), %xmm10
movaps 0x160(%rsp), %xmm11
movaps 0x150(%rsp), %xmm12
movaps 0x140(%rsp), %xmm13
movaps 0x130(%rsp), %xmm14
movaps 0x120(%rsp), %xmm15
leaq 0x280(%rsp), %r10
pushq $0x8
popq %r11
movq 0x78(%rsp), %rdi
movdqa 0x1a0(%rsp), %xmm2
andnps %xmm2, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1ad020
leaq 0x1(%rdi), %rax
movaps %xmm0, %xmm2
cmpq 0x70(%rsp), %rdi
movq %rax, %rdi
jb 0x1acead
xorps %xmm9, %xmm0
orps %xmm0, %xmm8
movaps %xmm8, %xmm1
andnps (%rsp), %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1ad065
movaps %xmm8, %xmm0
movaps 0x40(%rsp), %xmm2
blendvps %xmm0, 0x1d3e9b7(%rip), %xmm2 # 0x1eeba00
movaps %xmm2, 0x40(%rsp)
xorl %eax, %eax
movaps %xmm1, (%rsp)
jmp 0x1ad058
xorl %eax, %eax
movq %rbp, %rsi
testb %al, %al
je 0x1acc5f
jmp 0x1ad069
movb $0x1, %al
jmp 0x1ad050
movq 0x58(%rsp), %rsi
testq %rsi, %rsi
jne 0x1ac870
pcmpeqd %xmm1, %xmm1
movdqa 0x100(%rsp), %xmm0
pcmpeqd %xmm1, %xmm0
pand %xmm8, %xmm0
movaps 0x80(%rdx), %xmm1
blendvps %xmm0, 0x1d3e963(%rip), %xmm1 # 0x1eeba00
movaps %xmm1, 0x80(%rdx)
addq $0x11c8, %rsp # imm = 0x11C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vint4_sse2.h
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::sse42::SubGridMBIntersectorKPluecker<4, 4, true>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubGridQuadMIntersectorKPluecker<4, 4, true>&, embree::RayHitK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
/* Single-ray traversal path of the hybrid K-wide intersector: traverses the BVH for
   lane k of the ray packet and intersects leaves via PrimitiveIntersectorK, which may
   shorten ray.tfar[k] as closer hits are found.
   This    - intersector table forwarded to the leaf primitive intersector
   bvh     - BVH being traversed (the starting node is passed separately as root)
   root    - node reference where traversal starts
   k       - active lane index within the K-wide ray packet
   pre     - per-ray precalculations used by the primitive intersector
   ray     - ray/hit packet being updated
   tray    - K-wide traversal data; lane k is extracted into the scalar TravRay below
   context - ray query context forwarded to leaf intersection */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes, each entry carries (node ptr, entry distance)
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf; // -inf so the root is never culled by the distance test below
/* load the ray into SIMD registers */
TravRay<N,robust> tray1; // scalar (single-ray) traversal state extracted from lane k of the packet
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
/* dist is stored as raw bits; reinterpret as float before comparing against the current hit distance */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
/* returns false when cur is a leaf (or otherwise not an inner node), ending down-traversal */
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
/* closest-hit ordering: nearest child becomes cur, farther hit children are pushed sorted by distance */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
/* shrink the traversal interval to the closest hit found so far */
tray1.tfar = ray.tfar[k];
/* if the leaf intersector produced a lazily-created subtree, push it for traversal */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1438, %rsp # imm = 0x1438
movq %r9, %rbx
movq %rcx, %r14
movq 0x1478(%rsp), %rbp
movq 0x1470(%rsp), %rax
leaq 0x500(%rsp), %rcx
movq %rdx, -0x10(%rcx)
movq %rcx, 0x8(%rsp)
andl $0x0, -0x8(%rcx)
movss (%rax,%r14,4), %xmm7
movss 0x10(%rax,%r14,4), %xmm5
movss 0x20(%rax,%r14,4), %xmm6
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movss 0x60(%rax,%r14,4), %xmm8
movss 0x70(%rax,%r14,4), %xmm1
movss 0x80(%rax,%r14,4), %xmm2
movss 0x1d69ad0(%rip), %xmm0 # 0x1f1ff10
movaps %xmm8, %xmm3
mulss %xmm0, %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm3, 0x340(%rsp)
movaps %xmm1, %xmm3
mulss %xmm0, %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm3, 0x330(%rsp)
mulss %xmm2, %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x350(%rsp)
movss 0x1d69a95(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulss %xmm0, %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm1, 0x370(%rsp)
mulss %xmm0, %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm2, 0x360(%rsp)
movslq 0x90(%rax,%r14,4), %rcx
movslq 0xa0(%rax,%r14,4), %rdx
movslq 0xb0(%rax,%r14,4), %rsi
movq %rcx, 0x2c8(%rsp)
xorq $0x10, %rcx
movq %rcx, 0x2b0(%rsp)
movq %rdx, 0x2c0(%rsp)
xorq $0x10, %rdx
movq %rdx, 0x2a8(%rsp)
movq %rsi, 0x2b8(%rsp)
xorq $0x10, %rsi
movq %rsi, 0x2a0(%rsp)
movss 0xc0(%rax,%r14,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x320(%rsp)
movss 0xd0(%rax,%r14,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x240(%rsp)
pushq $0x1
popq %rax
movl %r14d, %ecx
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1f6e1f2(%rip), %rax # 0x2124730
movq %rax, 0x138(%rsp)
movaps %xmm5, 0x230(%rsp)
movaps %xmm6, 0x220(%rsp)
movq %r9, 0x2e0(%rsp)
movq %r14, 0x148(%rsp)
movaps %xmm7, 0x310(%rsp)
movaps %xmm8, 0x300(%rsp)
movq 0x8(%rsp), %rax
leaq 0x4f0(%rsp), %rcx
cmpq %rcx, %rax
je 0x1b85c2
movss -0x8(%rax), %xmm0
addq $-0x10, %rax
ucomiss 0x80(%rbx,%r14,4), %xmm0
ja 0x1b657c
movq %rax, 0x8(%rsp)
movq (%rax), %rdx
movq %rdx, %rsi
testb $0x8, %sil
movq 0x2d8(%rsp), %rax
jne 0x1b66e4
movq %rsi, %rax
andq $-0x10, %rax
movss 0x70(%rbx,%r14,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movq 0x2c8(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%rcx), %xmm2
subps %xmm7, %xmm2
mulps 0x340(%rsp), %xmm2
movq 0x2c0(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rcx), %xmm3
subps %xmm5, %xmm3
movq 0x2b8(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm1
mulps %xmm0, %xmm1
addps 0x20(%rax,%rcx), %xmm1
mulps 0x330(%rsp), %xmm3
subps %xmm6, %xmm1
mulps 0x350(%rsp), %xmm1
maxps %xmm1, %xmm3
movaps 0x320(%rsp), %xmm1
maxps %xmm2, %xmm1
maxps %xmm3, %xmm1
movq 0x2b0(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%rcx), %xmm3
subps %xmm7, %xmm3
movq 0x2a8(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%rcx), %xmm4
mulps %xmm8, %xmm3
subps %xmm5, %xmm4
mulps 0x370(%rsp), %xmm4
movq 0x2a0(%rsp), %rcx
movaps 0x80(%rax,%rcx), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%rcx), %xmm2
subps %xmm6, %xmm2
mulps 0x360(%rsp), %xmm2
minps %xmm2, %xmm4
movaps 0x240(%rsp), %xmm2
minps %xmm3, %xmm2
minps %xmm4, %xmm2
movl %esi, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1b673a
movaps %xmm1, %xmm0
cmpleps %xmm2, %xmm0
pslld $0x1f, %xmm0
movmskps %xmm0, %eax
movaps %xmm1, 0x150(%rsp)
movq %rdx, %rsi
testb $0x8, %sil
movq %rax, 0x2d8(%rsp)
jne 0x1b6736
testq %rax, %rax
je 0x1b675f
movq %rdx, %r10
andq $-0x10, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %r8
movq %rax, %rsi
xorl %eax, %eax
movq (%r10,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rsi, %r8
jne 0x1b6764
testl %eax, %eax
je 0x1b65a9
jmp 0x1b695c
pushq $0x6
jmp 0x1b6761
movaps %xmm1, %xmm3
cmpleps %xmm2, %xmm3
movaps 0xe0(%rax), %xmm2
cmpleps %xmm0, %xmm2
cmpltps 0xf0(%rax), %xmm0
andps %xmm2, %xmm0
andps %xmm3, %xmm0
jmp 0x1b66d1
pushq $0x4
popq %rax
jmp 0x1b6729
movl 0x150(%rsp,%rcx,4), %edi
bsfq %r8, %r9
leaq -0x1(%r8), %rcx
movq (%r10,%r9,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
movl 0x150(%rsp,%r9,4), %r9d
andq %r8, %rcx
jne 0x1b67cc
movq 0x8(%rsp), %r8
leaq 0x10(%r8), %rcx
cmpl %r9d, %edi
jae 0x1b67b8
movq %rsi, (%r8)
movl %r9d, 0x8(%r8)
movq %rcx, 0x8(%rsp)
jmp 0x1b6729
movq %rdx, (%r8)
movl %edi, 0x8(%r8)
movq %rcx, 0x8(%rsp)
movq %rsi, %rdx
jmp 0x1b6729
movq %rdx, %xmm1
movd %edi, %xmm0
punpcklqdq %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0]
movq %rsi, %xmm3
movd %r9d, %xmm0
punpcklqdq %xmm0, %xmm3 # xmm3 = xmm3[0],xmm0[0]
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdx
movq (%r10,%rsi,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movq %rdi, %xmm2
movd 0x150(%rsp,%rsi,4), %xmm0
punpcklqdq %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
movdqa %xmm3, %xmm0
pcmpgtd %xmm1, %xmm0
andq %rcx, %rdx
jne 0x1b6886
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm4
blendvps %xmm0, %xmm1, %xmm4
blendvps %xmm0, %xmm3, %xmm1
movdqa %xmm2, %xmm0
pcmpgtd %xmm4, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm2, %xmm3
blendvps %xmm0, %xmm4, %xmm3
blendvps %xmm0, %xmm2, %xmm4
movaps %xmm4, %xmm0
pcmpgtd %xmm1, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm4, %xmm2
blendvps %xmm0, %xmm1, %xmm2
blendvps %xmm0, %xmm4, %xmm1
movq 0x8(%rsp), %rcx
movaps %xmm1, (%rcx)
movaps %xmm2, 0x10(%rcx)
movq %xmm3, %rdx
addq $0x20, %rcx
jmp 0x1b67ae
bsfq %rdx, %rcx
movq (%r10,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
movq %rdx, %xmm5
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm3, %xmm4
blendvps %xmm0, %xmm1, %xmm4
movd 0x150(%rsp,%rcx,4), %xmm6
blendvps %xmm0, %xmm3, %xmm1
punpcklqdq %xmm6, %xmm5 # xmm5 = xmm5[0],xmm6[0]
movaps 0x220(%rsp), %xmm6
movdqa %xmm5, %xmm0
pcmpgtd %xmm2, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movdqa %xmm5, %xmm3
blendvps %xmm0, %xmm2, %xmm3
blendvps %xmm0, %xmm5, %xmm2
movaps %xmm2, %xmm0
pcmpgtd %xmm1, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm2, %xmm5
blendvps %xmm0, %xmm1, %xmm5
blendvps %xmm0, %xmm2, %xmm1
movaps %xmm3, %xmm0
pcmpgtd %xmm4, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm3, %xmm2
blendvps %xmm0, %xmm4, %xmm2
blendvps %xmm0, %xmm3, %xmm4
movaps %xmm5, %xmm0
pcmpgtd %xmm4, %xmm0
pshufd $0xaa, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
movaps %xmm5, %xmm3
blendvps %xmm0, %xmm4, %xmm3
blendvps %xmm0, %xmm5, %xmm4
movaps 0x230(%rsp), %xmm5
movq 0x8(%rsp), %rcx
movaps %xmm1, (%rcx)
movaps %xmm4, 0x10(%rcx)
movaps %xmm3, 0x20(%rcx)
movq %xmm2, %rdx
addq $0x30, %rcx
jmp 0x1b67ae
cmpl $0x6, %eax
jne 0x1b6577
movl %edx, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x298(%rsp)
je 0x1b8586
andq $-0x10, %rdx
xorl %eax, %eax
movq %rdx, 0x140(%rsp)
movq %rax, 0x2d0(%rsp)
imulq $0x90, %rax, %rax
movss 0x70(%rbx,%r14,4), %xmm2
subss 0x80(%rdx,%rax), %xmm2
mulss 0x84(%rdx,%rax), %xmm2
movq 0x20(%rdx,%rax), %xmm1
movq 0x24(%rdx,%rax), %xmm3
movdqa %xmm1, %xmm0
pminub %xmm3, %xmm0
pcmpeqb %xmm1, %xmm0
pmovzxbd %xmm1, %xmm1 # xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
cvtdq2ps %xmm1, %xmm8
movss 0x38(%rdx,%rax), %xmm5
movss 0x3c(%rdx,%rax), %xmm6
movss 0x40(%rdx,%rax), %xmm4
movss 0x44(%rdx,%rax), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm7, %xmm8
addps %xmm5, %xmm8
movq 0x50(%rdx,%rax), %xmm1
pmovzxbd %xmm1, %xmm1 # xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
cvtdq2ps %xmm1, %xmm1
movss 0x74(%rdx,%rax), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x68(%rdx,%rax), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulps %xmm9, %xmm1
addps %xmm10, %xmm1
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm8, %xmm1
mulps %xmm2, %xmm1
addps %xmm8, %xmm1
pmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
cvtdq2ps %xmm3, %xmm8
mulps %xmm7, %xmm8
addps %xmm5, %xmm8
movq 0x54(%rdx,%rax), %xmm3
pmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
cvtdq2ps %xmm3, %xmm3
mulps %xmm9, %xmm3
addps %xmm10, %xmm3
subps %xmm8, %xmm3
mulps %xmm2, %xmm3
addps %xmm8, %xmm3
movq 0x28(%rdx,%rax), %xmm5
pmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
cvtdq2ps %xmm5, %xmm7
movss 0x48(%rdx,%rax), %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm8, %xmm7
addps %xmm6, %xmm7
movq 0x58(%rdx,%rax), %xmm5
pmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
cvtdq2ps %xmm5, %xmm5
movss 0x78(%rdx,%rax), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x6c(%rdx,%rax), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulps %xmm9, %xmm5
addps %xmm10, %xmm5
subps %xmm7, %xmm5
mulps %xmm2, %xmm5
addps %xmm7, %xmm5
movq 0x2c(%rdx,%rax), %xmm7
pmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
cvtdq2ps %xmm7, %xmm7
mulps %xmm8, %xmm7
addps %xmm6, %xmm7
movq 0x5c(%rdx,%rax), %xmm6
pmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
cvtdq2ps %xmm6, %xmm6
mulps %xmm9, %xmm6
addps %xmm10, %xmm6
subps %xmm7, %xmm6
mulps %xmm2, %xmm6
addps %xmm7, %xmm6
movq 0x30(%rdx,%rax), %xmm7
pmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
cvtdq2ps %xmm7, %xmm8
movss 0x4c(%rdx,%rax), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm9, %xmm8
addps %xmm4, %xmm8
movq 0x60(%rdx,%rax), %xmm7
pmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
cvtdq2ps %xmm7, %xmm7
movss 0x7c(%rdx,%rax), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movss 0x70(%rdx,%rax), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm10, %xmm7
addps %xmm11, %xmm7
subps %xmm8, %xmm7
mulps %xmm2, %xmm7
addps %xmm8, %xmm7
movq 0x34(%rdx,%rax), %xmm8
pmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
cvtdq2ps %xmm8, %xmm8
mulps %xmm9, %xmm8
addps %xmm4, %xmm8
movq %rax, 0x80(%rsp)
movq 0x64(%rdx,%rax), %xmm4
pmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
cvtdq2ps %xmm4, %xmm4
mulps %xmm10, %xmm4
addps %xmm11, %xmm4
subps %xmm8, %xmm4
mulps %xmm2, %xmm4
addps %xmm8, %xmm4
movaps 0x310(%rsp), %xmm2
subps %xmm2, %xmm1
mulps 0x340(%rsp), %xmm1
movaps 0x230(%rsp), %xmm8
subps %xmm8, %xmm5
mulps 0x330(%rsp), %xmm5
subps %xmm2, %xmm3
mulps 0x300(%rsp), %xmm3
subps %xmm8, %xmm6
mulps 0x370(%rsp), %xmm6
movaps %xmm1, %xmm2
pminsd %xmm3, %xmm2
pmaxsd %xmm1, %xmm3
movaps %xmm5, %xmm1
pminsd %xmm6, %xmm1
pmaxsd %xmm2, %xmm1
pmaxsd %xmm5, %xmm6
movaps 0x220(%rsp), %xmm2
subps %xmm2, %xmm7
mulps 0x350(%rsp), %xmm7
subps %xmm2, %xmm4
mulps 0x360(%rsp), %xmm4
pminsd %xmm3, %xmm6
movaps %xmm7, %xmm2
pminsd %xmm4, %xmm2
pmaxsd %xmm7, %xmm4
pmaxsd 0x320(%rsp), %xmm2
pmaxsd %xmm1, %xmm2
pminsd 0x240(%rsp), %xmm4
pminsd %xmm6, %xmm4
movdqa %xmm2, 0x4c0(%rsp)
pcmpgtd %xmm4, %xmm2
pmovsxbd %xmm0, %xmm0
pandn %xmm0, %xmm2
movmskps %xmm2, %eax
testl %eax, %eax
je 0x1b8565
movq 0x80(%rsp), %rcx
addq 0x140(%rsp), %rcx
movq %rcx, 0x80(%rsp)
movzbl %al, %ecx
bsfq %rcx, %rax
movss 0x4c0(%rsp,%rax,4), %xmm0
movss 0x80(%rbx,%r14,4), %xmm1
ucomiss %xmm1, %xmm0
ja 0x1b7e49
movaps %xmm1, 0xe0(%rsp)
movq %rcx, 0x2f8(%rsp)
movq 0x80(%rsp), %rcx
movzwl (%rcx,%rax,8), %r9d
movzwl 0x2(%rcx,%rax,8), %r11d
movl %r11d, 0x50(%rsp)
movl 0x88(%rcx), %edx
movq %rdx, 0x98(%rsp)
movl 0x4(%rcx,%rax,8), %esi
movq %rsi, 0x90(%rsp)
movq (%rbp), %rax
movq %rax, 0x2e8(%rsp)
movq 0x1e8(%rax), %rax
movq (%rax,%rdx,8), %rcx
movq 0x58(%rcx), %r10
movq %rsi, %rax
imulq 0x68(%rcx), %rax
movq %rax, %rsi
movss 0x28(%rcx), %xmm0
movss 0x2c(%rcx), %xmm1
movss 0x30(%rcx), %xmm2
subss %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm0, %xmm3
shufps $0x0, %xmm0, %xmm3 # xmm3 = xmm3[0,0],xmm0[0,0]
movaps 0x70(%rbx), %xmm4
subps %xmm1, %xmm4
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
divps %xmm2, %xmm4
mulps %xmm3, %xmm4
roundps $0x1, %xmm4, %xmm1
addss 0x1d39c83(%rip), %xmm0 # 0x1ef09cc
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm1
maxps 0x1d34cb9(%rip), %xmm1 # 0x1eeba10
subps %xmm1, %xmm4
movaps %xmm4, 0x4e0(%rsp)
cvtps2dq %xmm1, %xmm0
movapd %xmm0, 0x4d0(%rsp)
movslq 0x4d0(%rsp,%r14,4), %rdx
movl %r9d, %edi
movl $0x7fff, %eax # imm = 0x7FFF
andl %eax, %edi
movl %edi, 0x210(%rsp)
movq %rsi, %r8
movq %rsi, 0x2f0(%rsp)
movq %r10, 0x88(%rsp)
movl (%r10,%rsi), %esi
addl %edi, %esi
movl %r11d, %edi
andl %eax, %edi
movl %edi, 0x200(%rsp)
movl 0x4(%r10,%r8), %r14d
movl %r14d, %eax
imull %edi, %eax
addl %esi, %eax
movq 0xe0(%rcx), %rsi
movq %rsi, 0x30(%rsp)
imulq $0x38, %rdx, %r13
movq 0x10(%rsi,%r13), %r11
movq %r11, %rcx
imulq %rax, %rcx
movq (%rsi,%r13), %r15
movups (%r15,%rcx), %xmm0
leaq 0x1(%rax), %rcx
movq 0x48(%rsi,%r13), %rdi
movq %rdi, %rdx
imulq %rcx, %rdx
movq %rdx, 0x20(%rsp)
imulq %r11, %rcx
movups (%r15,%rcx), %xmm1
leaq (%rax,%r14), %rsi
movq %rsi, %rcx
imulq %r11, %rcx
movups (%r15,%rcx), %xmm2
leaq (%rax,%r14), %rbx
incq %rbx
movq %rbx, %rcx
imulq %r11, %rcx
movups (%r15,%rcx), %xmm3
movq %rdi, %rbp
imulq %rax, %rbp
movq %rdi, %r12
imulq %rsi, %r12
movq %rdi, %r10
imulq %rbx, %r10
xorl %edx, %edx
testw %r9w, %r9w
setns %dl
addq %rdx, %rax
incq %rax
movq %rdi, %r8
imulq %rax, %r8
imulq %r11, %rax
movups (%r15,%rax), %xmm6
addq %rbx, %rdx
movq %rdx, %rcx
imulq %r11, %rcx
movq %rdi, %rax
imulq %rdx, %rax
cmpw $0x0, 0x50(%rsp)
movl $0x0, %r9d
cmovnsq %r14, %r9
movups (%r15,%rcx), %xmm8
addq %r9, %rsi
movq %rdi, %r14
imulq %rsi, %r14
imulq %r11, %rsi
movups (%r15,%rsi), %xmm4
addq %r9, %rbx
movq %rdi, %rcx
imulq %rbx, %rcx
imulq %r11, %rbx
movups (%r15,%rbx), %xmm11
movq 0x2e0(%rsp), %rbx
addq %rdx, %r9
imulq %r9, %r11
movups (%r15,%r11), %xmm12
movq 0x30(%rsp), %rdx
movq 0x38(%rdx,%r13), %rdx
movups (%rdx,%rbp), %xmm10
movq 0x1478(%rsp), %rbp
movq 0x20(%rsp), %rsi
movups (%rdx,%rsi), %xmm15
movups (%rdx,%r12), %xmm14
movups (%rdx,%r10), %xmm5
leaq 0x7(%rsp), %rsi
movups (%rdx,%r8), %xmm9
movq 0x88(%rsp), %r8
movups (%rdx,%rax), %xmm7
movq 0x148(%rsp), %rax
movss 0x4e0(%rsp,%rax,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
subps %xmm0, %xmm10
mulps %xmm13, %xmm10
addps %xmm0, %xmm10
movups (%rdx,%r14), %xmm0
movq 0x148(%rsp), %r14
subps %xmm1, %xmm15
mulps %xmm13, %xmm15
addps %xmm1, %xmm15
movups (%rdx,%rcx), %xmm1
imulq %rdi, %r9
movq 0x90(%rsp), %rdi
subps %xmm2, %xmm14
mulps %xmm13, %xmm14
addps %xmm2, %xmm14
movups (%rdx,%r9), %xmm2
movq 0x2f0(%rsp), %r9
movq 0x98(%rsp), %rdx
subps %xmm3, %xmm5
mulps %xmm13, %xmm5
addps %xmm3, %xmm5
subps %xmm6, %xmm9
mulps %xmm13, %xmm9
addps %xmm6, %xmm9
subps %xmm8, %xmm7
mulps %xmm13, %xmm7
addps %xmm8, %xmm7
subps %xmm4, %xmm0
mulps %xmm13, %xmm0
addps %xmm4, %xmm0
subps %xmm11, %xmm1
mulps %xmm13, %xmm1
addps %xmm11, %xmm1
subps %xmm12, %xmm2
mulps %xmm13, %xmm2
addps %xmm12, %xmm2
movaps %xmm10, %xmm4
unpcklps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
unpckhps %xmm5, %xmm10 # xmm10 = xmm10[2],xmm5[2],xmm10[3],xmm5[3]
movaps %xmm15, %xmm3
unpcklps %xmm14, %xmm3 # xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1]
movaps %xmm15, %xmm6
unpckhps %xmm14, %xmm6 # xmm6 = xmm6[2],xmm14[2],xmm6[3],xmm14[3]
unpcklps %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[0],xmm10[1],xmm6[1]
movaps %xmm4, %xmm6
unpcklps %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
unpckhps %xmm3, %xmm6 # xmm6 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
movaps %xmm6, %xmm11
movaps %xmm15, %xmm12
unpcklps %xmm7, %xmm12 # xmm12 = xmm12[0],xmm7[0],xmm12[1],xmm7[1]
unpckhps %xmm7, %xmm15 # xmm15 = xmm15[2],xmm7[2],xmm15[3],xmm7[3]
movaps %xmm9, %xmm3
unpcklps %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
unpckhps %xmm5, %xmm9 # xmm9 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
unpcklps %xmm9, %xmm15 # xmm15 = xmm15[0],xmm9[0],xmm15[1],xmm9[1]
movaps %xmm12, %xmm8
unpcklps %xmm3, %xmm8 # xmm8 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
unpckhps %xmm3, %xmm12 # xmm12 = xmm12[2],xmm3[2],xmm12[3],xmm3[3]
movaps %xmm5, %xmm6
unpcklps %xmm2, %xmm6 # xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
movaps %xmm5, %xmm3
unpckhps %xmm2, %xmm3 # xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
movaps %xmm7, %xmm2
unpcklps %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
unpckhps %xmm1, %xmm7 # xmm7 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
unpcklps %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
movaps %xmm3, 0x280(%rsp)
movaps %xmm6, %xmm3
unpcklps %xmm2, %xmm3 # xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
movaps %xmm3, 0x50(%rsp)
unpckhps %xmm2, %xmm6 # xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
movaps %xmm6, 0x120(%rsp)
movaps %xmm14, %xmm3
unpcklps %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
unpckhps %xmm1, %xmm14 # xmm14 = xmm14[2],xmm1[2],xmm14[3],xmm1[3]
movaps %xmm5, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
unpckhps %xmm0, %xmm5 # xmm5 = xmm5[2],xmm0[2],xmm5[3],xmm0[3]
unpcklps %xmm5, %xmm14 # xmm14 = xmm14[0],xmm5[0],xmm14[1],xmm5[1]
movaps %xmm3, %xmm0
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
unpckhps %xmm1, %xmm3 # xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
movq %rsi, 0x180(%rsp)
movss (%rbx,%r14,4), %xmm1
movss 0x10(%rbx,%r14,4), %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm1, %xmm4
subps %xmm2, %xmm11
movaps %xmm8, 0x270(%rsp)
movaps %xmm8, %xmm5
subps %xmm1, %xmm5
movaps %xmm0, 0x260(%rsp)
subps %xmm1, %xmm0
movaps %xmm12, 0x100(%rsp)
subps %xmm2, %xmm12
movaps %xmm12, 0x10(%rsp)
movaps %xmm3, 0x20(%rsp)
movaps %xmm3, %xmm1
subps %xmm2, %xmm1
movss 0x20(%rbx,%r14,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
subps %xmm3, %xmm10
movaps %xmm15, 0x110(%rsp)
subps %xmm3, %xmm15
movaps %xmm14, 0x30(%rsp)
movaps %xmm14, %xmm2
subps %xmm3, %xmm2
movaps %xmm0, %xmm7
subps %xmm4, %xmm7
movaps %xmm4, %xmm9
movaps %xmm1, %xmm12
movaps %xmm11, %xmm4
subps %xmm11, %xmm12
movaps %xmm2, %xmm11
subps %xmm10, %xmm11
movaps %xmm1, %xmm3
addps %xmm4, %xmm3
movaps %xmm2, %xmm6
addps %xmm10, %xmm6
movaps %xmm7, %xmm8
mulps %xmm3, %xmm8
mulps %xmm11, %xmm3
movaps %xmm12, %xmm13
mulps %xmm6, %xmm13
subps %xmm3, %xmm13
movaps %xmm0, %xmm3
addps %xmm9, %xmm3
movaps %xmm9, %xmm14
movaps %xmm9, 0x40(%rsp)
movaps %xmm7, 0xd0(%rsp)
mulps %xmm7, %xmm6
movaps %xmm3, %xmm9
movaps %xmm11, 0x4b0(%rsp)
mulps %xmm11, %xmm9
subps %xmm6, %xmm9
movaps %xmm12, 0x490(%rsp)
mulps %xmm12, %xmm3
subps %xmm3, %xmm8
movss 0x50(%rbx,%r14,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movss 0x60(%rbx,%r14,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm7, %xmm8
mulps %xmm11, %xmm9
addps %xmm8, %xmm9
movss 0x40(%rbx,%r14,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm3, 0xf0(%rsp)
mulps %xmm3, %xmm13
addps %xmm9, %xmm13
movaps %xmm14, %xmm3
subps %xmm5, %xmm3
movaps %xmm4, %xmm6
movaps 0x10(%rsp), %xmm8
subps %xmm8, %xmm4
movaps %xmm4, %xmm12
movaps %xmm10, %xmm4
subps %xmm15, %xmm4
movaps %xmm6, 0x250(%rsp)
addps %xmm8, %xmm6
movaps %xmm10, 0x4a0(%rsp)
movaps %xmm10, %xmm14
addps %xmm15, %xmm14
movaps %xmm3, %xmm9
mulps %xmm6, %xmm9
mulps %xmm4, %xmm6
movaps %xmm12, %xmm10
mulps %xmm14, %xmm12
subps %xmm6, %xmm12
movaps 0x40(%rsp), %xmm8
addps %xmm5, %xmm8
mulps %xmm3, %xmm14
movaps %xmm8, %xmm6
movaps %xmm4, 0x470(%rsp)
mulps %xmm4, %xmm6
subps %xmm14, %xmm6
mulps %xmm10, %xmm8
subps %xmm8, %xmm9
mulps %xmm7, %xmm9
mulps %xmm11, %xmm6
addps %xmm9, %xmm6
movaps 0xf0(%rsp), %xmm8
mulps %xmm8, %xmm12
addps %xmm6, %xmm12
movaps %xmm5, %xmm14
subps %xmm0, %xmm14
addps %xmm5, %xmm0
movaps 0x10(%rsp), %xmm4
movaps %xmm4, %xmm6
subps %xmm1, %xmm6
addps %xmm4, %xmm1
movaps %xmm15, %xmm9
subps %xmm2, %xmm9
addps %xmm15, %xmm2
movaps %xmm14, %xmm5
mulps %xmm1, %xmm5
mulps %xmm9, %xmm1
movaps %xmm6, %xmm15
mulps %xmm2, %xmm15
subps %xmm1, %xmm15
mulps %xmm14, %xmm2
movaps %xmm0, %xmm1
mulps %xmm9, %xmm1
subps %xmm2, %xmm1
mulps %xmm6, %xmm0
subps %xmm0, %xmm5
movaps %xmm7, 0x480(%rsp)
mulps %xmm7, %xmm5
movaps %xmm11, 0x10(%rsp)
mulps %xmm11, %xmm1
movaps %xmm8, %xmm7
addps %xmm5, %xmm1
mulps %xmm8, %xmm15
addps %xmm1, %xmm15
movaps %xmm13, %xmm8
addps %xmm12, %xmm8
addps %xmm15, %xmm8
movaps %xmm13, %xmm0
minps %xmm12, %xmm0
minps %xmm15, %xmm0
movaps %xmm13, 0x440(%rsp)
movaps %xmm13, %xmm5
movaps %xmm12, 0x450(%rsp)
maxps %xmm12, %xmm5
maxps %xmm15, %xmm5
movaps %xmm8, %xmm1
andps 0x1d353d8(%rip), %xmm1 # 0x1eec6c0
movaps %xmm1, 0x430(%rsp)
mulps 0x1d3aa79(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm5
xorps 0x1d353ce(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm5
movmskps %xmm5, %eax
movss 0x210(%rsp), %xmm0
movaps %xmm0, 0x210(%rsp)
movss 0x200(%rsp), %xmm0
movaps %xmm0, 0x200(%rsp)
movd %edx, %xmm0
movdqa %xmm0, 0x390(%rsp)
movd %edi, %xmm0
movdqa %xmm0, 0x380(%rsp)
testl %eax, %eax
je 0x1b7761
movaps %xmm10, %xmm11
movaps %xmm10, %xmm0
movaps 0x4b0(%rsp), %xmm4
mulps %xmm4, %xmm0
movaps %xmm3, %xmm1
movaps 0x490(%rsp), %xmm10
mulps %xmm10, %xmm1
movaps %xmm3, 0x460(%rsp)
movaps %xmm6, %xmm12
movaps 0x470(%rsp), %xmm3
mulps %xmm3, %xmm6
movaps %xmm14, %xmm15
mulps %xmm11, %xmm15
mulps %xmm3, %xmm10
subps %xmm0, %xmm10
movaps 0xd0(%rsp), %xmm13
movaps %xmm13, %xmm2
mulps %xmm11, %xmm13
mulps %xmm9, %xmm11
subps %xmm6, %xmm11
movaps %xmm8, 0xd0(%rsp)
movaps 0x1d352fc(%rip), %xmm8 # 0x1eec6c0
andps %xmm8, %xmm0
andps %xmm8, %xmm6
cmpltps %xmm6, %xmm0
movaps 0x460(%rsp), %xmm6
blendvps %xmm0, %xmm10, %xmm11
movaps %xmm6, %xmm0
mulps %xmm9, %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm14
mulps %xmm6, %xmm4
subps %xmm2, %xmm4
subps %xmm0, %xmm14
andps %xmm8, %xmm2
andps %xmm8, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm4, %xmm14
mulps %xmm12, %xmm6
subps %xmm1, %xmm13
subps %xmm15, %xmm6
andps %xmm8, %xmm1
andps %xmm8, %xmm15
movaps 0xd0(%rsp), %xmm8
cmpltps %xmm15, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm13, %xmm6
movaps 0x480(%rsp), %xmm1
mulps %xmm6, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm7
addps %xmm0, %xmm7
addps %xmm7, %xmm7
movaps 0x4a0(%rsp), %xmm1
mulps %xmm6, %xmm1
movaps 0x250(%rsp), %xmm0
mulps %xmm14, %xmm0
addps %xmm1, %xmm0
movaps 0x40(%rsp), %xmm3
mulps %xmm11, %xmm3
addps %xmm0, %xmm3
rcpps %xmm7, %xmm1
movaps %xmm7, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d35583(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
addps %xmm3, %xmm3
mulps %xmm3, %xmm0
movaps 0xe0(%rsp), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%rbx,%r14,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm5, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b7761
cmpneqps 0x1d34539(%rip), %xmm7 # 0x1eeba10
andps %xmm7, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b7761
movaps 0x440(%rsp), %xmm10
movaps %xmm10, 0x150(%rsp)
movaps 0x450(%rsp), %xmm9
movaps %xmm9, 0x160(%rsp)
movaps %xmm8, 0x170(%rsp)
movq %rsi, 0x180(%rsp)
movaps %xmm1, 0x190(%rsp)
movaps %xmm0, 0x1c0(%rsp)
movaps %xmm11, 0x1d0(%rsp)
movaps %xmm14, 0x1e0(%rsp)
movaps %xmm6, 0x1f0(%rsp)
pshufd $0x0, 0x210(%rsp), %xmm3 # xmm3 = mem[0,0,0,0]
pshufd $0x0, 0x200(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movzwl 0x8(%r8,%r9), %eax
decl %eax
cvtsi2ss %eax, %xmm2
paddd 0x1d65424(%rip), %xmm3 # 0x1f1c990
paddd 0x1d6542c(%rip), %xmm0 # 0x1f1c9a0
movaps %xmm2, %xmm4
rcpss %xmm2, %xmm4
mulss %xmm4, %xmm2
movss 0x1d39a71(%rip), %xmm6 # 0x1ef0ff8
movaps %xmm6, %xmm1
subss %xmm2, %xmm1
movzwl 0xa(%r8,%r9), %eax
decl %eax
cvtsi2ss %eax, %xmm5
mulss %xmm4, %xmm1
movaps %xmm5, %xmm4
rcpss %xmm5, %xmm4
mulss %xmm4, %xmm5
movaps %xmm6, %xmm2
subss %xmm5, %xmm2
mulss %xmm4, %xmm2
cvtdq2ps %xmm3, %xmm3
mulps %xmm8, %xmm3
addps %xmm10, %xmm3
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm3, %xmm1
movaps %xmm1, 0x150(%rsp)
cvtdq2ps %xmm0, %xmm0
mulps %xmm8, %xmm0
addps %xmm9, %xmm0
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm0, %xmm2
movaps %xmm2, 0x160(%rsp)
movq 0x2e8(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdx,8), %r13
movl 0x90(%rbx,%r14,4), %eax
testl %eax, 0x34(%r13)
je 0x1b7761
movaps 0x190(%rsp), %xmm0
rcpps %xmm8, %xmm3
mulps %xmm3, %xmm8
movaps 0x1d353ec(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm4
subps %xmm8, %xmm4
mulps %xmm3, %xmm4
addps %xmm3, %xmm4
movaps 0x430(%rsp), %xmm3
cmpnltps 0x1d3a6ff(%rip), %xmm3 # 0x1ef1d40
andps %xmm4, %xmm3
mulps %xmm3, %xmm1
minps %xmm5, %xmm1
movaps %xmm1, 0x1a0(%rsp)
mulps %xmm3, %xmm2
minps %xmm5, %xmm2
movaps %xmm2, 0x1b0(%rsp)
movaps 0x1d34389(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, 0x1c0(%rsp), %xmm2
movaps %xmm0, 0x60(%rsp)
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b769b
movaps %xmm1, %xmm0
movmskps %xmm0, %eax
bsfq %rax, %r15
movq 0x10(%rbp), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1b7e5b
cmpq $0x0, 0x40(%r13)
jne 0x1b7e5b
movd 0x1a0(%rsp,%r15,4), %xmm0
movss 0x1b0(%rsp,%r15,4), %xmm1
movss 0x1d0(%rsp,%r15,4), %xmm2
movss 0x1e0(%rsp,%r15,4), %xmm3
movss 0x1f0(%rsp,%r15,4), %xmm4
movss 0x1c0(%rsp,%r15,4), %xmm5
movss %xmm5, 0x80(%rbx,%r14,4)
movss %xmm2, 0xc0(%rbx,%r14,4)
movss %xmm3, 0xd0(%rbx,%r14,4)
movss %xmm4, 0xe0(%rbx,%r14,4)
movd %xmm0, 0xf0(%rbx,%r14,4)
movss %xmm1, 0x100(%rbx,%r14,4)
movl %edi, 0x110(%rbx,%r14,4)
movl %edx, 0x120(%rbx,%r14,4)
movq 0x8(%rbp), %rax
movl (%rax), %eax
movl %eax, 0x130(%rbx,%r14,4)
movq 0x8(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rbx,%r14,4)
movss (%rbx,%r14,4), %xmm1
movss 0x10(%rbx,%r14,4), %xmm2
movss 0x20(%rbx,%r14,4), %xmm0
movss 0x40(%rbx,%r14,4), %xmm3
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm3, 0x40(%rsp)
movss 0x50(%rbx,%r14,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm3, 0x10(%rsp)
movaps 0x50(%rsp), %xmm13
subps %xmm1, %xmm13
movaps 0x120(%rsp), %xmm7
subps %xmm2, %xmm7
movaps 0x280(%rsp), %xmm9
subps %xmm0, %xmm9
movaps 0x260(%rsp), %xmm14
subps %xmm1, %xmm14
movaps 0x20(%rsp), %xmm3
subps %xmm2, %xmm3
movaps %xmm3, 0x20(%rsp)
movaps 0x30(%rsp), %xmm3
subps %xmm0, %xmm3
movaps %xmm3, 0x30(%rsp)
movaps 0x270(%rsp), %xmm15
subps %xmm1, %xmm15
movaps 0x100(%rsp), %xmm12
subps %xmm2, %xmm12
movaps 0x110(%rsp), %xmm10
subps %xmm0, %xmm10
movaps %xmm15, %xmm3
subps %xmm13, %xmm3
movaps %xmm12, %xmm6
subps %xmm7, %xmm6
movaps %xmm10, %xmm5
subps %xmm9, %xmm5
movaps %xmm15, %xmm0
addps %xmm13, %xmm0
movaps %xmm12, %xmm1
addps %xmm7, %xmm1
movaps %xmm10, %xmm2
addps %xmm9, %xmm2
movaps %xmm3, %xmm4
mulps %xmm1, %xmm4
mulps %xmm5, %xmm1
movaps %xmm6, %xmm8
mulps %xmm2, %xmm8
subps %xmm1, %xmm8
movaps %xmm3, 0x110(%rsp)
mulps %xmm3, %xmm2
movaps %xmm0, %xmm1
movaps %xmm5, 0x270(%rsp)
mulps %xmm5, %xmm1
subps %xmm2, %xmm1
movss 0x60(%rbx,%r14,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movaps %xmm6, 0x100(%rsp)
mulps %xmm6, %xmm0
subps %xmm0, %xmm4
movaps %xmm13, %xmm11
subps %xmm14, %xmm11
mulps %xmm3, %xmm4
mulps 0x10(%rsp), %xmm1
addps %xmm4, %xmm1
movaps %xmm7, %xmm6
movaps 0x20(%rsp), %xmm4
subps %xmm4, %xmm6
mulps 0x40(%rsp), %xmm8
addps %xmm1, %xmm8
movaps %xmm9, %xmm5
movaps 0x30(%rsp), %xmm2
subps %xmm2, %xmm5
movaps %xmm7, 0x120(%rsp)
movaps %xmm7, %xmm0
addps %xmm4, %xmm0
movaps %xmm9, 0x280(%rsp)
addps %xmm2, %xmm9
movaps %xmm11, %xmm2
mulps %xmm0, %xmm2
mulps %xmm5, %xmm0
movaps %xmm6, %xmm7
mulps %xmm9, %xmm7
subps %xmm0, %xmm7
movaps %xmm13, 0x50(%rsp)
movaps %xmm13, %xmm0
addps %xmm14, %xmm0
movaps %xmm11, %xmm13
mulps %xmm11, %xmm9
movaps %xmm0, %xmm4
movaps %xmm5, 0x260(%rsp)
mulps %xmm5, %xmm4
subps %xmm9, %xmm4
movaps %xmm6, 0xf0(%rsp)
mulps %xmm6, %xmm0
subps %xmm0, %xmm2
mulps %xmm3, %xmm2
movaps 0x10(%rsp), %xmm11
mulps %xmm11, %xmm4
addps %xmm2, %xmm4
movaps 0x40(%rsp), %xmm9
mulps %xmm9, %xmm7
addps %xmm4, %xmm7
movaps %xmm14, %xmm5
subps %xmm15, %xmm5
addps %xmm14, %xmm15
movaps 0x20(%rsp), %xmm0
movaps %xmm0, %xmm4
subps %xmm12, %xmm4
addps %xmm0, %xmm12
movaps 0x30(%rsp), %xmm0
movaps %xmm0, %xmm14
subps %xmm10, %xmm14
addps %xmm0, %xmm10
movaps %xmm5, %xmm1
mulps %xmm12, %xmm1
mulps %xmm14, %xmm12
movaps %xmm4, %xmm0
mulps %xmm10, %xmm0
subps %xmm12, %xmm0
mulps %xmm5, %xmm10
movaps %xmm15, %xmm2
mulps %xmm14, %xmm2
subps %xmm10, %xmm2
mulps %xmm4, %xmm15
subps %xmm15, %xmm1
movaps %xmm3, 0x30(%rsp)
mulps %xmm3, %xmm1
mulps %xmm11, %xmm2
addps %xmm1, %xmm2
mulps %xmm9, %xmm0
addps %xmm2, %xmm0
movaps %xmm8, %xmm2
addps %xmm7, %xmm2
addps %xmm0, %xmm2
movaps %xmm8, %xmm1
minps %xmm7, %xmm1
minps %xmm0, %xmm1
movaps %xmm8, 0xe0(%rsp)
movaps %xmm8, %xmm10
movaps %xmm7, 0x250(%rsp)
maxps %xmm7, %xmm10
maxps %xmm0, %xmm10
movaps %xmm2, %xmm0
andps 0x1d34cd5(%rip), %xmm0 # 0x1eec6c0
movaps %xmm0, 0xd0(%rsp)
mulps 0x1d3a376(%rip), %xmm0 # 0x1ef1d70
cmpleps %xmm0, %xmm10
xorps 0x1d34cca(%rip), %xmm0 # 0x1eec6d0
cmpnltps %xmm0, %xmm1
orps %xmm1, %xmm10
movmskps %xmm10, %eax
testl %eax, %eax
je 0x1b7e41
movaps %xmm9, %xmm8
movaps 0xf0(%rsp), %xmm3
movaps %xmm3, %xmm0
movaps 0x270(%rsp), %xmm11
mulps %xmm11, %xmm0
movaps %xmm13, %xmm6
movaps %xmm13, %xmm1
movaps 0x100(%rsp), %xmm9
mulps %xmm9, %xmm1
movaps %xmm4, %xmm15
movaps 0x260(%rsp), %xmm7
mulps %xmm7, %xmm15
movaps %xmm2, 0x20(%rsp)
movaps %xmm5, %xmm13
mulps %xmm3, %xmm13
mulps %xmm7, %xmm9
subps %xmm0, %xmm9
movaps %xmm4, 0x100(%rsp)
movaps 0x110(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm3, %xmm4
mulps %xmm14, %xmm3
subps %xmm15, %xmm3
movaps %xmm14, 0x110(%rsp)
movaps 0x50(%rsp), %xmm14
movaps 0x1d34c1b(%rip), %xmm12 # 0x1eec6c0
andps %xmm12, %xmm0
andps %xmm12, %xmm15
cmpltps %xmm15, %xmm0
blendvps %xmm0, %xmm9, %xmm3
movaps %xmm6, %xmm0
mulps 0x110(%rsp), %xmm0
mulps %xmm7, %xmm2
mulps %xmm7, %xmm5
mulps %xmm6, %xmm11
subps %xmm2, %xmm11
subps %xmm0, %xmm5
andps %xmm12, %xmm2
andps %xmm12, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm11, %xmm5
mulps 0x100(%rsp), %xmm6
subps %xmm1, %xmm4
subps %xmm13, %xmm6
andps %xmm12, %xmm1
andps %xmm12, %xmm13
cmpltps %xmm13, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm4, %xmm6
movaps 0x30(%rsp), %xmm1
mulps %xmm6, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm8
addps %xmm0, %xmm8
addps %xmm8, %xmm8
movaps 0x280(%rsp), %xmm1
mulps %xmm6, %xmm1
movaps 0x120(%rsp), %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm14
addps %xmm0, %xmm14
movaps 0x20(%rsp), %xmm11
rcpps %xmm8, %xmm1
movaps %xmm8, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d34eab(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm14, %xmm14
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm14, %xmm0
movss 0x80(%rbx,%r14,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%rbx,%r14,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm10, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b7e41
cmpneqps 0x1d33e5b(%rip), %xmm8 # 0x1eeba10
andps %xmm8, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b7e41
movaps %xmm11, 0x170(%rsp)
movq %rsi, 0x180(%rsp)
movaps %xmm1, 0x190(%rsp)
movaps %xmm0, 0x1c0(%rsp)
movaps %xmm3, 0x1d0(%rsp)
movaps %xmm5, 0x1e0(%rsp)
movaps %xmm6, 0x1f0(%rsp)
movaps %xmm11, %xmm3
subps 0xe0(%rsp), %xmm3
movaps %xmm3, 0x150(%rsp)
movaps %xmm11, %xmm0
subps 0x250(%rsp), %xmm0
movaps %xmm0, 0x160(%rsp)
pshufd $0x0, 0x210(%rsp), %xmm5 # xmm5 = mem[0,0,0,0]
pshufd $0x0, 0x200(%rsp), %xmm4 # xmm4 = mem[0,0,0,0]
paddd 0x1d64d4f(%rip), %xmm5 # 0x1f1c990
movzwl 0x8(%r8,%r9), %eax
decl %eax
cvtsi2ss %eax, %xmm2
paddd 0x1d64d4b(%rip), %xmm4 # 0x1f1c9a0
movaps %xmm2, %xmm6
rcpss %xmm2, %xmm6
mulss %xmm6, %xmm2
movss 0x1d3938f(%rip), %xmm9 # 0x1ef0ff8
movaps %xmm9, %xmm1
subss %xmm2, %xmm1
mulss %xmm6, %xmm1
movzwl 0xa(%r8,%r9), %eax
decl %eax
cvtsi2ss %eax, %xmm6
movaps %xmm6, %xmm7
rcpss %xmm6, %xmm7
mulss %xmm7, %xmm6
movaps %xmm9, %xmm2
subss %xmm6, %xmm2
mulss %xmm7, %xmm2
cvtdq2ps %xmm5, %xmm5
mulps %xmm11, %xmm5
addps %xmm3, %xmm5
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm5, %xmm1
movaps %xmm1, 0x150(%rsp)
cvtdq2ps %xmm4, %xmm3
mulps %xmm11, %xmm3
addps %xmm0, %xmm3
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm3, %xmm2
movaps %xmm2, 0x160(%rsp)
movq (%rbp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdx,8), %r12
movl 0x90(%rbx,%r14,4), %eax
testl %eax, 0x34(%r12)
je 0x1b7e41
movaps 0x190(%rsp), %xmm0
rcpps %xmm11, %xmm3
mulps %xmm3, %xmm11
movaps 0x1d34d0d(%rip), %xmm5 # 0x1eeca10
movaps %xmm5, %xmm4
subps %xmm11, %xmm4
mulps %xmm3, %xmm4
addps %xmm3, %xmm4
movaps 0xd0(%rsp), %xmm3
cmpnltps 0x1d3a020(%rip), %xmm3 # 0x1ef1d40
andps %xmm4, %xmm3
mulps %xmm3, %xmm1
minps %xmm5, %xmm1
movaps %xmm1, 0x1a0(%rsp)
mulps %xmm3, %xmm2
minps %xmm5, %xmm2
movaps %xmm2, 0x1b0(%rsp)
movaps 0x1d33caa(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, 0x1c0(%rsp), %xmm2
movaps %xmm0, 0x60(%rsp)
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b7d7a
movaps %xmm1, %xmm0
movmskps %xmm0, %eax
bsfq %rax, %r15
movq 0x10(%rbp), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1b8203
cmpq $0x0, 0x40(%r12)
jne 0x1b8203
movss 0x1a0(%rsp,%r15,4), %xmm0
movss 0x1b0(%rsp,%r15,4), %xmm1
movss 0x1d0(%rsp,%r15,4), %xmm2
movss 0x1e0(%rsp,%r15,4), %xmm3
movss 0x1f0(%rsp,%r15,4), %xmm4
movss 0x1c0(%rsp,%r15,4), %xmm5
movss %xmm5, 0x80(%rbx,%r14,4)
movss %xmm2, 0xc0(%rbx,%r14,4)
movss %xmm3, 0xd0(%rbx,%r14,4)
movss %xmm4, 0xe0(%rbx,%r14,4)
movss %xmm0, 0xf0(%rbx,%r14,4)
movss %xmm1, 0x100(%rbx,%r14,4)
movl %edi, 0x110(%rbx,%r14,4)
movl %edx, 0x120(%rbx,%r14,4)
movq 0x8(%rbp), %rax
movl (%rax), %eax
movl %eax, 0x130(%rbx,%r14,4)
movq 0x8(%rbp), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rbx,%r14,4)
movq 0x2f8(%rsp), %rcx
leaq -0x1(%rcx), %rax
andq %rax, %rcx
jne 0x1b6c84
jmp 0x1b8565
pshufd $0x0, 0x390(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0x40(%rsp)
pshufd $0x0, 0x380(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0xf0(%rsp)
movq 0x138(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0xe0(%rsp)
movq %r9, %r12
movss 0x80(%rbx,%r14,4), %xmm0
movss %xmm0, 0x10(%rsp)
movss 0x1c0(%rsp,%r15,4), %xmm0
movss 0x1a0(%rsp,%r15,4), %xmm1
movss 0x1b0(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%rbx,%r14,4)
movq 0x8(%rbp), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x1d0(%rsp,%r15,4), %xmm0
movss 0x1e0(%rsp,%r15,4), %xmm3
movss 0x1f0(%rsp,%r15,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x3a0(%rsp)
movaps %xmm3, 0x3b0(%rsp)
movaps %xmm4, 0x3c0(%rsp)
movaps %xmm1, 0x3d0(%rsp)
movaps %xmm2, 0x3e0(%rsp)
movaps 0xf0(%rsp), %xmm0
movaps %xmm0, 0x3f0(%rsp)
movdqa 0x40(%rsp), %xmm0
movdqa %xmm0, 0x400(%rsp)
leaq 0x410(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x410(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x420(%rsp)
movdqa 0xe0(%rsp), %xmm0
movdqa %xmm0, 0x70(%rsp)
leaq 0x70(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movq 0x18(%r13), %rcx
movq %rcx, 0xa8(%rsp)
movq %rax, 0xb0(%rsp)
movq %rbx, 0xb8(%rsp)
leaq 0x3a0(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl $0x4, 0xc8(%rsp)
movq 0x40(%r13), %rax
testq %rax, %rax
je 0x1b8009
leaq 0xa0(%rsp), %rdi
callq *%rax
movq %r12, %r9
movq 0x88(%rsp), %r8
movq 0x90(%rsp), %rdi
movq 0x98(%rsp), %rdx
leaq 0x7(%rsp), %rsi
movdqa 0x70(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1b815f
movq 0x10(%rbp), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1b805d
testb $0x2, (%rcx)
jne 0x1b8033
testb $0x40, 0x3e(%r13)
je 0x1b805d
leaq 0xa0(%rsp), %rdi
callq *%rax
movq %r12, %r9
movq 0x88(%rsp), %r8
movq 0x90(%rsp), %rdi
movq 0x98(%rsp), %rdx
leaq 0x7(%rsp), %rsi
movdqa 0x70(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d339a0(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d33da4(%rip), %xmm1 # 0x1eebe20
je 0x1b816f
movq 0xb8(%rsp), %rax
movq 0xc0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1b816f
pcmpeqd 0x1d338a9(%rip), %xmm1 # 0x1eeba10
pxor 0x1d33cb1(%rip), %xmm1 # 0x1eebe20
ptest 0x1d349a8(%rip), %xmm1 # 0x1eecb20
jne 0x1b818a
movd 0x10(%rsp), %xmm0
movd %xmm0, 0x80(%rbx,%r14,4)
andl $0x0, 0x60(%rsp,%r15,4)
movss 0x80(%rbx,%r14,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps 0x1c0(%rsp), %xmm1
movaps %xmm1, %xmm0
cmpleps %xmm2, %xmm0
andps 0x60(%rsp), %xmm0
movaps %xmm0, 0x60(%rsp)
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1b81f6
movaps 0x1d3382b(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %ecx
testl %ecx, %ecx
je 0x1b81ef
movaps %xmm1, %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %r15
testb %al, %al
jne 0x1b7e94
jmp 0x1b7761
pshufd $0x0, 0x390(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0x20(%rsp)
pshufd $0x0, 0x380(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0x50(%rsp)
movq 0x138(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x120(%rsp)
movss 0x80(%rbx,%r14,4), %xmm0
movss %xmm0, 0x30(%rsp)
movss 0x1c0(%rsp,%r15,4), %xmm0
movss 0x1a0(%rsp,%r15,4), %xmm1
movss 0x1b0(%rsp,%r15,4), %xmm2
movss %xmm0, 0x80(%rbx,%r14,4)
movq 0x8(%rbp), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x1d0(%rsp,%r15,4), %xmm0
movss 0x1e0(%rsp,%r15,4), %xmm3
movss 0x1f0(%rsp,%r15,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x3a0(%rsp)
movaps %xmm3, 0x3b0(%rsp)
movaps %xmm4, 0x3c0(%rsp)
movaps %xmm1, 0x3d0(%rsp)
movaps %xmm2, 0x3e0(%rsp)
movaps 0x50(%rsp), %xmm0
movaps %xmm0, 0x3f0(%rsp)
movdqa 0x20(%rsp), %xmm0
movdqa %xmm0, 0x400(%rsp)
leaq 0x410(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x410(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x420(%rsp)
movdqa 0x120(%rsp), %xmm0
movdqa %xmm0, 0x70(%rsp)
leaq 0x70(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movq 0x18(%r12), %rcx
movq %rcx, 0xa8(%rsp)
movq %rax, 0xb0(%rsp)
movq %rbx, 0xb8(%rsp)
leaq 0x3a0(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl $0x4, 0xc8(%rsp)
movq 0x40(%r12), %rax
testq %rax, %rax
je 0x1b838a
leaq 0xa0(%rsp), %rdi
callq *%rax
movdqa 0x70(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1b84c1
movq 0x10(%rbp), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1b83bf
testb $0x2, (%rcx)
jne 0x1b83b5
testb $0x40, 0x3e(%r12)
je 0x1b83bf
leaq 0xa0(%rsp), %rdi
callq *%rax
movdqa 0x70(%rsp), %xmm0
ptest %xmm0, %xmm0
pcmpeqd 0x1d3363e(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d33a42(%rip), %xmm1 # 0x1eebe20
je 0x1b84d1
movq 0xb8(%rsp), %rax
movq 0xc0(%rsp), %rcx
movaps (%rcx), %xmm2
movups 0xc0(%rax), %xmm3
movups 0xd0(%rax), %xmm4
movups 0xe0(%rax), %xmm5
blendvps %xmm0, %xmm3, %xmm2
movups 0xf0(%rax), %xmm3
movups %xmm2, 0xc0(%rax)
movaps 0x10(%rcx), %xmm2
blendvps %xmm0, %xmm4, %xmm2
movups %xmm2, 0xd0(%rax)
movaps 0x20(%rcx), %xmm2
blendvps %xmm0, %xmm5, %xmm2
movups %xmm2, 0xe0(%rax)
movaps 0x30(%rcx), %xmm2
blendvps %xmm0, %xmm3, %xmm2
movups %xmm2, 0xf0(%rax)
movups 0x100(%rax), %xmm2
movaps 0x40(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x100(%rax)
movups 0x110(%rax), %xmm2
movaps 0x50(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x110(%rax)
movups 0x120(%rax), %xmm2
movaps 0x60(%rcx), %xmm3
blendvps %xmm0, %xmm2, %xmm3
movups %xmm3, 0x120(%rax)
movaps 0x70(%rcx), %xmm2
blendvps %xmm0, 0x130(%rax), %xmm2
movaps %xmm2, 0x130(%rax)
movaps 0x80(%rcx), %xmm2
blendvps %xmm0, 0x140(%rax), %xmm2
movaps %xmm2, 0x140(%rax)
jmp 0x1b84d1
pcmpeqd 0x1d33547(%rip), %xmm1 # 0x1eeba10
pxor 0x1d3394f(%rip), %xmm1 # 0x1eebe20
ptest 0x1d34646(%rip), %xmm1 # 0x1eecb20
jne 0x1b84ec
movd 0x30(%rsp), %xmm0
movd %xmm0, 0x80(%rbx,%r14,4)
andl $0x0, 0x60(%rsp,%r15,4)
movss 0x80(%rbx,%r14,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps 0x1c0(%rsp), %xmm1
movaps %xmm1, %xmm0
cmpleps %xmm2, %xmm0
andps 0x60(%rsp), %xmm0
movaps %xmm0, 0x60(%rsp)
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1b8558
movaps 0x1d334c9(%rip), %xmm2 # 0x1eeb9f0
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, %xmm3
shufps $0xb1, %xmm2, %xmm3 # xmm3 = xmm3[1,0],xmm2[3,2]
minps %xmm2, %xmm3
movaps %xmm3, %xmm1
shufps $0x4e, %xmm3, %xmm1 # xmm1 = xmm1[2,3],xmm3[0,1]
minps %xmm3, %xmm1
cmpeqps %xmm2, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %ecx
testl %ecx, %ecx
je 0x1b8551
movaps %xmm1, %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %r15
testb %al, %al
jne 0x1b8236
jmp 0x1b7e41
movq 0x2d0(%rsp), %rax
incq %rax
cmpq 0x298(%rsp), %rax
movq 0x140(%rsp), %rdx
jne 0x1b698a
movss 0x80(%rbx,%r14,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x240(%rsp)
movaps 0x310(%rsp), %xmm7
movaps 0x230(%rsp), %xmm5
movaps 0x220(%rsp), %xmm6
movaps 0x300(%rsp), %xmm8
jmp 0x1b6577
addq $0x1438, %rsp # imm = 0x1438
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::sse42::SubGridMBIntersectorKPluecker<4, 4, true>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubGridQuadMIntersectorKPluecker<4, 4, true>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
// Shadow-ray (occlusion) traversal of a single ray lane k, extracted from a
// K-wide ray packet, through an N-wide BVH. Uses any-hit semantics: returns
// true as soon as ANY occluding primitive is found, false once the traversal
// stack is exhausted without a hit.
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
// Gather lane k of the packet (org/dir/rdir, near-axis selection, and the
// [tnear, tfar] interval) into a scalar single-ray traversal structure.
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop: // 'pop' label: inner loop jumps back here when no child is hit
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty -> ray is not occluded
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
// intersect() returns false when 'cur' is a leaf (no further descent possible);
// on success 'mask' holds one bit per intersected child box.
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; } // leaf reached: undo node stat and exit down-loop
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
// Any-hit traversal: child visit order need not be front-to-back, since
// any occluder terminates the whole query.
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf; // flag lane k as occluded for the caller
return true;
}
// The primitive intersector may hand back a lazily built subtree to traverse later.
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false; // traversal exhausted without finding an occluder
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xcb8, %rsp # imm = 0xCB8
movq %r9, %r14
movq %rcx, %r15
movq 0xcf0(%rsp), %rax
leaq 0x518(%rsp), %rbp
movq %rdx, -0x8(%rbp)
movss (%rax,%rcx,4), %xmm12
movss 0x10(%rax,%rcx,4), %xmm13
movss 0x20(%rax,%rcx,4), %xmm14
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm15
movss 0x70(%rax,%rcx,4), %xmm8
movss 0x80(%rax,%rcx,4), %xmm5
movss 0x1d678ce(%rip), %xmm9 # 0x1f1ff10
movaps %xmm15, %xmm10
mulss %xmm9, %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movaps %xmm8, %xmm11
mulss %xmm9, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm5, %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x1d678a4(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulss %xmm0, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulss %xmm0, %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %rdi
movslq 0xa0(%rax,%rcx,4), %r8
movslq 0xb0(%rax,%rcx,4), %r9
movq %rdi, %r10
xorq $0x10, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rdx
xorq $0x10, %rdx
movss 0xc0(%rax,%rcx,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
pushq $0x1
popq %rax
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1f6c04b(%rip), %rax # 0x2124730
movq %rax, 0x138(%rsp)
movq %rdx, 0x28(%rsp)
movq %r14, 0xe8(%rsp)
movq %rcx, 0x260(%rsp)
movaps %xmm12, 0x380(%rsp)
movaps %xmm13, 0x370(%rsp)
movaps %xmm14, 0x360(%rsp)
movaps %xmm15, 0x350(%rsp)
movaps %xmm8, 0x230(%rsp)
movaps %xmm5, 0x220(%rsp)
movaps %xmm9, 0x210(%rsp)
movaps %xmm10, 0x200(%rsp)
movaps %xmm11, 0x1f0(%rsp)
movq %rdi, 0x88(%rsp)
movq %r8, 0x80(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x70(%rsp)
movq %r11, 0x68(%rsp)
movaps %xmm7, 0x1e0(%rsp)
movaps %xmm6, 0x240(%rsp)
movq %rbp, %rcx
leaq 0x510(%rsp), %rax
cmpq %rax, %rbp
je 0x1ba437
leaq -0x8(%rcx), %rbp
movq %rcx, 0x288(%rsp)
movq -0x8(%rcx), %r13
testb $0x8, %r13b
jne 0x1b8887
movq %r13, %rax
andq $-0x10, %rax
movss 0x70(%r14,%r15,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x80(%rax,%rdi), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%rdi), %xmm2
subps %xmm12, %xmm2
mulps %xmm10, %xmm2
movaps 0x80(%rax,%r8), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r8), %xmm3
subps %xmm13, %xmm3
movaps 0x80(%rax,%r9), %xmm1
mulps %xmm0, %xmm1
addps 0x20(%rax,%r9), %xmm1
mulps %xmm11, %xmm3
subps %xmm14, %xmm1
mulps %xmm9, %xmm1
maxps %xmm1, %xmm3
movaps %xmm7, %xmm1
maxps %xmm2, %xmm1
maxps %xmm3, %xmm1
movaps 0x80(%rax,%r10), %xmm3
mulps %xmm0, %xmm3
addps 0x20(%rax,%r10), %xmm3
subps %xmm12, %xmm3
movaps 0x80(%rax,%r11), %xmm4
mulps %xmm0, %xmm4
addps 0x20(%rax,%r11), %xmm4
mulps %xmm15, %xmm3
subps %xmm13, %xmm4
mulps %xmm8, %xmm4
movaps 0x80(%rax,%rdx), %xmm2
mulps %xmm0, %xmm2
addps 0x20(%rax,%rdx), %xmm2
subps %xmm14, %xmm2
mulps %xmm5, %xmm2
minps %xmm2, %xmm4
movaps %xmm6, %xmm2
minps %xmm3, %xmm2
minps %xmm4, %xmm2
movl %r13d, %ecx
andl $0x7, %ecx
cmpleps %xmm2, %xmm1
cmpl $0x6, %ecx
je 0x1b88d0
pslld $0x1f, %xmm1
movmskps %xmm1, %ebx
testb $0x8, %r13b
jne 0x1b88cc
testq %rbx, %rbx
je 0x1b88eb
andq $-0x10, %r13
bsfq %rbx, %rcx
leaq -0x1(%rbx), %rsi
xorl %eax, %eax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rbx, %rsi
jne 0x1b88f0
movq %rcx, %r13
testl %eax, %eax
je 0x1b87a5
jmp 0x1b893b
pushq $0x6
jmp 0x1b88ed
movaps 0xe0(%rax), %xmm2
cmpleps %xmm0, %xmm2
cmpltps 0xf0(%rax), %xmm0
andps %xmm2, %xmm0
andps %xmm0, %xmm1
jmp 0x1b887f
pushq $0x4
popq %rax
jmp 0x1b88c2
movq %rcx, (%rbp)
addq $0x8, %rbp
bsfq %rsi, %rdx
leaq -0x1(%rsi), %rcx
movq (%r13,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rsi, %rcx
je 0x1b8931
movq %rdx, (%rbp)
addq $0x8, %rbp
bsfq %rcx, %rdx
leaq -0x1(%rcx), %rsi
jmp 0x1b8900
movq %rdx, %r13
movq 0x28(%rsp), %rdx
jmp 0x1b88c2
cmpl $0x6, %eax
jne 0x1ba427
movl %r13d, %ecx
andl $0xf, %ecx
addq $-0x8, %rcx
setne %sil
je 0x1ba410
andq $-0x10, %r13
xorl %eax, %eax
movq %rax, 0x278(%rsp)
imulq $0x90, %rax, %r12
movss 0x70(%r14,%r15,4), %xmm2
subss 0x80(%r13,%r12), %xmm2
mulss 0x84(%r13,%r12), %xmm2
movq 0x20(%r13,%r12), %xmm1
movq 0x24(%r13,%r12), %xmm3
movdqa %xmm1, %xmm0
pminub %xmm3, %xmm0
pcmpeqb %xmm1, %xmm0
pmovzxbd %xmm1, %xmm1 # xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
cvtdq2ps %xmm1, %xmm8
movss 0x38(%r13,%r12), %xmm5
movss 0x3c(%r13,%r12), %xmm6
movss 0x40(%r13,%r12), %xmm4
movss 0x44(%r13,%r12), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm7, %xmm8
addps %xmm5, %xmm8
movq 0x50(%r13,%r12), %xmm1
pmovzxbd %xmm1, %xmm1 # xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
cvtdq2ps %xmm1, %xmm1
movss 0x74(%r13,%r12), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x68(%r13,%r12), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulps %xmm9, %xmm1
addps %xmm10, %xmm1
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm8, %xmm1
mulps %xmm2, %xmm1
addps %xmm8, %xmm1
pmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
cvtdq2ps %xmm3, %xmm8
mulps %xmm7, %xmm8
addps %xmm5, %xmm8
movq 0x54(%r13,%r12), %xmm3
pmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
cvtdq2ps %xmm3, %xmm3
mulps %xmm9, %xmm3
addps %xmm10, %xmm3
subps %xmm8, %xmm3
mulps %xmm2, %xmm3
addps %xmm8, %xmm3
movq 0x28(%r13,%r12), %xmm5
pmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
cvtdq2ps %xmm5, %xmm7
movss 0x48(%r13,%r12), %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm8, %xmm7
addps %xmm6, %xmm7
movq 0x58(%r13,%r12), %xmm5
pmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
cvtdq2ps %xmm5, %xmm5
movss 0x78(%r13,%r12), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
movss 0x6c(%r13,%r12), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulps %xmm9, %xmm5
addps %xmm10, %xmm5
subps %xmm7, %xmm5
mulps %xmm2, %xmm5
addps %xmm7, %xmm5
movq 0x2c(%r13,%r12), %xmm7
pmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
cvtdq2ps %xmm7, %xmm7
mulps %xmm8, %xmm7
addps %xmm6, %xmm7
movq 0x5c(%r13,%r12), %xmm6
pmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
cvtdq2ps %xmm6, %xmm6
mulps %xmm9, %xmm6
addps %xmm10, %xmm6
subps %xmm7, %xmm6
mulps %xmm2, %xmm6
addps %xmm7, %xmm6
movq 0x30(%r13,%r12), %xmm7
pmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
cvtdq2ps %xmm7, %xmm8
movss 0x4c(%r13,%r12), %xmm9
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm9, %xmm8
addps %xmm4, %xmm8
movq 0x60(%r13,%r12), %xmm7
pmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
cvtdq2ps %xmm7, %xmm7
movss 0x7c(%r13,%r12), %xmm10
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movss 0x70(%r13,%r12), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm10, %xmm7
addps %xmm11, %xmm7
subps %xmm8, %xmm7
mulps %xmm2, %xmm7
addps %xmm8, %xmm7
movq 0x34(%r13,%r12), %xmm8
pmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
cvtdq2ps %xmm8, %xmm8
mulps %xmm9, %xmm8
movaps 0x210(%rsp), %xmm9
addps %xmm4, %xmm8
movq 0x64(%r13,%r12), %xmm4
pmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
cvtdq2ps %xmm4, %xmm4
mulps %xmm10, %xmm4
movaps 0x200(%rsp), %xmm10
addps %xmm11, %xmm4
movaps 0x1f0(%rsp), %xmm11
subps %xmm8, %xmm4
mulps %xmm2, %xmm4
addps %xmm8, %xmm4
movaps 0x230(%rsp), %xmm8
subps %xmm12, %xmm1
mulps %xmm10, %xmm1
subps %xmm13, %xmm5
mulps %xmm11, %xmm5
subps %xmm12, %xmm3
mulps %xmm15, %xmm3
subps %xmm13, %xmm6
mulps %xmm8, %xmm6
movaps %xmm1, %xmm2
pminsd %xmm3, %xmm2
pmaxsd %xmm1, %xmm3
movaps %xmm5, %xmm1
pminsd %xmm6, %xmm1
pmaxsd %xmm2, %xmm1
pmaxsd %xmm5, %xmm6
movaps 0x220(%rsp), %xmm5
subps %xmm14, %xmm7
mulps %xmm9, %xmm7
subps %xmm14, %xmm4
mulps %xmm5, %xmm4
pminsd %xmm3, %xmm6
movaps %xmm7, %xmm2
pminsd %xmm4, %xmm2
pmaxsd %xmm7, %xmm4
movdqa 0x1e0(%rsp), %xmm7
pmaxsd %xmm7, %xmm2
pmaxsd %xmm1, %xmm2
pminsd 0x240(%rsp), %xmm4
pminsd %xmm6, %xmm4
pcmpgtd %xmm4, %xmm2
pmovsxbd %xmm0, %xmm0
pandn %xmm0, %xmm2
movmskps %xmm2, %eax
testl %eax, %eax
je 0x1ba38d
addq %r13, %r12
movzbl %al, %eax
movq %rbp, 0x270(%rsp)
movq %rcx, 0x258(%rsp)
movb %sil, 0xe(%rsp)
movq %r12, 0x250(%rsp)
movq %rax, 0x268(%rsp)
bsfq %rax, %rax
movzwl (%r12,%rax,8), %r9d
movzwl 0x2(%r12,%rax,8), %r10d
movl %r10d, 0x40(%rsp)
movl 0x88(%r12), %edx
movq %rdx, 0xf0(%rsp)
movl 0x4(%r12,%rax,8), %ecx
movq %rcx, 0xa0(%rsp)
movq 0xcf8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x280(%rsp)
movq 0x1e8(%rax), %rax
movq (%rax,%rdx,8), %rax
imulq 0x68(%rax), %rcx
movq %rcx, %rsi
movq 0x58(%rax), %rdi
movss 0x28(%rax), %xmm0
movss 0x2c(%rax), %xmm1
movss 0x30(%rax), %xmm2
subss %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm0, %xmm3
shufps $0x0, %xmm0, %xmm3 # xmm3 = xmm3[0,0],xmm0[0,0]
movaps 0x70(%r14), %xmm4
subps %xmm1, %xmm4
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
divps %xmm2, %xmm4
mulps %xmm3, %xmm4
roundps $0x1, %xmm4, %xmm1
addss 0x1d37cc9(%rip), %xmm0 # 0x1ef09cc
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm1
maxps 0x1d32cff(%rip), %xmm1 # 0x1eeba10
subps %xmm1, %xmm4
movaps %xmm4, 0x500(%rsp)
cvtps2dq %xmm1, %xmm0
movapd %xmm0, 0x4f0(%rsp)
movslq 0x4f0(%rsp,%r15,4), %rcx
movl %r9d, %r8d
movl %r9d, %r12d
movl $0x7fff, %edx # imm = 0x7FFF
andl %edx, %r8d
movl %r8d, 0x170(%rsp)
movq %rsi, %r9
movq %rsi, 0x190(%rsp)
movl (%rdi,%rsi), %esi
addl %r8d, %esi
movl %r10d, %r8d
andl %edx, %r8d
movl %r8d, 0x90(%rsp)
movq %rdi, 0x180(%rsp)
movl 0x4(%rdi,%r9), %edi
movl %edi, %edx
imull %r8d, %edx
addl %esi, %edx
movq 0xe0(%rax), %r9
movq %r9, 0x10(%rsp)
imulq $0x38, %rcx, %r8
movq %r8, 0x50(%rsp)
movq 0x10(%r9,%r8), %rax
movq %rax, %rsi
imulq %rdx, %rsi
movq (%r9,%r8), %rcx
movups (%rcx,%rsi), %xmm0
leaq 0x1(%rdx), %rsi
movq 0x48(%r9,%r8), %r8
movq %r8, %r9
imulq %rsi, %r9
movq %r9, 0x30(%rsp)
imulq %rax, %rsi
movups (%rcx,%rsi), %xmm15
leaq (%rdx,%rdi), %rbp
movq %rbp, %rsi
imulq %rax, %rsi
movups (%rcx,%rsi), %xmm1
leaq (%rdx,%rdi), %r14
incq %r14
movq %r14, %rsi
imulq %rax, %rsi
movups (%rcx,%rsi), %xmm2
movq %r8, %r11
imulq %rdx, %r11
movq %r8, %r10
imulq %rbp, %r10
movq %r8, %r9
imulq %r14, %r9
xorl %r15d, %r15d
testw %r12w, %r12w
setns %r15b
addq %r15, %rdx
incq %rdx
movq %r8, %rsi
imulq %rdx, %rsi
imulq %rax, %rdx
movups (%rcx,%rdx), %xmm5
addq %r14, %r15
movq %r15, %rdx
imulq %rax, %rdx
movups (%rcx,%rdx), %xmm6
movq %r8, %rdx
imulq %r15, %rdx
cmpw $0x0, 0x40(%rsp)
movl $0x0, %r12d
cmovnsq %rdi, %r12
addq %r12, %rbp
movq %r8, %rdi
imulq %rbp, %rdi
imulq %rax, %rbp
movups (%rcx,%rbp), %xmm7
addq %r12, %r14
movq %r8, %rbp
imulq %r14, %rbp
imulq %rax, %r14
movups (%rcx,%r14), %xmm9
addq %r15, %r12
movq 0x260(%rsp), %r15
imulq %r12, %rax
movups (%rcx,%rax), %xmm3
movaps %xmm3, 0x460(%rsp)
movq 0x50(%rsp), %rax
movq 0x10(%rsp), %rcx
movq 0x38(%rcx,%rax), %rax
movups (%rax,%r11), %xmm4
movq 0x68(%rsp), %r11
movq 0x30(%rsp), %r14
movups (%rax,%r14), %xmm14
movups (%rax,%r10), %xmm11
movq 0x70(%rsp), %r10
movups (%rax,%r9), %xmm10
movq 0x78(%rsp), %r9
movups (%rax,%rsi), %xmm8
movb 0xe(%rsp), %sil
movups (%rax,%rdx), %xmm12
movq 0x28(%rsp), %rdx
movss 0x500(%rsp,%r15,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
subps %xmm0, %xmm4
mulps %xmm13, %xmm4
addps %xmm0, %xmm4
movups (%rax,%rdi), %xmm0
movq 0x88(%rsp), %rdi
subps %xmm15, %xmm14
mulps %xmm13, %xmm14
addps %xmm15, %xmm14
movups (%rax,%rbp), %xmm15
movq 0x270(%rsp), %rbp
imulq %r8, %r12
movq 0x80(%rsp), %r8
subps %xmm1, %xmm11
mulps %xmm13, %xmm11
addps %xmm1, %xmm11
movups (%rax,%r12), %xmm1
movaps %xmm1, 0x430(%rsp)
subps %xmm2, %xmm10
mulps %xmm13, %xmm10
addps %xmm2, %xmm10
subps %xmm5, %xmm8
mulps %xmm13, %xmm8
addps %xmm5, %xmm8
subps %xmm6, %xmm12
mulps %xmm13, %xmm12
addps %xmm6, %xmm12
subps %xmm7, %xmm0
mulps %xmm13, %xmm0
addps %xmm7, %xmm0
subps %xmm9, %xmm15
movaps %xmm13, 0x450(%rsp)
mulps %xmm13, %xmm15
addps %xmm9, %xmm15
movaps %xmm4, %xmm3
unpcklps %xmm10, %xmm3 # xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
unpckhps %xmm10, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
movaps %xmm14, %xmm1
unpcklps %xmm11, %xmm1 # xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
movaps %xmm14, %xmm2
unpckhps %xmm11, %xmm2 # xmm2 = xmm2[2],xmm11[2],xmm2[3],xmm11[3]
unpcklps %xmm2, %xmm4 # xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
movaps %xmm3, %xmm2
unpcklps %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
movaps %xmm2, %xmm6
unpckhps %xmm1, %xmm3 # xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
movaps %xmm3, %xmm9
movaps %xmm14, %xmm13
unpcklps %xmm12, %xmm13 # xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
movaps %xmm12, 0x470(%rsp)
unpckhps %xmm12, %xmm14 # xmm14 = xmm14[2],xmm12[2],xmm14[3],xmm12[3]
movaps %xmm8, %xmm1
unpcklps %xmm10, %xmm1 # xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
unpckhps %xmm10, %xmm8 # xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
unpcklps %xmm8, %xmm14 # xmm14 = xmm14[0],xmm8[0],xmm14[1],xmm8[1]
movaps %xmm13, %xmm3
unpcklps %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
unpckhps %xmm1, %xmm13 # xmm13 = xmm13[2],xmm1[2],xmm13[3],xmm1[3]
movaps %xmm10, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
movaps %xmm10, 0x1d0(%rsp)
movaps %xmm10, %xmm2
unpckhps %xmm0, %xmm2 # xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
movaps %xmm11, %xmm5
unpcklps %xmm15, %xmm5 # xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1]
movaps %xmm15, 0x440(%rsp)
unpckhps %xmm15, %xmm11 # xmm11 = xmm11[2],xmm15[2],xmm11[3],xmm15[3]
unpcklps %xmm2, %xmm11 # xmm11 = xmm11[0],xmm2[0],xmm11[1],xmm2[1]
movaps %xmm5, %xmm15
unpcklps %xmm1, %xmm15 # xmm15 = xmm15[0],xmm1[0],xmm15[1],xmm1[1]
unpckhps %xmm1, %xmm5 # xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
leaq 0xf(%rsp), %rax
movq %rax, 0x2c0(%rsp)
movq 0xe8(%rsp), %rax
movss (%rax,%r15,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm6, %xmm7
subps %xmm1, %xmm7
movaps %xmm3, 0x490(%rsp)
subps %xmm1, %xmm3
movaps %xmm3, 0xc0(%rsp)
movaps %xmm15, 0x110(%rsp)
subps %xmm1, %xmm15
movss 0x10(%rax,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm2, %xmm9
movaps %xmm13, 0x480(%rsp)
movaps %xmm13, %xmm3
subps %xmm2, %xmm3
movaps %xmm5, 0x120(%rsp)
movaps %xmm5, %xmm1
subps %xmm2, %xmm1
movss 0x20(%rax,%r15,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
subps %xmm6, %xmm4
movaps %xmm14, 0x4a0(%rsp)
subps %xmm6, %xmm14
movaps %xmm14, 0x50(%rsp)
movaps %xmm11, 0x40(%rsp)
movaps %xmm11, %xmm2
subps %xmm6, %xmm2
movaps %xmm15, %xmm5
subps %xmm7, %xmm5
movaps %xmm7, %xmm13
movaps %xmm1, %xmm12
subps %xmm9, %xmm12
movaps %xmm9, %xmm0
movaps %xmm2, %xmm11
subps %xmm4, %xmm11
movaps %xmm1, %xmm6
addps %xmm9, %xmm6
movaps %xmm2, %xmm7
addps %xmm4, %xmm7
movaps %xmm5, %xmm8
mulps %xmm6, %xmm8
mulps %xmm11, %xmm6
movaps %xmm12, %xmm10
mulps %xmm7, %xmm10
subps %xmm6, %xmm10
movaps %xmm15, %xmm6
addps %xmm13, %xmm6
movaps %xmm13, %xmm14
movaps %xmm13, 0x10(%rsp)
movaps %xmm5, 0x1a0(%rsp)
mulps %xmm5, %xmm7
movaps %xmm6, %xmm9
movaps %xmm11, 0x4e0(%rsp)
mulps %xmm11, %xmm9
subps %xmm7, %xmm9
movaps %xmm12, 0x4d0(%rsp)
mulps %xmm12, %xmm6
subps %xmm6, %xmm8
movss 0x50(%rax,%r15,4), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movss 0x60(%rax,%r15,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm11, %xmm8
mulps %xmm13, %xmm9
addps %xmm8, %xmm9
movq %rax, %r14
movss 0x40(%rax,%r15,4), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movaps %xmm5, 0x30(%rsp)
mulps %xmm5, %xmm10
addps %xmm9, %xmm10
movaps %xmm14, %xmm5
movaps 0xc0(%rsp), %xmm6
subps %xmm6, %xmm5
movaps %xmm5, %xmm9
movaps %xmm0, %xmm5
movaps %xmm0, %xmm14
movaps %xmm3, %xmm8
movaps %xmm3, 0xb0(%rsp)
subps %xmm3, %xmm14
movaps %xmm4, %xmm3
movaps 0x50(%rsp), %xmm0
subps %xmm0, %xmm3
movaps %xmm5, 0x1b0(%rsp)
movaps %xmm5, %xmm7
addps %xmm8, %xmm7
movaps %xmm4, 0x1c0(%rsp)
movaps %xmm4, %xmm12
addps %xmm0, %xmm12
movaps %xmm9, %xmm8
movaps %xmm9, %xmm0
movaps %xmm9, 0x100(%rsp)
mulps %xmm7, %xmm8
mulps %xmm3, %xmm7
movaps %xmm14, %xmm9
mulps %xmm12, %xmm9
subps %xmm7, %xmm9
movaps 0x10(%rsp), %xmm5
movaps %xmm6, %xmm4
addps %xmm6, %xmm5
mulps %xmm0, %xmm12
movaps %xmm5, %xmm7
movaps %xmm3, 0x4c0(%rsp)
mulps %xmm3, %xmm7
subps %xmm12, %xmm7
movaps %xmm14, %xmm0
mulps %xmm14, %xmm5
subps %xmm5, %xmm8
mulps %xmm11, %xmm8
mulps %xmm13, %xmm7
addps %xmm8, %xmm7
movaps 0x30(%rsp), %xmm6
mulps %xmm6, %xmm9
addps %xmm7, %xmm9
movaps %xmm4, %xmm12
subps %xmm15, %xmm12
addps %xmm4, %xmm15
movaps 0xb0(%rsp), %xmm3
movaps %xmm3, %xmm7
subps %xmm1, %xmm7
addps %xmm3, %xmm1
movaps 0x50(%rsp), %xmm3
movaps %xmm3, %xmm4
subps %xmm2, %xmm4
addps %xmm3, %xmm2
movaps %xmm12, %xmm5
mulps %xmm1, %xmm5
mulps %xmm4, %xmm1
movaps %xmm7, %xmm14
mulps %xmm2, %xmm14
subps %xmm1, %xmm14
mulps %xmm12, %xmm2
movaps %xmm15, %xmm1
mulps %xmm4, %xmm1
subps %xmm2, %xmm1
mulps %xmm7, %xmm15
subps %xmm15, %xmm5
movaps %xmm11, 0xc0(%rsp)
mulps %xmm11, %xmm5
movaps %xmm13, %xmm2
mulps %xmm13, %xmm1
movaps 0x100(%rsp), %xmm11
movaps %xmm0, %xmm15
addps %xmm5, %xmm1
mulps %xmm6, %xmm14
addps %xmm1, %xmm14
movaps %xmm10, %xmm3
addps %xmm9, %xmm3
addps %xmm14, %xmm3
movaps %xmm10, %xmm0
minps %xmm9, %xmm0
minps %xmm14, %xmm0
movaps %xmm10, 0xb0(%rsp)
movaps %xmm10, %xmm13
movaps %xmm9, 0x4b0(%rsp)
maxps %xmm9, %xmm13
maxps %xmm14, %xmm13
movaps %xmm3, %xmm1
andps 0x1d333d1(%rip), %xmm1 # 0x1eec6c0
movaps %xmm1, 0x420(%rsp)
mulps 0x1d38a72(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm13
xorps 0x1d333c6(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm13
movmskps %xmm13, %eax
movss 0x170(%rsp), %xmm0
movaps %xmm0, 0x50(%rsp)
movss 0x90(%rsp), %xmm0
movaps %xmm0, 0x170(%rsp)
testl %eax, %eax
movss 0xa0(%rsp), %xmm0
movaps %xmm0, 0x340(%rsp)
je 0x1b99af
movaps %xmm15, %xmm0
movaps 0x4e0(%rsp), %xmm10
mulps %xmm10, %xmm0
movaps %xmm11, %xmm1
movaps %xmm2, 0x90(%rsp)
movaps 0x4d0(%rsp), %xmm6
mulps %xmm6, %xmm1
movaps %xmm3, 0xa0(%rsp)
movaps %xmm7, %xmm3
movaps 0x4c0(%rsp), %xmm9
mulps %xmm9, %xmm7
movaps %xmm12, %xmm14
mulps %xmm15, %xmm14
mulps %xmm9, %xmm6
subps %xmm0, %xmm6
movaps 0x1a0(%rsp), %xmm8
movaps %xmm8, %xmm2
mulps %xmm15, %xmm8
mulps %xmm4, %xmm15
subps %xmm7, %xmm15
movaps 0x1d33303(%rip), %xmm5 # 0x1eec6c0
andps %xmm5, %xmm0
andps %xmm5, %xmm7
cmpltps %xmm7, %xmm0
movaps 0xa0(%rsp), %xmm7
blendvps %xmm0, %xmm6, %xmm15
movaps %xmm11, %xmm0
mulps %xmm4, %xmm0
mulps %xmm9, %xmm2
mulps %xmm9, %xmm12
mulps %xmm11, %xmm10
subps %xmm2, %xmm10
subps %xmm0, %xmm12
andps %xmm5, %xmm2
andps %xmm5, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm10, %xmm12
mulps %xmm3, %xmm11
subps %xmm1, %xmm8
subps %xmm14, %xmm11
andps %xmm5, %xmm1
andps %xmm5, %xmm14
movaps 0x30(%rsp), %xmm5
cmpltps %xmm14, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm8, %xmm11
movaps 0xc0(%rsp), %xmm0
mulps %xmm11, %xmm0
movaps 0x90(%rsp), %xmm1
mulps %xmm12, %xmm1
addps %xmm0, %xmm1
mulps %xmm15, %xmm5
addps %xmm1, %xmm5
addps %xmm5, %xmm5
movaps 0x1c0(%rsp), %xmm0
mulps %xmm11, %xmm0
movaps 0x1b0(%rsp), %xmm1
mulps %xmm12, %xmm1
addps %xmm0, %xmm1
movaps 0x10(%rsp), %xmm3
mulps %xmm15, %xmm3
addps %xmm1, %xmm3
rcpps %xmm5, %xmm1
movaps %xmm5, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d3358b(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movss 0x80(%r14,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r14,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm13, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b99af
cmpneqps 0x1d3253e(%rip), %xmm5 # 0x1eeba10
andps %xmm5, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1b99af
movaps 0xb0(%rsp), %xmm8
movaps %xmm8, 0x290(%rsp)
movaps 0x4b0(%rsp), %xmm10
movaps %xmm10, 0x2a0(%rsp)
movaps %xmm7, 0x2b0(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x2c0(%rsp)
movaps %xmm1, 0x2d0(%rsp)
movaps %xmm0, 0x300(%rsp)
movaps %xmm15, 0x310(%rsp)
movaps %xmm12, 0x320(%rsp)
movaps %xmm11, 0x330(%rsp)
pshufd $0x0, 0x50(%rsp), %xmm3 # xmm3 = mem[0,0,0,0]
pshufd $0x0, 0x170(%rsp), %xmm2 # xmm2 = mem[0,0,0,0]
movq 0x180(%rsp), %r12
movq 0x190(%rsp), %rcx
movzwl 0x8(%r12,%rcx), %eax
decl %eax
cvtsi2ss %eax, %xmm1
paddd 0x1d63417(%rip), %xmm3 # 0x1f1c990
paddd 0x1d6341f(%rip), %xmm2 # 0x1f1c9a0
movaps %xmm1, %xmm4
rcpss %xmm1, %xmm4
mulss %xmm4, %xmm1
movss 0x1d37a64(%rip), %xmm6 # 0x1ef0ff8
movaps %xmm6, %xmm0
subss %xmm1, %xmm0
movzwl 0xa(%r12,%rcx), %eax
decl %eax
cvtsi2ss %eax, %xmm5
mulss %xmm4, %xmm0
movaps %xmm5, %xmm4
rcpss %xmm5, %xmm4
mulss %xmm4, %xmm5
movaps %xmm6, %xmm1
subss %xmm5, %xmm1
mulss %xmm4, %xmm1
cvtdq2ps %xmm3, %xmm3
mulps %xmm7, %xmm3
addps %xmm8, %xmm3
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm3, %xmm0
movaps %xmm0, 0x290(%rsp)
cvtdq2ps %xmm2, %xmm2
mulps %xmm7, %xmm2
addps %xmm10, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm2, %xmm1
movaps %xmm1, 0x2a0(%rsp)
movq 0x280(%rsp), %rax
movq 0x1e8(%rax), %rax
movq 0xf0(%rsp), %r12
movq (%rax,%r12,8), %r12
movl 0x90(%r14,%r15,4), %eax
testl %eax, 0x34(%r12)
je 0x1b99af
movq 0xcf8(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1b9640
cmpq $0x0, 0x48(%r12)
je 0x1ba3af
rcpps %xmm7, %xmm2
mulps %xmm2, %xmm7
movaps 0x1d333c3(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm7, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
movaps 0x420(%rsp), %xmm2
cmpnltps 0x1d386d7(%rip), %xmm2 # 0x1ef1d40
andps %xmm3, %xmm2
mulps %xmm2, %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x2e0(%rsp)
mulps %xmm2, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x2f0(%rsp)
movaps 0x2d0(%rsp), %xmm0
movmskps %xmm0, %eax
bsfq %rax, %rcx
movq %rax, 0x10(%rsp)
testl %eax, %eax
sete 0x90(%rsp)
je 0x1b99af
movd 0xf0(%rsp), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0xc0(%rsp)
pshufd $0x0, 0x340(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0x100(%rsp)
movq %rcx, 0x30(%rsp)
movq 0x138(%rsp), %rax
movaps (%rax), %xmm0
movq 0x30(%rsp), %rax
movaps %xmm0, 0xb0(%rsp)
movss 0x80(%r14,%r15,4), %xmm5
movss 0x300(%rsp,%rax,4), %xmm0
movss 0x2e0(%rsp,%rax,4), %xmm1
movss 0x2f0(%rsp,%rax,4), %xmm2
movss %xmm0, 0x80(%r14,%r15,4)
movq %rax, %rcx
movq 0xcf8(%rsp), %rax
movq 0x8(%rax), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x310(%rsp,%rcx,4), %xmm0
movss 0x320(%rsp,%rcx,4), %xmm3
movq %rcx, 0x30(%rsp)
movss 0x330(%rsp,%rcx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x390(%rsp)
movaps %xmm3, 0x3a0(%rsp)
movaps %xmm4, 0x3b0(%rsp)
movaps %xmm1, 0x3c0(%rsp)
movaps %xmm2, 0x3d0(%rsp)
movaps 0x100(%rsp), %xmm0
movaps %xmm0, 0x3e0(%rsp)
movdqa 0xc0(%rsp), %xmm0
movdqa %xmm0, 0x3f0(%rsp)
leaq 0x400(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x400(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x410(%rsp)
movdqa 0xb0(%rsp), %xmm0
movdqa %xmm0, 0xd0(%rsp)
leaq 0xd0(%rsp), %rcx
movq %rcx, 0x140(%rsp)
movq 0x18(%r12), %rcx
movq %rcx, 0x148(%rsp)
movq %rax, 0x150(%rsp)
movq 0xe8(%rsp), %rax
movq %rax, 0x158(%rsp)
leaq 0x390(%rsp), %rax
movq %rax, 0x160(%rsp)
movl $0x4, 0x168(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
movss %xmm5, 0xa0(%rsp)
je 0x1b989a
leaq 0x140(%rsp), %rdi
callq *%rax
movss 0xa0(%rsp), %xmm5
movb 0xe(%rsp), %sil
movq 0x28(%rsp), %rdx
movq 0x68(%rsp), %r11
movq 0x70(%rsp), %r10
movq 0x78(%rsp), %r9
movq 0x80(%rsp), %r8
movq 0x88(%rsp), %rdi
movdqa 0xd0(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1b994a
movq 0xcf8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1b990c
testb $0x2, (%rcx)
jne 0x1b98d0
testb $0x40, 0x3e(%r12)
je 0x1b990c
leaq 0x140(%rsp), %rdi
callq *%rax
movss 0xa0(%rsp), %xmm5
movb 0xe(%rsp), %sil
movq 0x28(%rsp), %rdx
movq 0x68(%rsp), %r11
movq 0x70(%rsp), %r10
movq 0x78(%rsp), %r9
movq 0x80(%rsp), %r8
movq 0x88(%rsp), %rdi
movdqa 0xd0(%rsp), %xmm0
pcmpeqd 0x1d320f3(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d324f7(%rip), %xmm1 # 0x1eebe20
movq 0x158(%rsp), %rax
movaps 0x1d320c8(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1b995a
pcmpeqd 0x1d320be(%rip), %xmm1 # 0x1eeba10
pxor 0x1d324c6(%rip), %xmm1 # 0x1eebe20
ptest 0x1d331bd(%rip), %xmm1 # 0x1eecb20
jne 0x1b99a1
movss %xmm5, 0x80(%r14,%r15,4)
movq 0x30(%rsp), %rax
movq 0x10(%rsp), %rcx
btcq %rax, %rcx
movq %rcx, 0x10(%rsp)
movq 0x10(%rsp), %rax
bsfq %rax, %rax
cmpq $0x0, 0x10(%rsp)
sete 0x90(%rsp)
jne 0x1b96f3
jmp 0x1b99af
testb $0x1, 0x90(%rsp)
je 0x1ba3af
movaps 0x460(%rsp), %xmm0
movaps 0x430(%rsp), %xmm2
subps %xmm0, %xmm2
movaps 0x450(%rsp), %xmm1
mulps %xmm2, %xmm1
addps %xmm0, %xmm1
movaps 0x1d0(%rsp), %xmm10
movaps %xmm10, %xmm8
unpcklps %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
unpckhps %xmm1, %xmm10 # xmm10 = xmm10[2],xmm1[2],xmm10[3],xmm1[3]
movaps 0x470(%rsp), %xmm1
movaps %xmm1, %xmm0
movaps 0x440(%rsp), %xmm2
unpcklps %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
unpckhps %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
unpcklps %xmm1, %xmm10 # xmm10 = xmm10[0],xmm1[0],xmm10[1],xmm1[1]
movaps %xmm8, %xmm12
unpcklps %xmm0, %xmm12 # xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
unpckhps %xmm0, %xmm8 # xmm8 = xmm8[2],xmm0[2],xmm8[3],xmm0[3]
movss (%r14,%r15,4), %xmm1
movss 0x10(%r14,%r15,4), %xmm2
movss 0x20(%r14,%r15,4), %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm1, %xmm12
subps %xmm2, %xmm8
subps %xmm0, %xmm10
movaps 0x110(%rsp), %xmm3
subps %xmm1, %xmm3
movaps %xmm3, 0x110(%rsp)
movaps 0x120(%rsp), %xmm3
subps %xmm2, %xmm3
movaps %xmm3, 0x120(%rsp)
movaps 0x40(%rsp), %xmm3
subps %xmm0, %xmm3
movaps %xmm3, 0x40(%rsp)
movaps 0x490(%rsp), %xmm3
subps %xmm1, %xmm3
movaps 0x480(%rsp), %xmm4
subps %xmm2, %xmm4
movaps 0x4a0(%rsp), %xmm14
subps %xmm0, %xmm14
movaps %xmm3, %xmm6
subps %xmm12, %xmm6
movaps %xmm4, %xmm13
subps %xmm8, %xmm13
movaps %xmm14, %xmm9
subps %xmm10, %xmm9
movaps %xmm3, %xmm0
addps %xmm12, %xmm0
movaps %xmm4, %xmm1
addps %xmm8, %xmm1
movaps %xmm14, %xmm2
addps %xmm10, %xmm2
movaps %xmm6, %xmm5
mulps %xmm1, %xmm5
mulps %xmm9, %xmm1
movaps %xmm13, %xmm7
mulps %xmm2, %xmm7
subps %xmm1, %xmm7
movaps %xmm6, 0x90(%rsp)
mulps %xmm6, %xmm2
movaps %xmm0, %xmm1
movaps %xmm9, 0xc0(%rsp)
mulps %xmm9, %xmm1
subps %xmm2, %xmm1
movss 0x50(%r14,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm2, 0x10(%rsp)
movaps %xmm13, 0x100(%rsp)
mulps %xmm13, %xmm0
subps %xmm0, %xmm5
movss 0x60(%r14,%r15,4), %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm11, %xmm5
mulps %xmm2, %xmm1
addps %xmm5, %xmm1
movss 0x40(%r14,%r15,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm6, %xmm7
addps %xmm1, %xmm7
movaps %xmm12, %xmm15
movaps 0x110(%rsp), %xmm5
subps %xmm5, %xmm15
movaps %xmm8, %xmm13
movaps 0x120(%rsp), %xmm1
subps %xmm1, %xmm13
movaps %xmm10, %xmm9
movaps 0x40(%rsp), %xmm2
subps %xmm2, %xmm9
movaps %xmm8, 0xa0(%rsp)
movaps %xmm8, %xmm0
addps %xmm1, %xmm0
movaps %xmm1, %xmm8
movaps %xmm10, 0x1d0(%rsp)
movaps %xmm10, %xmm1
addps %xmm2, %xmm1
movaps %xmm15, %xmm2
mulps %xmm0, %xmm2
mulps %xmm9, %xmm0
movaps %xmm13, %xmm10
mulps %xmm1, %xmm10
subps %xmm0, %xmm10
movaps %xmm12, 0x30(%rsp)
movaps %xmm12, %xmm0
addps %xmm5, %xmm0
movaps %xmm5, %xmm12
mulps %xmm15, %xmm1
movaps %xmm0, %xmm5
movaps %xmm9, 0xb0(%rsp)
mulps %xmm9, %xmm5
subps %xmm1, %xmm5
mulps %xmm13, %xmm0
subps %xmm0, %xmm2
mulps %xmm11, %xmm2
mulps 0x10(%rsp), %xmm5
addps %xmm2, %xmm5
mulps %xmm6, %xmm10
addps %xmm5, %xmm10
movaps %xmm12, %xmm0
subps %xmm3, %xmm12
addps %xmm0, %xmm3
movaps %xmm8, %xmm9
subps %xmm4, %xmm9
addps %xmm8, %xmm4
movaps 0x40(%rsp), %xmm0
movaps %xmm0, %xmm8
subps %xmm14, %xmm8
addps %xmm0, %xmm14
movaps %xmm12, %xmm1
mulps %xmm4, %xmm1
mulps %xmm8, %xmm4
movaps %xmm9, %xmm0
mulps %xmm14, %xmm0
subps %xmm4, %xmm0
movaps %xmm6, %xmm4
mulps %xmm12, %xmm14
movaps %xmm3, %xmm2
mulps %xmm8, %xmm2
subps %xmm14, %xmm2
mulps %xmm9, %xmm3
subps %xmm3, %xmm1
movaps %xmm11, 0x40(%rsp)
mulps %xmm11, %xmm1
mulps 0x10(%rsp), %xmm2
addps %xmm1, %xmm2
mulps %xmm6, %xmm0
addps %xmm2, %xmm0
movaps %xmm7, %xmm6
addps %xmm10, %xmm6
addps %xmm0, %xmm6
movaps %xmm7, %xmm1
minps %xmm10, %xmm1
minps %xmm0, %xmm1
movaps %xmm7, 0x1b0(%rsp)
movaps %xmm7, %xmm5
movaps %xmm10, 0x1c0(%rsp)
maxps %xmm10, %xmm5
maxps %xmm0, %xmm5
movaps %xmm6, %xmm0
andps 0x1d32a37(%rip), %xmm0 # 0x1eec6c0
movaps %xmm0, 0x1a0(%rsp)
mulps 0x1d380d8(%rip), %xmm0 # 0x1ef1d70
cmpleps %xmm0, %xmm5
xorps 0x1d32a2d(%rip), %xmm0 # 0x1eec6d0
cmpnltps %xmm0, %xmm1
orps %xmm1, %xmm5
movmskps %xmm5, %eax
testl %eax, %eax
je 0x1ba30c
movaps %xmm13, %xmm11
movaps %xmm15, %xmm13
movaps %xmm11, %xmm0
movaps %xmm4, 0x120(%rsp)
movaps 0xc0(%rsp), %xmm4
mulps %xmm4, %xmm0
movaps %xmm15, %xmm1
movaps 0x100(%rsp), %xmm7
mulps %xmm7, %xmm1
movaps %xmm9, %xmm14
movaps %xmm9, 0x110(%rsp)
movaps 0xb0(%rsp), %xmm9
mulps %xmm9, %xmm14
movaps %xmm12, %xmm15
mulps %xmm11, %xmm15
mulps %xmm9, %xmm7
subps %xmm0, %xmm7
movaps 0x90(%rsp), %xmm3
movaps %xmm3, %xmm2
mulps %xmm11, %xmm3
mulps %xmm8, %xmm11
subps %xmm14, %xmm11
movaps 0x1d32995(%rip), %xmm10 # 0x1eec6c0
andps %xmm10, %xmm0
andps %xmm10, %xmm14
cmpltps %xmm14, %xmm0
blendvps %xmm0, %xmm7, %xmm11
movaps %xmm13, %xmm0
mulps %xmm8, %xmm0
mulps %xmm9, %xmm2
mulps %xmm9, %xmm12
mulps %xmm13, %xmm4
subps %xmm2, %xmm4
subps %xmm0, %xmm12
andps %xmm10, %xmm2
andps %xmm10, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm4, %xmm12
movaps 0x120(%rsp), %xmm4
mulps 0x110(%rsp), %xmm13
subps %xmm1, %xmm3
subps %xmm15, %xmm13
andps %xmm10, %xmm1
andps %xmm10, %xmm15
cmpltps %xmm15, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm3, %xmm13
movaps 0x40(%rsp), %xmm1
mulps %xmm13, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm12, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm4
addps %xmm0, %xmm4
addps %xmm4, %xmm4
movaps 0x1d0(%rsp), %xmm0
mulps %xmm13, %xmm0
movaps 0xa0(%rsp), %xmm1
mulps %xmm12, %xmm1
addps %xmm0, %xmm1
movaps 0x30(%rsp), %xmm3
mulps %xmm11, %xmm3
addps %xmm1, %xmm3
rcpps %xmm4, %xmm1
movaps %xmm4, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d32c1e(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movss 0x80(%r14,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r14,%r15,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm5, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1ba30c
cmpneqps 0x1d31bd2(%rip), %xmm4 # 0x1eeba10
andps %xmm4, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1ba30c
movaps %xmm6, 0x2b0(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x2c0(%rsp)
movaps %xmm1, 0x2d0(%rsp)
movaps %xmm0, 0x300(%rsp)
movaps %xmm11, 0x310(%rsp)
movaps %xmm12, 0x320(%rsp)
movaps %xmm13, 0x330(%rsp)
movaps %xmm6, %xmm3
subps 0x1b0(%rsp), %xmm3
movaps %xmm3, 0x290(%rsp)
movaps %xmm6, %xmm2
subps 0x1c0(%rsp), %xmm2
movaps %xmm2, 0x2a0(%rsp)
pshufd $0x0, 0x50(%rsp), %xmm5 # xmm5 = mem[0,0,0,0]
pshufd $0x0, 0x170(%rsp), %xmm4 # xmm4 = mem[0,0,0,0]
paddd 0x1d62ac5(%rip), %xmm5 # 0x1f1c990
movq 0x180(%rsp), %r12
movq 0x190(%rsp), %rcx
movzwl 0x8(%r12,%rcx), %eax
decl %eax
cvtsi2ss %eax, %xmm1
paddd 0x1d62ab1(%rip), %xmm4 # 0x1f1c9a0
movaps %xmm6, %xmm9
movaps %xmm1, %xmm6
rcpss %xmm1, %xmm6
mulss %xmm6, %xmm1
movss 0x1d370f1(%rip), %xmm8 # 0x1ef0ff8
movaps %xmm8, %xmm0
subss %xmm1, %xmm0
mulss %xmm6, %xmm0
movzwl 0xa(%r12,%rcx), %eax
decl %eax
cvtsi2ss %eax, %xmm6
movaps %xmm6, %xmm7
rcpss %xmm6, %xmm7
mulss %xmm7, %xmm6
movaps %xmm8, %xmm1
subss %xmm6, %xmm1
mulss %xmm7, %xmm1
cvtdq2ps %xmm5, %xmm5
mulps %xmm9, %xmm5
addps %xmm3, %xmm5
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm5, %xmm0
movaps %xmm0, 0x290(%rsp)
cvtdq2ps %xmm4, %xmm3
mulps %xmm9, %xmm3
addps %xmm2, %xmm3
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm3, %xmm1
movaps %xmm1, 0x2a0(%rsp)
movq 0xcf8(%rsp), %rax
movq (%rax), %rax
movq 0x1e8(%rax), %rax
movq 0xf0(%rsp), %rcx
movq (%rax,%rcx,8), %r12
movl 0x90(%r14,%r15,4), %eax
testl %eax, 0x34(%r12)
je 0x1ba30c
movaps %xmm9, %xmm6
movq 0xcf8(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1b9fbc
cmpq $0x0, 0x48(%r12)
je 0x1ba3af
rcpps %xmm6, %xmm2
mulps %xmm2, %xmm6
movaps 0x1d32a47(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm6, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
movaps 0x1a0(%rsp), %xmm2
cmpnltps 0x1d37d5b(%rip), %xmm2 # 0x1ef1d40
andps %xmm3, %xmm2
mulps %xmm2, %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x2e0(%rsp)
mulps %xmm2, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x2f0(%rsp)
movaps 0x2d0(%rsp), %xmm0
movmskps %xmm0, %ecx
bsfq %rcx, %rax
testl %ecx, %ecx
sete 0x40(%rsp)
je 0x1ba30c
movd 0xf0(%rsp), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0xf0(%rsp)
pshufd $0x0, 0x340(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0x190(%rsp)
movq %rax, 0x50(%rsp)
movq 0x138(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x180(%rsp)
movq 0x50(%rsp), %rax
movq %rcx, 0x10(%rsp)
movss 0x80(%r14,%r15,4), %xmm5
movss 0x300(%rsp,%rax,4), %xmm0
movss 0x2e0(%rsp,%rax,4), %xmm1
movss 0x2f0(%rsp,%rax,4), %xmm2
movss %xmm0, 0x80(%r14,%r15,4)
movq %rax, %rcx
movq 0xcf8(%rsp), %rax
movq 0x8(%rax), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x310(%rsp,%rcx,4), %xmm0
movss 0x320(%rsp,%rcx,4), %xmm3
movq %rcx, 0x50(%rsp)
movss 0x330(%rsp,%rcx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x390(%rsp)
movaps %xmm3, 0x3a0(%rsp)
movaps %xmm4, 0x3b0(%rsp)
movaps %xmm1, 0x3c0(%rsp)
movaps %xmm2, 0x3d0(%rsp)
movaps 0x190(%rsp), %xmm0
movaps %xmm0, 0x3e0(%rsp)
movdqa 0xf0(%rsp), %xmm0
movdqa %xmm0, 0x3f0(%rsp)
leaq 0x400(%rsp), %rcx
pcmpeqd %xmm0, %xmm0
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x400(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x410(%rsp)
movdqa 0x180(%rsp), %xmm0
movdqa %xmm0, 0xd0(%rsp)
leaq 0xd0(%rsp), %rcx
movq %rcx, 0x140(%rsp)
movq 0x18(%r12), %rcx
movq %rcx, 0x148(%rsp)
movq %rax, 0x150(%rsp)
movq 0xe8(%rsp), %rax
movq %rax, 0x158(%rsp)
leaq 0x390(%rsp), %rax
movq %rax, 0x160(%rsp)
movl $0x4, 0x168(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
movss %xmm5, 0x30(%rsp)
je 0x1ba20d
leaq 0x140(%rsp), %rdi
callq *%rax
movss 0x30(%rsp), %xmm5
movb 0xe(%rsp), %sil
movq 0x28(%rsp), %rdx
movq 0x68(%rsp), %r11
movq 0x70(%rsp), %r10
movq 0x78(%rsp), %r9
movq 0x80(%rsp), %r8
movq 0x88(%rsp), %rdi
movdqa 0xd0(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1ba2ba
movq 0xcf8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1ba27c
testb $0x2, (%rcx)
jne 0x1ba243
testb $0x40, 0x3e(%r12)
je 0x1ba27c
leaq 0x140(%rsp), %rdi
callq *%rax
movss 0x30(%rsp), %xmm5
movb 0xe(%rsp), %sil
movq 0x28(%rsp), %rdx
movq 0x68(%rsp), %r11
movq 0x70(%rsp), %r10
movq 0x78(%rsp), %r9
movq 0x80(%rsp), %r8
movq 0x88(%rsp), %rdi
movdqa 0xd0(%rsp), %xmm0
pcmpeqd 0x1d31783(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d31b87(%rip), %xmm1 # 0x1eebe20
movq 0x158(%rsp), %rax
movaps 0x1d31758(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1ba2ca
pcmpeqd 0x1d3174e(%rip), %xmm1 # 0x1eeba10
pxor 0x1d31b56(%rip), %xmm1 # 0x1eebe20
ptest 0x1d3284d(%rip), %xmm1 # 0x1eecb20
movq 0x10(%rsp), %rcx
movq 0x50(%rsp), %rax
jne 0x1ba301
movss %xmm5, 0x80(%r14,%r15,4)
btcq %rax, %rcx
bsfq %rcx, %rax
testq %rcx, %rcx
sete 0x40(%rsp)
jne 0x1ba067
jmp 0x1ba30c
testb $0x1, 0x40(%rsp)
je 0x1ba3af
movq 0x268(%rsp), %rcx
leaq -0x1(%rcx), %rax
andq %rax, %rcx
movq %rcx, %rax
movaps 0x380(%rsp), %xmm12
movaps 0x370(%rsp), %xmm13
movaps 0x360(%rsp), %xmm14
movaps 0x350(%rsp), %xmm15
movaps 0x230(%rsp), %xmm8
movaps 0x220(%rsp), %xmm5
movaps 0x210(%rsp), %xmm9
movaps 0x200(%rsp), %xmm10
movaps 0x1f0(%rsp), %xmm11
movdqa 0x1e0(%rsp), %xmm7
movq 0x258(%rsp), %rcx
movq 0x250(%rsp), %r12
jne 0x1b8c62
movq 0x278(%rsp), %rax
incq %rax
cmpq %rcx, %rax
setb %sil
movaps 0x240(%rsp), %xmm6
jne 0x1b895e
jmp 0x1ba410
movaps 0x380(%rsp), %xmm12
movaps 0x370(%rsp), %xmm13
movaps 0x360(%rsp), %xmm14
movaps 0x350(%rsp), %xmm15
movaps 0x230(%rsp), %xmm8
movaps 0x220(%rsp), %xmm5
movaps 0x210(%rsp), %xmm9
movaps 0x200(%rsp), %xmm10
movaps 0x1f0(%rsp), %xmm11
movdqa 0x1e0(%rsp), %xmm7
movaps 0x240(%rsp), %xmm6
xorl %eax, %eax
testb $0x1, %sil
je 0x1ba427
movl $0xff800000, 0x80(%r14,%r15,4) # imm = 0xFF800000
pushq $0x1
popq %rax
testb $0x3, %al
movq 0x288(%rsp), %rcx
je 0x1b8781
leaq 0x510(%rsp), %rax
cmpq %rax, %rcx
setne %al
addq $0xcb8, %rsp # imm = 0xCB8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::BVHNIntersectorKHybrid<4, 4, 1, true, embree::sse42::SubGridIntersectorKPluecker<4, 4, true>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::sse42::SubGridQuadMIntersectorKPluecker<4, 4, true>&, embree::RayK<4>&, embree::sse42::TravRayK<4, true> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xc58, %rsp # imm = 0xC58
movq %r9, %r14
movq 0xc90(%rsp), %rax
leaq 0x4b8(%rsp), %rsi
movq %rsi, 0x20(%rsp)
movq %rdx, -0x8(%rsi)
movss (%rax,%rcx,4), %xmm8
movss 0x10(%rax,%rcx,4), %xmm9
movss 0x20(%rax,%rcx,4), %xmm10
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
movss 0x60(%rax,%rcx,4), %xmm11
movss 0x70(%rax,%rcx,4), %xmm12
movss 0x80(%rax,%rcx,4), %xmm13
movss 0x1d5f7f9(%rip), %xmm14 # 0x1f1ff10
movaps %xmm11, %xmm15
mulss %xmm14, %xmm15
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
movaps %xmm12, %xmm7
mulss %xmm14, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulss %xmm13, %xmm14
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movss 0x1d5f7d0(%rip), %xmm0 # 0x1f1ff14
mulss %xmm0, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulss %xmm0, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulss %xmm0, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movslq 0x90(%rax,%rcx,4), %r13
movslq 0xa0(%rax,%rcx,4), %rdi
movslq 0xb0(%rax,%rcx,4), %r8
movq %r13, %r9
xorq $0x10, %r9
movq %rdi, %r10
xorq $0x10, %r10
movq %r8, %r11
xorq $0x10, %r11
movss 0xc0(%rax,%rcx,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movss 0xd0(%rax,%rcx,4), %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movq %r13, %rax
shrq $0x2, %rax
movq %rax, 0x218(%rsp)
movq %r9, %rax
shrq $0x2, %rax
movq %rax, 0x210(%rsp)
movq %rdi, %rax
shrq $0x2, %rax
movq %rax, 0x208(%rsp)
movq %r10, %rax
shrq $0x2, %rax
movq %rax, 0x200(%rsp)
movq %r8, %rax
shrq $0x2, %rax
movq %rax, 0x1f8(%rsp)
pushq $0x1
popq %rax
movq %rcx, 0x8(%rsp)
shll %cl, %eax
movq %r11, %rcx
shrq $0x2, %rcx
movq %rcx, 0x1f0(%rsp)
cltq
shlq $0x4, %rax
addq 0x1f63f16(%rip), %rax # 0x2124730
movq %rax, 0x148(%rsp)
movq 0xc98(%rsp), %rdx
movaps %xmm8, 0x360(%rsp)
movaps %xmm9, 0x350(%rsp)
movaps %xmm10, 0x340(%rsp)
movaps %xmm11, 0x330(%rsp)
movaps %xmm12, 0x320(%rsp)
movaps %xmm13, 0x310(%rsp)
movaps %xmm14, 0x300(%rsp)
movaps %xmm15, 0x2f0(%rsp)
movaps %xmm7, 0x1c0(%rsp)
movq %rdi, 0x48(%rsp)
movq %r8, 0x40(%rsp)
movq %r9, 0x38(%rsp)
movq %r10, 0x30(%rsp)
movq %r11, 0x28(%rsp)
movaps %xmm6, 0x1b0(%rsp)
movaps %xmm4, 0x1a0(%rsp)
movq %r14, 0x1e0(%rsp)
movq 0x20(%rsp), %rcx
leaq 0x4b0(%rsp), %rax
cmpq %rax, %rcx
je 0x1c2108
leaq -0x8(%rcx), %rax
movq %rax, 0x20(%rsp)
movq %rcx, 0x238(%rsp)
movq -0x8(%rcx), %rbx
testb $0x8, %bl
jne 0x1c0947
movaps 0x20(%rbx,%r13), %xmm0
subps %xmm8, %xmm0
mulps %xmm15, %xmm0
movaps 0x20(%rbx,%rdi), %xmm1
subps %xmm9, %xmm1
mulps %xmm7, %xmm1
maxps %xmm1, %xmm0
movaps 0x20(%rbx,%r8), %xmm1
subps %xmm10, %xmm1
mulps %xmm14, %xmm1
movaps 0x20(%rbx,%r9), %xmm2
subps %xmm8, %xmm2
mulps %xmm11, %xmm2
movaps 0x20(%rbx,%r10), %xmm3
subps %xmm9, %xmm3
mulps %xmm12, %xmm3
minps %xmm3, %xmm2
movaps 0x20(%rbx,%r11), %xmm3
subps %xmm10, %xmm3
mulps %xmm13, %xmm3
maxps %xmm6, %xmm1
maxps %xmm1, %xmm0
minps %xmm4, %xmm3
minps %xmm3, %xmm2
cmpleps %xmm2, %xmm0
movmskps %xmm0, %r12d
testb $0x8, %bl
jne 0x1c097d
testq %r12, %r12
je 0x1c0981
andq $-0x10, %rbx
bsfq %r12, %rcx
leaq -0x1(%r12), %rsi
xorl %eax, %eax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %r12, %rsi
jne 0x1c0986
movq %rcx, %rbx
testl %eax, %eax
je 0x1c08d6
jmp 0x1c09d7
pushq $0x6
jmp 0x1c0983
pushq $0x4
popq %rax
jmp 0x1c0973
movq 0x20(%rsp), %rdx
movq %rcx, (%rdx)
addq $0x8, %rdx
movq %rdx, 0x20(%rsp)
bsfq %rsi, %rdx
leaq -0x1(%rsi), %rcx
movq (%rbx,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
andq %rsi, %rcx
je 0x1c09ca
movq 0x20(%rsp), %rsi
movq %rdx, (%rsi)
addq $0x8, %rsi
movq %rsi, 0x20(%rsp)
bsfq %rcx, %rdx
leaq -0x1(%rcx), %rsi
jmp 0x1c099f
movq %rdx, %rbx
movq 0xc98(%rsp), %rdx
jmp 0x1c0973
cmpl $0x6, %eax
jne 0x1c20f8
movl %ebx, %esi
andl $0xf, %esi
addq $-0x8, %rsi
setne %cl
je 0x1c20dd
andq $-0x10, %rbx
xorl %eax, %eax
movb %cl, 0x7(%rsp)
movq %rax, 0x220(%rsp)
imulq $0x58, %rax, %rax
movq %rdx, %rbp
leaq (%rbx,%rax), %rdx
movq 0x20(%rbx,%rax), %xmm1
movq 0x24(%rbx,%rax), %xmm0
pminub %xmm1, %xmm0
pcmpeqb %xmm1, %xmm0
movss 0x38(%rbx,%rax), %xmm3
movss 0x3c(%rbx,%rax), %xmm4
movss 0x44(%rbx,%rax), %xmm5
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movq 0x218(%rsp), %rcx
movq 0x20(%rcx,%rdx), %xmm1
pmovzxbd %xmm1, %xmm1 # xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
cvtdq2ps %xmm1, %xmm1
mulps %xmm5, %xmm1
addps %xmm3, %xmm1
movq 0x210(%rsp), %rcx
movq 0x20(%rcx,%rdx), %xmm2
pmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
cvtdq2ps %xmm2, %xmm2
mulps %xmm5, %xmm2
addps %xmm3, %xmm2
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movss 0x48(%rbx,%rax), %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
movq 0x208(%rsp), %rcx
movq 0x20(%rcx,%rdx), %xmm3
pmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
cvtdq2ps %xmm3, %xmm6
mulps %xmm5, %xmm6
addps %xmm4, %xmm6
movq 0x200(%rsp), %rcx
movq 0x20(%rcx,%rdx), %xmm3
pmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
cvtdq2ps %xmm3, %xmm3
mulps %xmm5, %xmm3
addps %xmm4, %xmm3
movss 0x4c(%rbx,%rax), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movq 0x1f8(%rsp), %rcx
movq 0x20(%rcx,%rdx), %xmm4
pmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
cvtdq2ps %xmm4, %xmm4
mulps %xmm7, %xmm4
movq 0x1f0(%rsp), %rcx
movq %rdx, 0x230(%rsp)
movq 0x20(%rcx,%rdx), %xmm5
movq %rbp, %rdx
pmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
cvtdq2ps %xmm5, %xmm5
mulps %xmm7, %xmm5
movss 0x40(%rbx,%rax), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
addps %xmm7, %xmm4
addps %xmm7, %xmm5
movaps 0x1c0(%rsp), %xmm7
subps %xmm8, %xmm1
mulps %xmm15, %xmm1
subps %xmm9, %xmm6
mulps %xmm7, %xmm6
maxps %xmm6, %xmm1
movaps 0x1b0(%rsp), %xmm6
subps %xmm8, %xmm2
mulps %xmm11, %xmm2
subps %xmm9, %xmm3
mulps %xmm12, %xmm3
minps %xmm3, %xmm2
subps %xmm10, %xmm4
mulps %xmm14, %xmm4
maxps %xmm6, %xmm4
maxps %xmm4, %xmm1
movaps 0x1a0(%rsp), %xmm4
subps %xmm10, %xmm5
mulps %xmm13, %xmm5
minps %xmm4, %xmm5
minps %xmm5, %xmm2
cmpleps %xmm2, %xmm1
pxor 0x1d2b2a9(%rip), %xmm0 # 0x1eebe20
pmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
pandn %xmm1, %xmm0
pslld $0x1f, %xmm0
movmskps %xmm0, %eax
testl %eax, %eax
je 0x1c2060
movzbl %al, %ecx
movq %rsi, 0x1d8(%rsp)
movq %rcx, 0x1e8(%rsp)
bsfq %rcx, %rax
movq 0x230(%rsp), %rcx
movzwl (%rcx,%rax,8), %r9d
movzwl 0x2(%rcx,%rax,8), %r8d
movl 0x50(%rcx), %ebp
movl 0x4(%rcx,%rax,8), %esi
movq %rsi, 0x10(%rsp)
movq (%rdx), %rax
movq %rax, 0x228(%rsp)
movq 0x1e8(%rax), %rax
movq (%rax,%rbp,8), %rax
movq 0x58(%rax), %r15
imulq 0x68(%rax), %rsi
movq 0x90(%rax), %rdi
movl %r9d, %edx
movl $0x7fff, %ecx # imm = 0x7FFF
andl %ecx, %edx
movl %edx, 0xa0(%rsp)
movl (%r15,%rsi), %r10d
addl %edx, %r10d
movl %r8d, %edx
andl %ecx, %edx
movl %edx, 0x90(%rsp)
movl 0x4(%r15,%rsi), %ecx
movl %ecx, %r11d
imull %edx, %r11d
addl %r10d, %r11d
leaq 0x1(%r11), %r14
movq 0xa0(%rax), %r10
movq %r10, %rax
imulq %r11, %rax
movups (%rdi,%rax), %xmm14
imulq %r10, %r14
movups (%rdi,%r14), %xmm7
leaq (%r11,%rcx), %rax
movq %rax, %r14
imulq %r10, %r14
movups (%rdi,%r14), %xmm8
leaq (%r11,%rcx), %r14
incq %r14
movq %r13, %rdx
movq %r14, %r13
imulq %r10, %r13
movups (%rdi,%r13), %xmm4
xorl %r13d, %r13d
testw %r9w, %r9w
setns %r13b
leaq (%r11,%r13), %r9
incq %r9
movq 0x28(%rsp), %r11
imulq %r10, %r9
movups (%rdi,%r9), %xmm0
addq %r14, %r13
movq %r13, %r9
imulq %r10, %r9
movups (%rdi,%r9), %xmm9
movq 0x38(%rsp), %r9
testw %r8w, %r8w
movl $0x0, %r8d
cmovnsq %rcx, %r8
addq %r8, %rax
imulq %r10, %rax
movups (%rdi,%rax), %xmm1
addq %r8, %r14
imulq %r10, %r14
movups (%rdi,%r14), %xmm10
movq 0x1e0(%rsp), %r14
addq %r13, %r8
movq 0x8(%rsp), %rcx
movq %rdx, %r13
movq 0xc98(%rsp), %rdx
imulq %r10, %r8
movq 0x30(%rsp), %r10
movups (%rdi,%r8), %xmm2
movaps %xmm2, 0x420(%rsp)
movq 0x40(%rsp), %r8
movq 0x48(%rsp), %rdi
movaps %xmm14, %xmm12
unpcklps %xmm4, %xmm12 # xmm12 = xmm12[0],xmm4[0],xmm12[1],xmm4[1]
unpckhps %xmm4, %xmm14 # xmm14 = xmm14[2],xmm4[2],xmm14[3],xmm4[3]
movaps %xmm7, %xmm2
unpcklps %xmm8, %xmm2 # xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
movaps %xmm7, %xmm3
unpckhps %xmm8, %xmm3 # xmm3 = xmm3[2],xmm8[2],xmm3[3],xmm8[3]
unpcklps %xmm3, %xmm14 # xmm14 = xmm14[0],xmm3[0],xmm14[1],xmm3[1]
movaps %xmm12, %xmm15
unpcklps %xmm2, %xmm15 # xmm15 = xmm15[0],xmm2[0],xmm15[1],xmm2[1]
unpckhps %xmm2, %xmm12 # xmm12 = xmm12[2],xmm2[2],xmm12[3],xmm2[3]
movaps %xmm7, %xmm6
unpcklps %xmm9, %xmm6 # xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1]
movaps %xmm9, 0x440(%rsp)
unpckhps %xmm9, %xmm7 # xmm7 = xmm7[2],xmm9[2],xmm7[3],xmm9[3]
movaps %xmm0, %xmm2
unpcklps %xmm4, %xmm2 # xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
unpckhps %xmm4, %xmm0 # xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
unpcklps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
movaps %xmm6, %xmm13
unpcklps %xmm2, %xmm13 # xmm13 = xmm13[0],xmm2[0],xmm13[1],xmm2[1]
unpckhps %xmm2, %xmm6 # xmm6 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
movaps %xmm8, %xmm3
unpcklps %xmm10, %xmm3 # xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
movaps %xmm10, 0x430(%rsp)
unpckhps %xmm10, %xmm8 # xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
movaps %xmm4, %xmm0
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
movaps %xmm4, 0x130(%rsp)
movaps %xmm4, %xmm2
unpckhps %xmm1, %xmm2 # xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
unpcklps %xmm2, %xmm8 # xmm8 = xmm8[0],xmm2[0],xmm8[1],xmm2[1]
movaps %xmm3, %xmm2
unpcklps %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
unpckhps %xmm0, %xmm3 # xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
leaq 0x6(%rsp), %rax
movq %rax, 0x270(%rsp)
movss (%r14,%rcx,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
subps %xmm1, %xmm15
movaps %xmm13, 0x470(%rsp)
subps %xmm1, %xmm13
movaps %xmm13, %xmm0
movaps %xmm2, 0x450(%rsp)
subps %xmm1, %xmm2
movaps %xmm2, %xmm1
movss 0x10(%r14,%rcx,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
subps %xmm2, %xmm12
movaps %xmm6, 0x460(%rsp)
subps %xmm2, %xmm6
movaps %xmm3, 0x80(%rsp)
movaps %xmm3, %xmm13
subps %xmm2, %xmm13
movss 0x20(%r14,%rcx,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
subps %xmm3, %xmm14
movaps %xmm7, 0x480(%rsp)
movaps %xmm7, %xmm4
subps %xmm3, %xmm4
movaps %xmm8, 0x70(%rsp)
movaps %xmm8, %xmm2
subps %xmm3, %xmm2
movaps %xmm1, %xmm9
movaps %xmm1, 0x50(%rsp)
subps %xmm15, %xmm9
movaps %xmm13, %xmm11
subps %xmm12, %xmm11
movaps %xmm2, %xmm10
subps %xmm14, %xmm10
movaps %xmm13, %xmm3
addps %xmm12, %xmm3
movaps %xmm2, %xmm7
addps %xmm14, %xmm7
movaps %xmm9, %xmm8
mulps %xmm3, %xmm8
mulps %xmm10, %xmm3
movaps %xmm11, %xmm5
mulps %xmm7, %xmm5
subps %xmm3, %xmm5
movaps %xmm1, %xmm3
addps %xmm15, %xmm3
movaps %xmm9, 0x110(%rsp)
mulps %xmm9, %xmm7
movaps %xmm3, %xmm9
movaps %xmm10, 0x100(%rsp)
mulps %xmm10, %xmm9
subps %xmm7, %xmm9
movaps %xmm11, 0xf0(%rsp)
mulps %xmm11, %xmm3
subps %xmm3, %xmm8
movss 0x50(%r14,%rcx,4), %xmm3
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
movss 0x60(%r14,%rcx,4), %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm7, %xmm8
mulps %xmm3, %xmm9
addps %xmm8, %xmm9
movss 0x40(%r14,%rcx,4), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm1, 0x60(%rsp)
mulps %xmm1, %xmm5
addps %xmm9, %xmm5
movaps %xmm15, %xmm10
movaps %xmm0, %xmm1
subps %xmm0, %xmm10
movaps %xmm12, %xmm11
subps %xmm6, %xmm11
movaps %xmm14, %xmm0
subps %xmm4, %xmm0
movaps %xmm12, 0xd0(%rsp)
addps %xmm6, %xmm12
movaps %xmm14, 0x120(%rsp)
addps %xmm4, %xmm14
movaps %xmm10, %xmm8
mulps %xmm12, %xmm8
mulps %xmm0, %xmm12
movaps %xmm11, %xmm9
mulps %xmm14, %xmm9
subps %xmm12, %xmm9
movaps %xmm15, 0xe0(%rsp)
addps %xmm1, %xmm15
mulps %xmm10, %xmm14
movaps %xmm15, %xmm12
movaps %xmm0, 0x190(%rsp)
mulps %xmm0, %xmm12
subps %xmm14, %xmm12
mulps %xmm11, %xmm15
subps %xmm15, %xmm8
mulps %xmm7, %xmm8
mulps %xmm3, %xmm12
addps %xmm8, %xmm12
movaps 0x60(%rsp), %xmm15
mulps %xmm15, %xmm9
addps %xmm12, %xmm9
movaps %xmm1, %xmm0
movaps %xmm1, %xmm12
movaps 0x50(%rsp), %xmm1
subps %xmm1, %xmm12
addps %xmm0, %xmm1
movaps %xmm1, %xmm0
movaps %xmm6, %xmm8
subps %xmm13, %xmm8
addps %xmm6, %xmm13
movaps %xmm4, %xmm14
subps %xmm2, %xmm14
addps %xmm4, %xmm2
movaps %xmm12, %xmm6
mulps %xmm13, %xmm6
mulps %xmm14, %xmm13
movaps %xmm8, %xmm4
mulps %xmm2, %xmm4
subps %xmm13, %xmm4
mulps %xmm12, %xmm2
mulps %xmm14, %xmm1
subps %xmm2, %xmm1
mulps %xmm8, %xmm0
subps %xmm0, %xmm6
movaps %xmm7, 0xc0(%rsp)
mulps %xmm7, %xmm6
movaps %xmm3, 0x50(%rsp)
mulps %xmm3, %xmm1
addps %xmm6, %xmm1
mulps %xmm15, %xmm4
addps %xmm1, %xmm4
movaps %xmm5, %xmm1
addps %xmm9, %xmm1
addps %xmm4, %xmm1
movaps %xmm5, %xmm0
minps %xmm9, %xmm0
minps %xmm4, %xmm0
movaps %xmm5, 0x490(%rsp)
movaps %xmm5, %xmm6
movaps %xmm9, 0x4a0(%rsp)
maxps %xmm9, %xmm6
maxps %xmm4, %xmm6
movaps %xmm1, 0x180(%rsp)
andps 0x1d2b6b5(%rip), %xmm1 # 0x1eec6c0
movaps %xmm1, 0x410(%rsp)
mulps 0x1d30d56(%rip), %xmm1 # 0x1ef1d70
cmpleps %xmm1, %xmm6
xorps 0x1d2b6ab(%rip), %xmm1 # 0x1eec6d0
cmpnltps %xmm1, %xmm0
orps %xmm0, %xmm6
movmskps %xmm6, %eax
movss 0xa0(%rsp), %xmm0
movaps %xmm0, 0xa0(%rsp)
movss 0x90(%rsp), %xmm0
movaps %xmm0, 0x90(%rsp)
testl %eax, %eax
movss 0x10(%rsp), %xmm0
movaps %xmm0, 0x370(%rsp)
je 0x1c16d5
movaps %xmm15, %xmm7
movaps %xmm11, %xmm0
movaps 0x100(%rsp), %xmm5
mulps %xmm5, %xmm0
movaps %xmm10, %xmm1
movaps 0xf0(%rsp), %xmm9
mulps %xmm9, %xmm1
movaps %xmm8, %xmm13
movaps 0x190(%rsp), %xmm3
mulps %xmm3, %xmm13
movaps %xmm12, %xmm4
mulps %xmm11, %xmm4
mulps %xmm3, %xmm9
subps %xmm0, %xmm9
movaps 0x110(%rsp), %xmm15
movaps %xmm15, %xmm2
mulps %xmm11, %xmm15
mulps %xmm14, %xmm11
subps %xmm13, %xmm11
movaps %xmm8, 0x10(%rsp)
movaps 0x1d2b5ee(%rip), %xmm8 # 0x1eec6c0
andps %xmm8, %xmm0
andps %xmm8, %xmm13
cmpltps %xmm13, %xmm0
blendvps %xmm0, %xmm9, %xmm11
movaps %xmm10, %xmm0
mulps %xmm14, %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm12
mulps %xmm10, %xmm5
subps %xmm2, %xmm5
subps %xmm0, %xmm12
andps %xmm8, %xmm2
andps %xmm8, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm5, %xmm12
mulps 0x10(%rsp), %xmm10
subps %xmm1, %xmm15
subps %xmm4, %xmm10
andps %xmm8, %xmm1
andps %xmm8, %xmm4
cmpltps %xmm4, %xmm1
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm15, %xmm10
movaps 0xc0(%rsp), %xmm1
mulps %xmm10, %xmm1
movaps 0x50(%rsp), %xmm0
mulps %xmm12, %xmm0
addps %xmm1, %xmm0
mulps %xmm11, %xmm7
addps %xmm0, %xmm7
addps %xmm7, %xmm7
movaps 0x120(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xd0(%rsp), %xmm1
mulps %xmm12, %xmm1
addps %xmm0, %xmm1
movaps 0xe0(%rsp), %xmm3
mulps %xmm11, %xmm3
addps %xmm1, %xmm3
rcpps %xmm7, %xmm1
movaps %xmm7, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d2b87d(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movq 0x8(%rsp), %rax
movss 0x80(%r14,%rax,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r14,%rax,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm6, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1c16d5
cmpneqps 0x1d2a82c(%rip), %xmm7 # 0x1eeba10
andps %xmm7, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1c16d5
movaps 0x490(%rsp), %xmm15
movaps %xmm15, 0x240(%rsp)
movaps 0x4a0(%rsp), %xmm8
movaps %xmm8, 0x250(%rsp)
movaps 0x180(%rsp), %xmm9
movaps %xmm9, 0x260(%rsp)
leaq 0x6(%rsp), %rax
movq %rax, 0x270(%rsp)
movaps %xmm1, 0x280(%rsp)
movaps %xmm0, 0x2b0(%rsp)
movaps %xmm11, 0x2c0(%rsp)
movaps %xmm12, 0x2d0(%rsp)
movaps %xmm10, 0x2e0(%rsp)
movdqa 0xa0(%rsp), %xmm0
pshufd $0x0, %xmm0, %xmm3 # xmm3 = xmm0[0,0,0,0]
pshufd $0x0, 0x90(%rsp), %xmm2 # xmm2 = mem[0,0,0,0]
movzwl 0x8(%r15,%rsi), %eax
decl %eax
cvtsi2ss %eax, %xmm1
paddd 0x1d5b704(%rip), %xmm3 # 0x1f1c990
paddd 0x1d5b70c(%rip), %xmm2 # 0x1f1c9a0
movaps %xmm1, %xmm4
rcpss %xmm1, %xmm4
mulss %xmm4, %xmm1
movss 0x1d2fd51(%rip), %xmm6 # 0x1ef0ff8
movaps %xmm6, %xmm0
subss %xmm1, %xmm0
movzwl 0xa(%r15,%rsi), %eax
decl %eax
cvtsi2ss %eax, %xmm5
mulss %xmm4, %xmm0
movaps %xmm5, %xmm4
rcpss %xmm5, %xmm4
mulss %xmm4, %xmm5
movaps %xmm6, %xmm1
subss %xmm5, %xmm1
mulss %xmm4, %xmm1
cvtdq2ps %xmm3, %xmm3
mulps %xmm9, %xmm3
addps %xmm15, %xmm3
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm3, %xmm0
movaps %xmm0, 0x240(%rsp)
cvtdq2ps %xmm2, %xmm2
mulps %xmm9, %xmm2
addps %xmm8, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm2, %xmm1
movaps %xmm1, 0x250(%rsp)
movq 0x228(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rbp,8), %rcx
movq 0x8(%rsp), %rax
movl 0x90(%r14,%rax,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c16d5
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c1347
cmpq $0x0, 0x48(%rcx)
je 0x1c2079
movaps 0x180(%rsp), %xmm5
rcpps %xmm5, %xmm2
mulps %xmm2, %xmm5
movaps 0x1d2b6b4(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm5, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
movaps 0x410(%rsp), %xmm2
cmpnltps 0x1d309c8(%rip), %xmm2 # 0x1ef1d40
andps %xmm3, %xmm2
mulps %xmm2, %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x290(%rsp)
mulps %xmm2, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x2a0(%rsp)
movaps 0x280(%rsp), %xmm0
movmskps %xmm0, %eax
movq %rax, 0x60(%rsp)
movq 0x60(%rsp), %rax
bsfq %rax, %rax
movq %rax, 0x10(%rsp)
cmpl $0x0, 0x60(%rsp)
sete 0xd0(%rsp)
je 0x1c16d5
movd %ebp, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x110(%rsp)
pshufd $0x0, 0x370(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
movdqa %xmm0, 0x100(%rsp)
movq 0x148(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0xf0(%rsp)
movq %rsi, 0xc0(%rsp)
movq %rcx, 0x50(%rsp)
movq 0x8(%rsp), %rax
movss 0x80(%r14,%rax,4), %xmm5
movq 0x10(%rsp), %rcx
movss 0x2b0(%rsp,%rcx,4), %xmm0
movss 0x290(%rsp,%rcx,4), %xmm1
movss 0x2a0(%rsp,%rcx,4), %xmm2
movq 0x8(%rsp), %rax
movss %xmm0, 0x80(%r14,%rax,4)
movq 0x8(%rdx), %rax
movq %rax, 0x120(%rsp)
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x2c0(%rsp,%rcx,4), %xmm0
movss 0x2d0(%rsp,%rcx,4), %xmm3
movss 0x2e0(%rsp,%rcx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x380(%rsp)
movaps %xmm3, 0x390(%rsp)
movaps %xmm4, 0x3a0(%rsp)
movaps %xmm1, 0x3b0(%rsp)
movaps %xmm2, 0x3c0(%rsp)
movaps 0x100(%rsp), %xmm0
movaps %xmm0, 0x3d0(%rsp)
movdqa 0x110(%rsp), %xmm0
movdqa %xmm0, 0x3e0(%rsp)
pcmpeqd %xmm0, %xmm0
leaq 0x3f0(%rsp), %rcx
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movq 0x120(%rsp), %rax
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x3f0(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x400(%rsp)
movdqa 0xf0(%rsp), %xmm0
movdqa %xmm0, 0xb0(%rsp)
leaq 0xb0(%rsp), %rcx
movq %rcx, 0x150(%rsp)
movq 0x50(%rsp), %rcx
movq 0x18(%rcx), %rcx
movq %rcx, 0x158(%rsp)
movq %rax, 0x160(%rsp)
movq %r14, 0x168(%rsp)
leaq 0x380(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x4, 0x178(%rsp)
movq 0x50(%rsp), %rax
movq 0x48(%rax), %rax
testq %rax, %rax
movss %xmm5, 0xe0(%rsp)
je 0x1c15c3
leaq 0x150(%rsp), %rdi
callq *%rax
movss 0xe0(%rsp), %xmm5
movq 0xc0(%rsp), %rsi
movq 0xc98(%rsp), %rdx
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r10
movq 0x38(%rsp), %r9
movq 0x40(%rsp), %r8
movq 0x48(%rsp), %rdi
movdqa 0xb0(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1c166e
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1630
testb $0x2, (%rcx)
jne 0x1c15f4
movq 0x50(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c1630
leaq 0x150(%rsp), %rdi
callq *%rax
movss 0xe0(%rsp), %xmm5
movq 0xc0(%rsp), %rsi
movq 0xc98(%rsp), %rdx
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r10
movq 0x38(%rsp), %r9
movq 0x40(%rsp), %r8
movq 0x48(%rsp), %rdi
movdqa 0xb0(%rsp), %xmm0
pcmpeqd 0x1d2a3cf(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d2a7d3(%rip), %xmm1 # 0x1eebe20
movq 0x168(%rsp), %rax
movaps 0x1d2a3a4(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1c167e
pcmpeqd 0x1d2a39a(%rip), %xmm1 # 0x1eeba10
pxor 0x1d2a7a2(%rip), %xmm1 # 0x1eebe20
ptest 0x1d2b499(%rip), %xmm1 # 0x1eecb20
movq 0x10(%rsp), %rcx
jne 0x1c16c7
movq 0x8(%rsp), %rax
movss %xmm5, 0x80(%r14,%rax,4)
movq 0x60(%rsp), %rax
btcq %rcx, %rax
bsfq %rax, %rcx
movq %rcx, 0x10(%rsp)
movq %rax, 0x60(%rsp)
testq %rax, %rax
sete 0xd0(%rsp)
jne 0x1c140d
jmp 0x1c16d5
testb $0x1, 0xd0(%rsp)
je 0x1c2079
movaps 0x130(%rsp), %xmm3
movaps %xmm3, %xmm8
movaps 0x420(%rsp), %xmm0
unpcklps %xmm0, %xmm8 # xmm8 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
unpckhps %xmm0, %xmm3 # xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
movaps 0x440(%rsp), %xmm1
movaps %xmm1, %xmm0
movaps 0x430(%rsp), %xmm2
unpcklps %xmm2, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
unpckhps %xmm2, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
unpcklps %xmm1, %xmm3 # xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
movaps %xmm8, %xmm12
unpcklps %xmm0, %xmm12 # xmm12 = xmm12[0],xmm0[0],xmm12[1],xmm0[1]
unpckhps %xmm0, %xmm8 # xmm8 = xmm8[2],xmm0[2],xmm8[3],xmm0[3]
movq 0x8(%rsp), %rax
movss (%r14,%rax,4), %xmm1
movss 0x10(%r14,%rax,4), %xmm2
movss 0x20(%r14,%rax,4), %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm1, %xmm12
subps %xmm2, %xmm8
subps %xmm0, %xmm3
movaps 0x450(%rsp), %xmm4
subps %xmm1, %xmm4
movaps 0x80(%rsp), %xmm10
subps %xmm2, %xmm10
movaps 0x70(%rsp), %xmm5
subps %xmm0, %xmm5
movaps %xmm5, 0x70(%rsp)
movaps 0x470(%rsp), %xmm15
subps %xmm1, %xmm15
movaps 0x460(%rsp), %xmm7
subps %xmm2, %xmm7
movaps 0x480(%rsp), %xmm14
subps %xmm0, %xmm14
movaps %xmm15, %xmm6
subps %xmm12, %xmm6
movaps %xmm7, %xmm13
subps %xmm8, %xmm13
movaps %xmm14, %xmm9
subps %xmm3, %xmm9
movaps %xmm15, %xmm0
addps %xmm12, %xmm0
movaps %xmm7, %xmm1
addps %xmm8, %xmm1
movaps %xmm14, %xmm2
addps %xmm3, %xmm2
movaps %xmm6, %xmm5
mulps %xmm1, %xmm5
mulps %xmm9, %xmm1
movaps %xmm13, %xmm11
mulps %xmm2, %xmm11
subps %xmm1, %xmm11
movaps %xmm6, 0xe0(%rsp)
mulps %xmm6, %xmm2
movaps %xmm0, %xmm1
movaps %xmm9, 0xd0(%rsp)
mulps %xmm9, %xmm1
subps %xmm2, %xmm1
movss 0x50(%r14,%rax,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm2, 0x10(%rsp)
movaps %xmm13, 0x110(%rsp)
mulps %xmm13, %xmm0
subps %xmm0, %xmm5
movss 0x60(%r14,%rax,4), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm6, %xmm5
mulps %xmm2, %xmm1
addps %xmm5, %xmm1
movss 0x40(%r14,%rax,4), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm0, 0x50(%rsp)
mulps %xmm0, %xmm11
addps %xmm1, %xmm11
movaps %xmm12, %xmm13
subps %xmm4, %xmm13
movaps %xmm8, %xmm9
movaps %xmm10, %xmm1
movaps %xmm10, 0x80(%rsp)
subps %xmm10, %xmm9
movaps %xmm3, %xmm10
movaps 0x70(%rsp), %xmm2
subps %xmm2, %xmm10
movaps %xmm8, 0x120(%rsp)
movaps %xmm8, %xmm0
addps %xmm1, %xmm0
movaps %xmm3, 0x130(%rsp)
addps %xmm2, %xmm3
movaps %xmm13, %xmm2
mulps %xmm0, %xmm2
mulps %xmm10, %xmm0
movaps %xmm9, %xmm8
mulps %xmm3, %xmm8
subps %xmm0, %xmm8
movaps %xmm12, 0x60(%rsp)
movaps %xmm12, %xmm0
addps %xmm4, %xmm0
mulps %xmm13, %xmm3
movaps %xmm0, %xmm5
movaps %xmm10, 0x100(%rsp)
mulps %xmm10, %xmm5
subps %xmm3, %xmm5
mulps %xmm9, %xmm0
subps %xmm0, %xmm2
mulps %xmm6, %xmm2
mulps 0x10(%rsp), %xmm5
addps %xmm2, %xmm5
movaps 0x50(%rsp), %xmm3
mulps %xmm3, %xmm8
addps %xmm5, %xmm8
movaps %xmm4, %xmm12
subps %xmm15, %xmm12
addps %xmm4, %xmm15
movaps %xmm8, %xmm4
movaps 0x80(%rsp), %xmm0
movaps %xmm0, %xmm10
subps %xmm7, %xmm10
addps %xmm0, %xmm7
movaps 0x70(%rsp), %xmm0
movaps %xmm0, %xmm8
subps %xmm14, %xmm8
addps %xmm0, %xmm14
movaps %xmm12, %xmm1
mulps %xmm7, %xmm1
mulps %xmm8, %xmm7
movaps %xmm10, %xmm0
mulps %xmm14, %xmm0
subps %xmm7, %xmm0
mulps %xmm12, %xmm14
movaps %xmm15, %xmm2
mulps %xmm8, %xmm2
subps %xmm14, %xmm2
mulps %xmm10, %xmm15
subps %xmm15, %xmm1
movaps %xmm6, 0x70(%rsp)
mulps %xmm6, %xmm1
mulps 0x10(%rsp), %xmm2
addps %xmm1, %xmm2
mulps %xmm3, %xmm0
addps %xmm2, %xmm0
movaps %xmm11, %xmm2
addps %xmm4, %xmm2
addps %xmm0, %xmm2
movaps %xmm11, %xmm1
minps %xmm4, %xmm1
minps %xmm0, %xmm1
movaps %xmm11, 0xc0(%rsp)
movaps %xmm11, %xmm5
movaps %xmm4, 0xf0(%rsp)
maxps %xmm4, %xmm5
maxps %xmm0, %xmm5
movaps %xmm2, 0x190(%rsp)
movaps %xmm2, %xmm0
andps 0x1d2ad28(%rip), %xmm0 # 0x1eec6c0
movaps %xmm0, 0x180(%rsp)
mulps 0x1d303c9(%rip), %xmm0 # 0x1ef1d70
cmpleps %xmm0, %xmm5
xorps 0x1d2ad1e(%rip), %xmm0 # 0x1eec6d0
cmpnltps %xmm0, %xmm1
orps %xmm1, %xmm5
movmskps %xmm5, %eax
testl %eax, %eax
je 0x1c1fe3
movaps %xmm9, %xmm6
movaps %xmm3, %xmm9
movaps %xmm6, %xmm0
movaps 0xd0(%rsp), %xmm4
mulps %xmm4, %xmm0
movaps %xmm13, %xmm1
movaps 0x110(%rsp), %xmm11
mulps %xmm11, %xmm1
movaps %xmm10, %xmm14
movaps 0x100(%rsp), %xmm3
mulps %xmm3, %xmm14
movaps %xmm12, %xmm15
mulps %xmm6, %xmm15
mulps %xmm3, %xmm11
subps %xmm0, %xmm11
movaps 0xe0(%rsp), %xmm7
movaps %xmm7, %xmm2
mulps %xmm6, %xmm7
mulps %xmm8, %xmm6
subps %xmm14, %xmm6
movaps %xmm10, 0x80(%rsp)
movaps 0x1d2ac8e(%rip), %xmm10 # 0x1eec6c0
andps %xmm10, %xmm0
andps %xmm10, %xmm14
cmpltps %xmm14, %xmm0
blendvps %xmm0, %xmm11, %xmm6
movaps %xmm13, %xmm0
mulps %xmm8, %xmm0
mulps %xmm3, %xmm2
mulps %xmm3, %xmm12
mulps %xmm13, %xmm4
subps %xmm2, %xmm4
subps %xmm0, %xmm12
andps %xmm10, %xmm2
andps %xmm10, %xmm0
cmpltps %xmm0, %xmm2
movaps %xmm2, %xmm0
blendvps %xmm0, %xmm4, %xmm12
mulps 0x80(%rsp), %xmm13
subps %xmm1, %xmm7
subps %xmm15, %xmm13
andps %xmm10, %xmm1
andps %xmm10, %xmm15
cmpltps %xmm15, %xmm1
movdqa 0xa0(%rsp), %xmm14
movdqa 0x370(%rsp), %xmm10
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm7, %xmm13
movaps 0x70(%rsp), %xmm1
mulps %xmm13, %xmm1
movaps 0x10(%rsp), %xmm0
mulps %xmm12, %xmm0
addps %xmm1, %xmm0
mulps %xmm6, %xmm9
addps %xmm0, %xmm9
addps %xmm9, %xmm9
movaps 0x130(%rsp), %xmm0
mulps %xmm13, %xmm0
movaps 0x120(%rsp), %xmm1
mulps %xmm12, %xmm1
addps %xmm0, %xmm1
movaps 0x60(%rsp), %xmm3
mulps %xmm6, %xmm3
addps %xmm1, %xmm3
rcpps %xmm9, %xmm1
movaps %xmm9, %xmm2
mulps %xmm1, %xmm2
movaps 0x1d2af09(%rip), %xmm0 # 0x1eeca10
subps %xmm2, %xmm0
addps %xmm3, %xmm3
mulps %xmm1, %xmm0
addps %xmm1, %xmm0
mulps %xmm3, %xmm0
movq 0x8(%rsp), %rax
movss 0x80(%r14,%rax,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movaps %xmm0, %xmm1
cmpleps %xmm2, %xmm1
movss 0x30(%r14,%rax,4), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
cmpleps %xmm0, %xmm2
andps %xmm2, %xmm1
andps %xmm5, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1c1fe3
cmpneqps 0x1d29eb7(%rip), %xmm9 # 0x1eeba10
andps %xmm9, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0x1c1fe3
movaps 0x190(%rsp), %xmm9
movaps %xmm9, 0x260(%rsp)
leaq 0x6(%rsp), %rax
movq %rax, 0x270(%rsp)
movaps %xmm1, 0x280(%rsp)
movaps %xmm0, 0x2b0(%rsp)
movaps %xmm6, 0x2c0(%rsp)
movaps %xmm12, 0x2d0(%rsp)
movaps %xmm13, 0x2e0(%rsp)
movaps %xmm9, %xmm3
subps 0xc0(%rsp), %xmm3
movaps %xmm3, 0x240(%rsp)
movaps %xmm9, %xmm2
subps 0xf0(%rsp), %xmm2
movaps %xmm2, 0x250(%rsp)
pshufd $0x0, %xmm14, %xmm5 # xmm5 = xmm14[0,0,0,0]
pshufd $0x0, 0x90(%rsp), %xmm4 # xmm4 = mem[0,0,0,0]
paddd 0x1d5ad9f(%rip), %xmm5 # 0x1f1c990
movzwl 0x8(%r15,%rsi), %eax
decl %eax
cvtsi2ss %eax, %xmm1
paddd 0x1d5ad9b(%rip), %xmm4 # 0x1f1c9a0
movaps %xmm1, %xmm6
rcpss %xmm1, %xmm6
mulss %xmm6, %xmm1
movss 0x1d2f3df(%rip), %xmm8 # 0x1ef0ff8
movaps %xmm8, %xmm0
subss %xmm1, %xmm0
mulss %xmm6, %xmm0
movzwl 0xa(%r15,%rsi), %eax
decl %eax
cvtsi2ss %eax, %xmm6
movaps %xmm6, %xmm7
rcpss %xmm6, %xmm7
mulss %xmm7, %xmm6
movaps %xmm8, %xmm1
subss %xmm6, %xmm1
mulss %xmm7, %xmm1
cvtdq2ps %xmm5, %xmm5
mulps %xmm9, %xmm5
addps %xmm3, %xmm5
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm5, %xmm0
movaps %xmm0, 0x240(%rsp)
cvtdq2ps %xmm4, %xmm3
mulps %xmm9, %xmm3
addps %xmm2, %xmm3
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm3, %xmm1
movaps %xmm1, 0x250(%rsp)
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rbp,8), %r15
movq 0x8(%rsp), %rax
movl 0x90(%r14,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c1fe3
movaps %xmm9, %xmm8
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c1cb9
cmpq $0x0, 0x48(%r15)
je 0x1c2079
rcpps %xmm8, %xmm2
mulps %xmm2, %xmm8
movaps 0x1d2ad48(%rip), %xmm4 # 0x1eeca10
movaps %xmm4, %xmm3
subps %xmm8, %xmm3
mulps %xmm2, %xmm3
addps %xmm2, %xmm3
movaps 0x180(%rsp), %xmm2
cmpnltps 0x1d3005b(%rip), %xmm2 # 0x1ef1d40
andps %xmm3, %xmm2
mulps %xmm2, %xmm0
minps %xmm4, %xmm0
movaps %xmm0, 0x290(%rsp)
mulps %xmm2, %xmm1
minps %xmm4, %xmm1
movaps %xmm1, 0x2a0(%rsp)
movaps 0x280(%rsp), %xmm0
movmskps %xmm0, %esi
bsfq %rsi, %rcx
testl %esi, %esi
sete 0x90(%rsp)
je 0x1c1fe3
movd %ebp, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x80(%rsp)
pshufd $0x0, %xmm10, %xmm0 # xmm0 = xmm10[0,0,0,0]
movdqa %xmm0, 0x130(%rsp)
movq 0x148(%rsp), %rax
movaps (%rax), %xmm0
movaps %xmm0, 0x10(%rsp)
movq 0x8(%rsp), %rax
movss 0x80(%r14,%rax,4), %xmm5
movss 0x2b0(%rsp,%rcx,4), %xmm0
movss 0x290(%rsp,%rcx,4), %xmm1
movss 0x2a0(%rsp,%rcx,4), %xmm2
movss %xmm0, 0x80(%r14,%rax,4)
movq 0x8(%rdx), %rax
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
movss 0x2c0(%rsp,%rcx,4), %xmm0
movss 0x2d0(%rsp,%rcx,4), %xmm3
movq %rcx, 0x70(%rsp)
movss 0x2e0(%rsp,%rcx,4), %xmm4
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps %xmm0, 0x380(%rsp)
movaps %xmm3, 0x390(%rsp)
movaps %xmm4, 0x3a0(%rsp)
movaps %xmm1, 0x3b0(%rsp)
movaps %xmm2, 0x3c0(%rsp)
movaps 0x130(%rsp), %xmm0
movaps %xmm0, 0x3d0(%rsp)
movdqa 0x80(%rsp), %xmm0
movdqa %xmm0, 0x3e0(%rsp)
pcmpeqd %xmm0, %xmm0
leaq 0x3f0(%rsp), %rcx
movdqa %xmm0, 0x10(%rcx)
movdqa %xmm0, (%rcx)
movd (%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x3f0(%rsp)
movd 0x4(%rax), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movdqa %xmm0, 0x400(%rsp)
movdqa 0x10(%rsp), %xmm0
movdqa %xmm0, 0xb0(%rsp)
leaq 0xb0(%rsp), %rcx
movq %rcx, 0x150(%rsp)
movq 0x18(%r15), %rcx
movq %rcx, 0x158(%rsp)
movq %rax, 0x160(%rsp)
movq %r14, 0x168(%rsp)
leaq 0x380(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x4, 0x178(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
movss %xmm5, 0xa0(%rsp)
je 0x1c1ee6
leaq 0x150(%rsp), %rdi
movq %rsi, %rbp
callq *%rax
movss 0xa0(%rsp), %xmm5
movq %rbp, %rsi
movq 0xc98(%rsp), %rdx
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r10
movq 0x38(%rsp), %r9
movq 0x40(%rsp), %r8
movq 0x48(%rsp), %rdi
movdqa 0xb0(%rsp), %xmm1
ptest %xmm1, %xmm1
je 0x1c1f8b
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1f4d
testb $0x2, (%rcx)
jne 0x1c1f13
testb $0x40, 0x3e(%r15)
je 0x1c1f4d
leaq 0x150(%rsp), %rdi
movq %rsi, %rbp
callq *%rax
movss 0xa0(%rsp), %xmm5
movq %rbp, %rsi
movq 0xc98(%rsp), %rdx
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r10
movq 0x38(%rsp), %r9
movq 0x40(%rsp), %r8
movq 0x48(%rsp), %rdi
movdqa 0xb0(%rsp), %xmm0
pcmpeqd 0x1d29ab2(%rip), %xmm0 # 0x1eeba10
movdqa %xmm0, %xmm1
pxor 0x1d29eb6(%rip), %xmm1 # 0x1eebe20
movq 0x168(%rsp), %rax
movaps 0x1d29a87(%rip), %xmm2 # 0x1eeba00
blendvps %xmm0, 0x80(%rax), %xmm2
movaps %xmm2, 0x80(%rax)
jmp 0x1c1f9b
pcmpeqd 0x1d29a7d(%rip), %xmm1 # 0x1eeba10
pxor 0x1d29e85(%rip), %xmm1 # 0x1eebe20
ptest 0x1d2ab7c(%rip), %xmm1 # 0x1eecb20
movq 0x70(%rsp), %rcx
jne 0x1c1fd5
movq 0x8(%rsp), %rax
movss %xmm5, 0x80(%r14,%rax,4)
btcq %rcx, %rsi
bsfq %rsi, %rcx
testq %rsi, %rsi
sete 0x90(%rsp)
jne 0x1c1d54
jmp 0x1c1fe3
testb $0x1, 0x90(%rsp)
je 0x1c2079
movq 0x1e8(%rsp), %rcx
leaq -0x1(%rcx), %rax
andq %rax, %rcx
movaps 0x360(%rsp), %xmm8
movaps 0x350(%rsp), %xmm9
movaps 0x340(%rsp), %xmm10
movaps 0x330(%rsp), %xmm11
movaps 0x320(%rsp), %xmm12
movaps 0x310(%rsp), %xmm13
movaps 0x300(%rsp), %xmm14
movaps 0x2f0(%rsp), %xmm15
movaps 0x1c0(%rsp), %xmm7
movaps 0x1b0(%rsp), %xmm6
movaps 0x1a0(%rsp), %xmm4
movq 0x1d8(%rsp), %rsi
jne 0x1c0b9b
movq 0x220(%rsp), %rax
incq %rax
cmpq %rsi, %rax
setb %cl
jne 0x1c09f8
jmp 0x1c20dd
movaps 0x360(%rsp), %xmm8
movaps 0x350(%rsp), %xmm9
movaps 0x340(%rsp), %xmm10
movaps 0x330(%rsp), %xmm11
movaps 0x320(%rsp), %xmm12
movaps 0x310(%rsp), %xmm13
movaps 0x300(%rsp), %xmm14
movaps 0x2f0(%rsp), %xmm15
movaps 0x1c0(%rsp), %xmm7
movaps 0x1b0(%rsp), %xmm6
movaps 0x1a0(%rsp), %xmm4
movb 0x7(%rsp), %cl
xorl %eax, %eax
testb $0x1, %cl
je 0x1c20f8
movq 0x8(%rsp), %rax
movl $0xff800000, 0x80(%r14,%rax,4) # imm = 0xFF800000
pushq $0x1
popq %rax
testb $0x3, %al
movq 0x238(%rsp), %rcx
je 0x1c08ab
leaq 0x4b0(%rsp), %rax
cmpq %rax, %rcx
setne %al
addq $0xc58, %rsp # imm = 0xC58
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::sse42::InstanceIntersector1MB::occluded(embree::sse42::InstanceIntersector1MB::Precalculations const&, embree::RayK<1>&, embree::RayQueryContext*, embree::InstancePrimitive const&)
|
/// Occlusion (shadow-ray) test against a motion-blurred instance primitive.
/// Transforms the ray into the instance's local space at the ray's time value,
/// recursively queries the instanced scene, then restores the ray to world space.
/// @param pre     precalculations for this traversal (not used directly here)
/// @param ray     ray to test; org/dir are temporarily rewritten in local space
///                and restored before returning
/// @param context query context carrying the user context / instance-ID stack
/// @param prim    instance primitive holding the Instance* and its instID
/// @return true if the ray is occluded inside the instanced scene
bool InstanceIntersector1MB::occluded(const Precalculations& pre, Ray& ray, RayQueryContext* context, const InstancePrimitive& prim)
{
const Instance* instance = prim.instance;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
if ((ray.mask & instance->mask) == 0)
return false;
#endif
RTCRayQueryContext* user_context = context->user;
bool occluded = false;
// Push this instance's ID onto the user context's instance stack; the push can
// fail (e.g. stack depth exceeded), in which case the instance is skipped.
if (likely(instance_id_stack::push(user_context, prim.instID_, 0)))
{
// Time-dependent world-to-local transform (motion blur: sampled at ray.time()).
const AffineSpace3fa world2local = instance->getWorld2Local(ray.time());
// Save the world-space origin/direction so they can be restored after recursion.
const Vec3ff ray_org = ray.org;
const Vec3ff ray_dir = ray.dir;
// The 4th (w) lanes of org/dir carry tnear and time respectively and are
// re-filled explicitly since xfmPoint/xfmVector only produce xyz.
ray.org = Vec3ff(xfmPoint(world2local, ray_org), ray.tnear());
ray.dir = Vec3ff(xfmVector(world2local, ray_dir), ray.time());
// Recurse into the instanced scene with a fresh context bound to that scene.
RayQueryContext newcontext((Scene*)instance->object, user_context, context->args);
instance->object->intersectors.occluded((RTCRay&)ray, &newcontext);
// Restore world-space ray state before reporting the result.
ray.org = ray_org;
ray.dir = ray_dir;
// Embree convention: an occluded() hit is signaled by a negative tfar
// (set to -inf by the nested query).
occluded = ray.tfar < 0.0f;
instance_id_stack::pop(user_context);
}
return occluded;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xe8, %rsp
movq (%rcx), %r12
movl 0x34(%r12), %eax
testl %eax, 0x24(%rsi)
je 0x1c704d
movq %rdx, %r14
movq 0x8(%rdx), %r15
cmpl $-0x1, (%r15)
jne 0x1c704d
movq %rsi, %rbx
movl 0x8(%rcx), %eax
movl %eax, (%r15)
andl $0x0, 0x4(%r15)
cmpl $0x1, 0x24(%r12)
jne 0x1c7054
movaps 0x70(%r12), %xmm5
movaps 0x80(%r12), %xmm4
movaps 0x90(%r12), %xmm3
movaps 0xa0(%r12), %xmm0
jmp 0x1c71c5
xorl %eax, %eax
jmp 0x1c7283
movss 0x1c(%rbx), %xmm4
movss 0x28(%r12), %xmm0
movss 0x2c(%r12), %xmm1
movss 0x30(%r12), %xmm2
subss %xmm1, %xmm4
subss %xmm1, %xmm2
divss %xmm2, %xmm4
mulss %xmm0, %xmm4
roundss $0x9, %xmm4, %xmm1
addss 0x1d29940(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm0, %xmm0
maxss %xmm1, %xmm0
subss %xmm0, %xmm4
cvttss2si %xmm0, %r13d
movzbl 0x3d(%r12), %eax
shll $0x8, %eax
movq 0x60(%r12), %rcx
movq %r13, %rbp
shlq $0x6, %rbp
addq %rcx, %rbp
incl %r13d
shlq $0x6, %r13
addq %rcx, %r13
cmpl $0x100, %eax # imm = 0x100
je 0x1c7295
movss 0x1d2563f(%rip), %xmm1 # 0x1eec714
subss %xmm4, %xmm1
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
movaps (%r13), %xmm2
mulps %xmm4, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps (%rbp), %xmm0
mulps %xmm1, %xmm0
addps %xmm2, %xmm0
movaps 0x10(%r13), %xmm2
mulps %xmm4, %xmm2
movaps 0x10(%rbp), %xmm8
mulps %xmm1, %xmm8
addps %xmm2, %xmm8
movaps 0x20(%r13), %xmm3
mulps %xmm4, %xmm3
movaps 0x20(%rbp), %xmm2
mulps %xmm1, %xmm2
mulps 0x30(%r13), %xmm4
addps %xmm3, %xmm2
mulps 0x30(%rbp), %xmm1
addps %xmm4, %xmm1
movaps %xmm2, %xmm5
shufps $0xc9, %xmm2, %xmm5 # xmm5 = xmm5[1,2],xmm2[0,3]
movaps %xmm8, %xmm4
shufps $0xc9, %xmm8, %xmm4 # xmm4 = xmm4[1,2],xmm8[0,3]
movaps %xmm0, %xmm6
mulps %xmm4, %xmm6
mulps %xmm2, %xmm4
movaps %xmm8, %xmm3
mulps %xmm5, %xmm3
subps %xmm4, %xmm3
movaps %xmm3, %xmm4
shufps $0xc9, %xmm3, %xmm4 # xmm4 = xmm4[1,2],xmm3[0,3]
movaps %xmm0, %xmm7
shufps $0xc9, %xmm0, %xmm7 # xmm7 = xmm7[1,2],xmm0[0,3]
mulps %xmm0, %xmm5
mulps %xmm7, %xmm2
subps %xmm5, %xmm2
mulps %xmm8, %xmm7
subps %xmm7, %xmm6
unpcklps %xmm6, %xmm3 # xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
shufps $0xc9, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
dpps $0x7f, %xmm4, %xmm0
unpcklps %xmm6, %xmm4 # xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
xorps %xmm5, %xmm5
movss %xmm2, %xmm5 # xmm5 = xmm2[0],xmm5[1,2,3]
insertps $0x4a, %xmm2, %xmm2 # xmm2 = xmm2[1],zero,xmm2[2],zero
unpcklps %xmm5, %xmm3 # xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
movaps %xmm4, %xmm5
unpcklps %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
unpckhps %xmm2, %xmm4 # xmm4 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
divps %xmm0, %xmm5
divps %xmm0, %xmm4
divps %xmm0, %xmm3
movaps %xmm1, %xmm0
shufps $0x0, %xmm1, %xmm0 # xmm0 = xmm0[0,0],xmm1[0,0]
movaps %xmm1, %xmm2
shufps $0x55, %xmm1, %xmm2 # xmm2 = xmm2[1,1],xmm1[1,1]
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm3, %xmm1
mulps %xmm4, %xmm2
addps %xmm1, %xmm2
mulps %xmm5, %xmm0
addps %xmm2, %xmm0
xorps 0x1d2550b(%rip), %xmm0 # 0x1eec6d0
movaps (%rbx), %xmm6
movaps %xmm6, 0x10(%rsp)
movaps 0x10(%rbx), %xmm8
movaps %xmm6, %xmm1
shufps $0x0, %xmm6, %xmm1 # xmm1 = xmm1[0,0],xmm6[0,0]
movaps %xmm6, %xmm2
shufps $0x55, %xmm6, %xmm2 # xmm2 = xmm2[1,1],xmm6[1,1]
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
mulps %xmm3, %xmm6
addps %xmm0, %xmm6
mulps %xmm4, %xmm2
addps %xmm6, %xmm2
mulps %xmm5, %xmm1
addps %xmm2, %xmm1
insertps $0x30, 0xc(%rbx), %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
movaps %xmm1, (%rbx)
movaps %xmm8, %xmm2
movaps %xmm8, 0x30(%rsp)
movaps %xmm8, %xmm0
shufps $0x0, %xmm8, %xmm0 # xmm0 = xmm0[0,0],xmm8[0,0]
movaps %xmm8, %xmm1
shufps $0x55, %xmm8, %xmm1 # xmm1 = xmm1[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm2 # xmm2 = xmm2[2,2],xmm8[2,2]
mulps %xmm3, %xmm2
mulps %xmm4, %xmm1
addps %xmm2, %xmm1
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
insertps $0x30, 0x1c(%rbx), %xmm0 # xmm0 = xmm0[0,1,2],mem[0]
movaps %xmm0, 0x10(%rbx)
movq 0x58(%r12), %rax
movq 0x10(%r14), %rcx
leaq 0xd0(%rsp), %rdx
movq %rax, (%rdx)
movq %r15, 0x8(%rdx)
movq %rcx, 0x10(%rdx)
leaq 0x58(%rax), %rdi
movq %rbx, %rsi
callq *0x80(%rax)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, (%rbx)
movaps 0x30(%rsp), %xmm0
movaps %xmm0, 0x10(%rbx)
xorps %xmm0, %xmm0
ucomiss 0x20(%rbx), %xmm0
seta %al
orq $-0x1, (%r15)
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movss 0x3c(%rbp), %xmm2
movss 0xc(%rbp), %xmm3
movss 0x1c(%rbp), %xmm6
movss 0x2c(%rbp), %xmm7
movss 0x3c(%r13), %xmm10
movss 0xc(%r13), %xmm9
movss 0x1c(%r13), %xmm11
movss 0x2c(%r13), %xmm0
movaps %xmm3, %xmm1
mulss %xmm9, %xmm1
movaps %xmm2, 0xc0(%rsp)
mulss %xmm10, %xmm2
addss %xmm1, %xmm2
movaps %xmm6, %xmm1
mulss %xmm11, %xmm1
addss %xmm2, %xmm1
movaps %xmm7, %xmm12
mulss %xmm0, %xmm12
addss %xmm1, %xmm12
movaps 0x1d253d5(%rip), %xmm5 # 0x1eec6d0
movaps %xmm12, %xmm8
xorps %xmm5, %xmm8
ucomiss %xmm12, %xmm8
movaps %xmm0, %xmm1
xorps %xmm5, %xmm1
jbe 0x1c7317
xorps %xmm5, %xmm10
xorps %xmm5, %xmm9
ja 0x1c731c
movaps %xmm0, %xmm1
movaps %xmm1, 0x90(%rsp)
movaps %xmm10, 0xa0(%rsp)
movaps %xmm9, 0xb0(%rsp)
movss %xmm7, 0x8(%rsp)
movss %xmm6, 0xc(%rsp)
movss %xmm3, 0x30(%rsp)
movaps %xmm4, 0x10(%rsp)
movaps %xmm12, %xmm15
cmpltss %xmm8, %xmm15
maxss %xmm12, %xmm8
andps 0x1d2535c(%rip), %xmm12 # 0x1eec6c0
movss 0x1d29614(%rip), %xmm1 # 0x1ef0980
mulss %xmm12, %xmm1
addss 0x1d2960b(%rip), %xmm1 # 0x1ef0984
movaps %xmm11, %xmm4
mulss %xmm12, %xmm1
addss 0x1d295fe(%rip), %xmm1 # 0x1ef0988
xorps %xmm5, %xmm4
mulss %xmm12, %xmm1
addss 0x1d295f2(%rip), %xmm1 # 0x1ef098c
mulss %xmm12, %xmm1
addss 0x1d295e9(%rip), %xmm1 # 0x1ef0990
mulss %xmm12, %xmm1
addss 0x1d295e0(%rip), %xmm1 # 0x1ef0994
movss 0x1d25357(%rip), %xmm14 # 0x1eec714
movaps %xmm14, %xmm0
subss %xmm12, %xmm0
xorps %xmm3, %xmm3
ucomiss %xmm3, %xmm0
movaps %xmm8, 0x50(%rsp)
jb 0x1c73da
sqrtss %xmm0, %xmm0
jmp 0x1c7438
movaps %xmm15, 0x40(%rsp)
movaps %xmm11, 0x80(%rsp)
movaps %xmm4, 0x70(%rsp)
movaps %xmm12, 0x60(%rsp)
movss %xmm1, 0x2c(%rsp)
callq 0x6aa20
xorps %xmm3, %xmm3
movss 0x2c(%rsp), %xmm1
movaps 0x60(%rsp), %xmm12
movaps 0x70(%rsp), %xmm4
movaps 0x80(%rsp), %xmm11
movaps 0x40(%rsp), %xmm15
movaps 0x50(%rsp), %xmm8
movaps 0x1d252a1(%rip), %xmm5 # 0x1eec6d0
movss 0x1d252dc(%rip), %xmm14 # 0x1eec714
mulss %xmm1, %xmm0
movss 0x1d29554(%rip), %xmm2 # 0x1ef0998
movaps %xmm2, %xmm1
subss %xmm0, %xmm1
movaps %xmm8, %xmm0
cmpltss %xmm3, %xmm0
maxss %xmm1, %xmm3
movaps %xmm0, %xmm1
andnps %xmm3, %xmm1
xorps %xmm5, %xmm3
andps %xmm0, %xmm3
orps %xmm1, %xmm3
movaps %xmm2, %xmm0
subss %xmm3, %xmm0
movaps %xmm14, %xmm3
cmpltss %xmm12, %xmm3
movss 0x1d2951c(%rip), %xmm1 # 0x1ef099c
andps %xmm3, %xmm1
andnps %xmm0, %xmm3
orps %xmm3, %xmm1
mulss 0x10(%rsp), %xmm1
movss 0x1d29509(%rip), %xmm0 # 0x1ef09a0
mulss %xmm1, %xmm0
roundss $0x9, %xmm0, %xmm0
cvttss2si %xmm0, %eax
mulss %xmm2, %xmm0
subss %xmm0, %xmm1
movaps %xmm1, %xmm3
mulss %xmm1, %xmm3
movss 0x1d294e8(%rip), %xmm0 # 0x1ef09a4
mulss %xmm3, %xmm0
addss 0x1d294e0(%rip), %xmm0 # 0x1ef09a8
movss 0x1d294dc(%rip), %xmm2 # 0x1ef09ac
mulss %xmm3, %xmm2
addss 0x1d294d4(%rip), %xmm2 # 0x1ef09b0
andps %xmm15, %xmm4
mulss %xmm3, %xmm0
addss 0x1d294c8(%rip), %xmm0 # 0x1ef09b4
andnps %xmm11, %xmm15
mulss %xmm3, %xmm2
addss 0x1d294bc(%rip), %xmm2 # 0x1ef09b8
movl %eax, %ecx
mulss %xmm3, %xmm0
addss 0x1d294b2(%rip), %xmm0 # 0x1ef09bc
andl $0x3, %ecx
mulss %xmm3, %xmm2
addss 0x1d294a7(%rip), %xmm2 # 0x1ef09c0
mulss %xmm3, %xmm0
addss 0x1d2949f(%rip), %xmm0 # 0x1ef09c4
mulss %xmm3, %xmm2
addss 0x1d251eb(%rip), %xmm2 # 0x1eec71c
mulss %xmm3, %xmm0
addss %xmm14, %xmm0
mulss %xmm3, %xmm2
addss %xmm14, %xmm2
mulss %xmm1, %xmm0
testb $0x1, %al
movaps 0xa0(%rsp), %xmm6
je 0x1c7559
movaps %xmm2, %xmm11
jmp 0x1c7560
movaps %xmm0, %xmm11
movaps %xmm2, %xmm0
orps %xmm4, %xmm15
leal -0x1(%rcx), %eax
cmpl $0x2, %ecx
jb 0x1c7570
xorps %xmm5, %xmm11
cmpl $0x2, %eax
jae 0x1c7578
xorps %xmm5, %xmm0
movaps %xmm8, %xmm1
movaps 0xc0(%rsp), %xmm3
mulss %xmm3, %xmm1
subss %xmm6, %xmm1
movaps %xmm8, %xmm5
movaps %xmm8, %xmm9
mulss 0x30(%rsp), %xmm9
movaps 0xb0(%rsp), %xmm12
subss %xmm12, %xmm9
movaps %xmm6, %xmm4
movaps %xmm8, %xmm6
mulss 0xc(%rsp), %xmm6
subss %xmm15, %xmm6
mulss 0x8(%rsp), %xmm5
movaps 0x90(%rsp), %xmm13
subss %xmm13, %xmm5
movaps %xmm9, %xmm2
mulss %xmm9, %xmm2
movaps %xmm1, %xmm7
mulss %xmm1, %xmm7
addss %xmm2, %xmm7
movaps %xmm6, %xmm2
mulss %xmm6, %xmm2
addss %xmm7, %xmm2
movaps %xmm5, %xmm7
mulss %xmm5, %xmm7
addss %xmm2, %xmm7
movaps %xmm7, %xmm10
rsqrtss %xmm7, %xmm10
movaps %xmm10, %xmm2
movss 0x1d2510d(%rip), %xmm8 # 0x1eec71c
mulss %xmm8, %xmm7
mulss %xmm10, %xmm7
mulss %xmm10, %xmm10
mulss %xmm7, %xmm10
movss 0x1d250ec(%rip), %xmm8 # 0x1eec718
mulss %xmm8, %xmm2
addss %xmm2, %xmm10
mulss %xmm10, %xmm1
mulss %xmm11, %xmm1
movaps %xmm11, 0x40(%rsp)
movaps %xmm0, %xmm11
mulss %xmm3, %xmm11
subss %xmm1, %xmm11
movaps 0x10(%rsp), %xmm2
subss %xmm2, %xmm14
mulss %xmm2, %xmm4
mulss %xmm14, %xmm3
addss %xmm4, %xmm3
mulss %xmm2, %xmm12
movaps %xmm14, %xmm7
mulss 0x30(%rsp), %xmm7
addss %xmm12, %xmm7
mulss %xmm2, %xmm15
movaps %xmm14, %xmm1
mulss 0xc(%rsp), %xmm1
addss %xmm15, %xmm1
mulss %xmm2, %xmm13
movaps %xmm14, %xmm2
mulss 0x8(%rsp), %xmm2
addss %xmm13, %xmm2
movaps %xmm7, %xmm12
mulss %xmm7, %xmm12
movaps %xmm3, %xmm13
mulss %xmm3, %xmm13
addss %xmm12, %xmm13
movaps %xmm1, %xmm12
mulss %xmm1, %xmm12
addss %xmm13, %xmm12
movaps %xmm2, %xmm13
mulss %xmm2, %xmm13
addss %xmm12, %xmm13
movaps %xmm13, %xmm12
mulss 0x1d25035(%rip), %xmm13 # 0x1eec71c
rsqrtss %xmm12, %xmm12
mulss %xmm12, %xmm8
mulss %xmm12, %xmm13
mulss %xmm12, %xmm12
mulss %xmm13, %xmm12
addss %xmm8, %xmm12
movss 0x1d292ba(%rip), %xmm8 # 0x1ef09c8
movaps 0x50(%rsp), %xmm4
ucomiss %xmm8, %xmm4
cmpltss %xmm4, %xmm8
movaps %xmm8, %xmm4
andnps %xmm11, %xmm4
mulss %xmm12, %xmm3
andps %xmm3, %xmm8
orps %xmm4, %xmm8
ja 0x1c7782
mulss %xmm10, %xmm9
mulss %xmm10, %xmm6
mulss %xmm10, %xmm5
movaps 0x40(%rsp), %xmm2
mulss %xmm2, %xmm9
movss 0x30(%rsp), %xmm7
mulss %xmm0, %xmm7
subss %xmm9, %xmm7
mulss %xmm2, %xmm6
movss 0xc(%rsp), %xmm1
mulss %xmm0, %xmm1
subss %xmm6, %xmm1
mulss %xmm2, %xmm5
movss 0x8(%rsp), %xmm2
mulss %xmm0, %xmm2
subss %xmm5, %xmm2
jmp 0x1c7791
mulss %xmm12, %xmm7
mulss %xmm12, %xmm1
mulss %xmm12, %xmm2
movaps 0x10(%rsp), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps (%r13), %xmm3
mulps %xmm6, %xmm3
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps (%rbp), %xmm0
mulps %xmm14, %xmm0
addps %xmm3, %xmm0
movaps 0x10(%r13), %xmm4
mulps %xmm6, %xmm4
movaps 0x10(%rbp), %xmm3
mulps %xmm14, %xmm3
addps %xmm4, %xmm3
movaps 0x20(%r13), %xmm5
mulps %xmm6, %xmm5
movaps 0x20(%rbp), %xmm4
mulps %xmm14, %xmm4
addps %xmm5, %xmm4
mulps 0x30(%r13), %xmm6
mulps 0x30(%rbp), %xmm14
addps %xmm6, %xmm14
movaps %xmm14, 0x10(%rsp)
movaps %xmm7, %xmm9
mulss %xmm7, %xmm9
movaps %xmm8, %xmm10
mulss %xmm8, %xmm10
movaps %xmm10, %xmm5
addss %xmm9, %xmm5
movaps %xmm1, %xmm13
movaps 0x1d24ebd(%rip), %xmm15 # 0x1eec6d0
xorps %xmm15, %xmm13
mulss %xmm1, %xmm13
addss %xmm13, %xmm5
xorps %xmm2, %xmm15
mulss %xmm2, %xmm15
addss %xmm15, %xmm5
movaps %xmm8, %xmm12
mulss %xmm2, %xmm12
movaps %xmm7, %xmm6
mulss %xmm1, %xmm6
movaps %xmm6, %xmm11
addss %xmm12, %xmm11
subss %xmm12, %xmm6
movaps %xmm7, %xmm14
mulss %xmm2, %xmm14
subss %xmm9, %xmm10
movaps %xmm1, %xmm12
mulss %xmm1, %xmm12
addss %xmm10, %xmm12
addss %xmm15, %xmm12
movaps %xmm8, %xmm9
mulss %xmm1, %xmm9
mulss %xmm7, %xmm8
movaps %xmm14, %xmm15
subss %xmm9, %xmm15
mulss %xmm2, %xmm1
addss %xmm14, %xmm9
movaps %xmm1, %xmm7
addss %xmm8, %xmm7
subss %xmm8, %xmm1
addss %xmm11, %xmm11
addss %xmm15, %xmm15
addss %xmm13, %xmm10
mulss %xmm2, %xmm2
addss %xmm10, %xmm2
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
movaps 0x1d24e37(%rip), %xmm14 # 0x1eec700
mulps %xmm14, %xmm15
movsd 0x1d24e1a(%rip), %xmm10 # 0x1eec6f0
mulps %xmm10, %xmm11
addps %xmm15, %xmm11
movss 0x1d24e2d(%rip), %xmm13 # 0x1eec714
mulps %xmm13, %xmm5
addps %xmm11, %xmm5
addss %xmm7, %xmm7
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm14, %xmm7
mulps %xmm10, %xmm12
addps %xmm7, %xmm12
addss %xmm6, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm13, %xmm6
addps %xmm12, %xmm6
xorps %xmm8, %xmm8
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm14, %xmm2
movaps %xmm0, %xmm7
shufps $0xe9, %xmm8, %xmm7 # xmm7 = xmm7[1,2],xmm8[2,3]
blendps $0x4, %xmm3, %xmm7 # xmm7 = xmm7[0,1],xmm3[2],xmm7[3]
addss %xmm9, %xmm9
addss %xmm1, %xmm1
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm10, %xmm1
addps %xmm2, %xmm1
mulps %xmm13, %xmm9
addps %xmm1, %xmm9
addps %xmm8, %xmm7
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm4, %xmm2
movaps %xmm4, %xmm10
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm9, %xmm4
movaps 0x10(%rsp), %xmm12
movaps %xmm12, %xmm1
movaps %xmm12, %xmm11
shufps $0xaa, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
mulps %xmm9, %xmm12
mulps %xmm8, %xmm9
mulps %xmm6, %xmm8
addps %xmm9, %xmm8
mulps %xmm5, %xmm0
addps %xmm8, %xmm0
movaps %xmm3, %xmm8
shufps $0x55, %xmm3, %xmm3 # xmm3 = xmm3[1,1,1,1]
mulps %xmm6, %xmm3
addps %xmm9, %xmm3
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm5, %xmm8
addps %xmm3, %xmm8
shufps $0x55, %xmm10, %xmm10 # xmm10 = xmm10[1,1,1,1]
mulps %xmm6, %xmm10
addps %xmm4, %xmm10
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm5, %xmm2
addps %xmm10, %xmm2
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
mulps %xmm6, %xmm11
addps %xmm12, %xmm11
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm5, %xmm1
addps %xmm11, %xmm1
addps %xmm7, %xmm1
jmp 0x1c7126
|
/embree[P]embree/kernels/geometry/instance_intersector.cpp
|
embree::sse42::InstanceIntersector1MB::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstancePrimitive const&) (.cold.1)
|
/// Recomputes the point query's search radius (query_radius) in the current
/// instance space. Fast path: an infinite-radius query or an empty instance
/// stack needs no transform, so the world-space radius is used directly.
/// Otherwise the axis-aligned radius box is mapped through the innermost
/// world-to-instance transform and its transformed extent is halved to get a
/// conservative per-axis radius.
__forceinline void updateAABB()
{
if (likely(query_ws->radius == (float)inf || userContext->instStackSize == 0)) {
query_radius = Vec3fa(query_ws->radius);
return;
}
// Innermost (top-of-stack) world-to-instance matrix; stored unaligned in the
// user context, hence the explicit unaligned load.
const AffineSpace3fa m = AffineSpace3fa_load_unaligned((AffineSpace3fa*)userContext->world2inst[userContext->instStackSize-1]);
// Cube of half-extent `radius` centered at the origin, transformed into
// instance space; the result is generally a larger (rotated) AABB.
BBox3fa bbox(Vec3fa(-query_ws->radius), Vec3fa(query_ws->radius));
bbox = xfmBounds(m, bbox);
// Half the transformed box extent = conservative per-axis query radius.
query_radius = 0.5f * (bbox.upper - bbox.lower);
}
|
decl %edi
shlq $0x6, %rdi
movups (%rsi,%rdi), %xmm5
movups 0x10(%rsi,%rdi), %xmm1
movups 0x20(%rsi,%rdi), %xmm9
movups 0x30(%rsi,%rdi), %xmm10
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x1d24beb(%rip), %xmm7 # 0x1eec6d0
xorps %xmm0, %xmm7
mulps %xmm0, %xmm9
movaps %xmm10, %xmm2
subps %xmm9, %xmm2
movaps %xmm7, %xmm6
mulps %xmm1, %xmm6
movaps %xmm6, %xmm8
addps %xmm2, %xmm8
mulps %xmm5, %xmm7
movaps %xmm7, %xmm11
addps %xmm8, %xmm11
movaps 0x1d23edc(%rip), %xmm3 # 0x1eeb9f0
minps %xmm11, %xmm3
movaps 0x1d23ee1(%rip), %xmm4 # 0x1eeba00
maxps %xmm11, %xmm4
addps %xmm10, %xmm9
addps %xmm9, %xmm6
movaps %xmm7, %xmm10
addps %xmm6, %xmm10
minps %xmm10, %xmm3
maxps %xmm10, %xmm4
mulps %xmm0, %xmm1
addps %xmm1, %xmm2
movaps %xmm7, %xmm10
addps %xmm2, %xmm10
minps %xmm10, %xmm3
maxps %xmm10, %xmm4
addps %xmm9, %xmm1
addps %xmm1, %xmm7
minps %xmm7, %xmm3
maxps %xmm7, %xmm4
mulps %xmm5, %xmm0
addps %xmm0, %xmm8
minps %xmm8, %xmm3
maxps %xmm8, %xmm4
addps %xmm0, %xmm6
minps %xmm6, %xmm3
maxps %xmm6, %xmm4
addps %xmm0, %xmm2
minps %xmm2, %xmm3
maxps %xmm2, %xmm4
addps %xmm1, %xmm0
minps %xmm0, %xmm3
maxps %xmm0, %xmm4
subps %xmm3, %xmm4
mulps 0x1d24b4e(%rip), %xmm4 # 0x1eec6e0
movaps %xmm4, (%rdx)
retq
|
/embree[P]embree/kernels/geometry/../common/context.h
|
embree::sse42::InstanceArrayIntersectorK<4>::occluded(embree::vboolf_impl<4> const&, embree::sse42::InstanceArrayIntersectorK<4>::Precalculations const&, embree::RayK<4>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
vbool<K> InstanceArrayIntersectorK<K>::occluded(const vbool<K>& valid_i, const Precalculations& pre, RayK<K>& ray, RayQueryContext* context, const Primitive& prim)
{
vbool<K> valid = valid_i;
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return false;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
valid &= (ray.mask & instance->mask) != 0;
if (none(valid)) return false;
#endif
RTCRayQueryContext* user_context = context->user;
vbool<K> occluded = false;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
const AffineSpace3vf<K> world2local = instance->getWorld2Local(prim.primID_);
const Vec3vf<K> ray_org = ray.org;
const Vec3vf<K> ray_dir = ray.dir;
ray.org = xfmPoint(world2local, ray_org);
ray.dir = xfmVector(world2local, ray_dir);
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.occluded(valid, ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
occluded = ray.tfar < 0.0f;
instance_id_stack::pop(user_context);
}
return occluded;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x90, %rsp
movq %rcx, %r14
movq %rdi, %rbx
movq (%r8), %rax
movl (%r9), %ecx
movl 0x4(%r9), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rdx
movq 0x58(%rdx), %rax
testq %rax, %rax
jne 0x1c8561
movq 0x90(%rdx), %rax
movq 0xa0(%rdx), %r10
imulq %rcx, %r10
movl (%rax,%r10), %eax
movl $0xffffffff, %r10d # imm = 0xFFFFFFFF
cmpq %r10, %rax
je 0x1c855f
movq 0x60(%rdx), %r10
movq (%r10,%rax,8), %rax
jmp 0x1c8561
xorl %eax, %eax
testq %rax, %rax
je 0x1c8623
movd 0x34(%rdx), %xmm0
pshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
pand 0x90(%r14), %xmm1
pxor %xmm0, %xmm0
pcmpeqd %xmm1, %xmm0
pandn (%rsi), %xmm0
movmskps %xmm0, %esi
testl %esi, %esi
je 0x1c8623
movq 0x8(%r8), %r15
leaq 0x1f879e1(%rip), %rsi # 0x214ff80
movaps (%rsi), %xmm1
movaps %xmm1, (%rbx)
cmpl $-0x1, (%r15)
jne 0x1c8a65
movl %edi, (%r15)
movl %ecx, 0x4(%r15)
movl (%r9), %ecx
movzbl 0x3d(%rdx), %edi
shll $0x8, %edi
movq 0x88(%rdx), %rdx
movl 0x20(%rdx), %esi
cmpl $0x100, %edi # imm = 0x100
je 0x1c8a75
cmpl $0x9134, %esi # imm = 0x9134
je 0x1c8797
cmpl $0x9234, %esi # imm = 0x9234
je 0x1c8635
cmpl $0xb001, %esi # imm = 0xB001
je 0x1c8694
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1c87f9
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movaps (%rsi,%rcx), %xmm1
movaps 0x10(%rsi,%rcx), %xmm8
movaps 0x20(%rsi,%rcx), %xmm5
movaps 0x30(%rsi,%rcx), %xmm6
jmp 0x1c87f9
leaq 0x1f87956(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, (%rbx)
jmp 0x1c8a65
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movsd 0x4(%rsi,%rcx), %xmm2
movss (%rsi,%rcx), %xmm1
shufps $0x4c, %xmm2, %xmm1 # xmm1 = xmm1[0,3],xmm2[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x10(%rsi,%rcx), %xmm2
movss 0xc(%rsi,%rcx), %xmm8
shufps $0x4c, %xmm2, %xmm8 # xmm8 = xmm8[0,3],xmm2[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x1c(%rsi,%rcx), %xmm2
movss 0x18(%rsi,%rcx), %xmm5
shufps $0x4c, %xmm2, %xmm5 # xmm5 = xmm5[0,3],xmm2[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
movsd 0x28(%rsi,%rcx), %xmm2
movss 0x24(%rsi,%rcx), %xmm6
shufps $0x4c, %xmm2, %xmm6 # xmm6 = xmm6[0,3],xmm2[0,1]
shufps $0x78, %xmm6, %xmm6 # xmm6 = xmm6[0,2,3,1]
jmp 0x1c87f9
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movsd 0x10(%rsi,%rcx), %xmm5
insertps $0x20, 0x8(%rsi,%rcx), %xmm5 # xmm5 = xmm5[0,1],mem[0],xmm5[3]
movsd 0x34(%rsi,%rcx), %xmm2
movss (%rsi,%rcx), %xmm1
movss 0xc(%rsi,%rcx), %xmm8
movlhps %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm1 # xmm1 = xmm1[0,2],xmm2[1,3]
movss 0x18(%rsi,%rcx), %xmm6
movsd 0x1c(%rsi,%rcx), %xmm2
movlhps %xmm2, %xmm6 # xmm6 = xmm6[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm6 # xmm6 = xmm6[0,2],xmm2[1,3]
movss 0x24(%rsi,%rcx), %xmm7
movss 0x28(%rsi,%rcx), %xmm4
movss 0x2c(%rsi,%rcx), %xmm2
movss 0x30(%rsi,%rcx), %xmm3
movaps %xmm4, %xmm9
mulss %xmm4, %xmm9
movaps %xmm7, %xmm10
mulss %xmm7, %xmm10
addss %xmm9, %xmm10
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm10, %xmm9
movaps %xmm3, %xmm10
mulss %xmm3, %xmm10
addss %xmm9, %xmm10
movaps %xmm10, %xmm9
rsqrtss %xmm10, %xmm9
movss 0x1d23fe5(%rip), %xmm11 # 0x1eec718
mulss %xmm9, %xmm11
mulss 0x1d23fdb(%rip), %xmm10 # 0x1eec71c
mulss %xmm9, %xmm10
mulss %xmm9, %xmm9
mulss %xmm10, %xmm9
addss %xmm11, %xmm9
mulss %xmm9, %xmm7
insertps $0x30, %xmm7, %xmm6 # xmm6 = xmm6[0,1,2],xmm7[0]
mulss %xmm9, %xmm4
insertps $0x30, %xmm4, %xmm1 # xmm1 = xmm1[0,1,2],xmm4[0]
mulss %xmm9, %xmm2
mulss %xmm3, %xmm9
insertps $0x10, 0x4(%rsi,%rcx), %xmm8 # xmm8 = xmm8[0],mem[0],xmm8[2,3]
insertps $0x30, %xmm9, %xmm5 # xmm5 = xmm5[0,1,2],xmm9[0]
insertps $0x20, 0x3c(%rsi,%rcx), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
insertps $0x30, %xmm2, %xmm8 # xmm8 = xmm8[0,1,2],xmm2[0]
jmp 0x1c87f9
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movss (%rsi,%rcx), %xmm1
movss 0x4(%rsi,%rcx), %xmm8
movss 0x8(%rsi,%rcx), %xmm5
movss 0xc(%rsi,%rcx), %xmm6
insertps $0x1c, 0x10(%rsi,%rcx), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rcx), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rcx), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rcx), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rcx), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rcx), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rcx), %xmm6 # xmm6 = xmm6[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rcx), %xmm6 # xmm6 = xmm6[0,1],mem[0],zero
movaps %xmm5, %xmm4
shufps $0xc9, %xmm5, %xmm4 # xmm4 = xmm4[1,2],xmm5[0,3]
movaps %xmm8, %xmm2
shufps $0xc9, %xmm8, %xmm2 # xmm2 = xmm2[1,2],xmm8[0,3]
movaps %xmm1, %xmm7
mulps %xmm2, %xmm7
mulps %xmm5, %xmm2
movaps %xmm8, %xmm3
mulps %xmm4, %xmm3
subps %xmm2, %xmm3
movaps %xmm3, %xmm2
shufps $0xc9, %xmm3, %xmm2 # xmm2 = xmm2[1,2],xmm3[0,3]
movaps %xmm1, %xmm9
shufps $0xc9, %xmm1, %xmm9 # xmm9 = xmm9[1,2],xmm1[0,3]
mulps %xmm1, %xmm4
mulps %xmm9, %xmm5
subps %xmm4, %xmm5
mulps %xmm8, %xmm9
dpps $0x7f, %xmm2, %xmm1
subps %xmm9, %xmm7
unpcklps %xmm7, %xmm3 # xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
shufps $0xc9, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3]
unpcklps %xmm7, %xmm2 # xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
xorps %xmm4, %xmm4
movss %xmm5, %xmm4 # xmm4 = xmm5[0],xmm4[1,2,3]
insertps $0x4a, %xmm5, %xmm5 # xmm5 = xmm5[1],zero,xmm5[2],zero
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
movaps %xmm2, %xmm4
unpcklps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
unpckhps %xmm5, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
divps %xmm1, %xmm4
divps %xmm1, %xmm2
divps %xmm1, %xmm3
movaps %xmm6, %xmm9
shufps $0x0, %xmm6, %xmm9 # xmm9 = xmm9[0,0],xmm6[0,0]
movaps %xmm6, %xmm1
shufps $0x55, %xmm6, %xmm1 # xmm1 = xmm1[1,1],xmm6[1,1]
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
mulps %xmm3, %xmm6
mulps %xmm2, %xmm1
addps %xmm6, %xmm1
mulps %xmm4, %xmm9
addps %xmm1, %xmm9
movaps %xmm4, %xmm1
shufps $0x0, %xmm4, %xmm1 # xmm1 = xmm1[0,0],xmm4[0,0]
movaps %xmm2, %xmm6
shufps $0x0, %xmm2, %xmm6 # xmm6 = xmm6[0,0],xmm2[0,0]
movaps %xmm2, %xmm5
shufps $0x55, %xmm2, %xmm5 # xmm5 = xmm5[1,1],xmm2[1,1]
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
movaps %xmm3, %xmm8
shufps $0x0, %xmm3, %xmm8 # xmm8 = xmm8[0,0],xmm3[0,0]
movaps %xmm3, %xmm7
shufps $0x55, %xmm3, %xmm7 # xmm7 = xmm7[1,1],xmm3[1,1]
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
movaps %xmm9, %xmm10
shufps $0x0, %xmm9, %xmm10 # xmm10 = xmm10[0,0],xmm9[0,0]
movaps %xmm9, %xmm11
shufps $0x55, %xmm9, %xmm11 # xmm11 = xmm11[1,1],xmm9[1,1]
shufps $0xaa, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
movaps (%r14), %xmm14
movaps %xmm14, 0x50(%rsp)
movaps 0x10(%r14), %xmm13
movaps %xmm13, (%rsp)
movaps 0x20(%r14), %xmm15
movaps %xmm15, 0x10(%rsp)
movaps %xmm15, %xmm12
mulps %xmm8, %xmm12
subps %xmm10, %xmm12
movaps %xmm15, %xmm10
mulps %xmm7, %xmm10
subps %xmm11, %xmm10
movaps %xmm15, %xmm11
mulps %xmm3, %xmm11
subps %xmm9, %xmm11
movaps %xmm13, %xmm9
mulps %xmm6, %xmm9
addps %xmm12, %xmm9
movaps %xmm13, %xmm12
mulps %xmm5, %xmm12
addps %xmm10, %xmm12
movaps %xmm13, %xmm10
mulps %xmm2, %xmm10
addps %xmm11, %xmm10
movaps %xmm14, %xmm11
mulps %xmm1, %xmm11
addps %xmm9, %xmm11
movaps %xmm4, %xmm9
shufps $0x55, %xmm4, %xmm9 # xmm9 = xmm9[1,1],xmm4[1,1]
movaps %xmm14, %xmm13
mulps %xmm9, %xmm13
addps %xmm12, %xmm13
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
movaps %xmm14, %xmm12
mulps %xmm4, %xmm12
addps %xmm10, %xmm12
movaps 0x40(%r14), %xmm10
movaps 0x50(%r14), %xmm14
movaps 0x60(%r14), %xmm15
movaps %xmm11, (%r14)
movaps %xmm13, 0x10(%r14)
movaps %xmm12, 0x20(%r14)
movaps %xmm15, 0x40(%rsp)
mulps %xmm15, %xmm8
movaps %xmm14, 0x30(%rsp)
mulps %xmm14, %xmm6
addps %xmm8, %xmm6
mulps %xmm15, %xmm7
mulps %xmm14, %xmm5
addps %xmm7, %xmm5
mulps %xmm15, %xmm3
mulps %xmm14, %xmm2
addps %xmm3, %xmm2
movaps %xmm10, 0x20(%rsp)
mulps %xmm10, %xmm1
addps %xmm6, %xmm1
mulps %xmm10, %xmm9
addps %xmm5, %xmm9
mulps %xmm10, %xmm4
addps %xmm2, %xmm4
movaps %xmm1, 0x40(%r14)
movaps %xmm9, 0x50(%r14)
movaps %xmm4, 0x60(%r14)
movq 0x10(%r8), %rdx
leaq 0x60(%rsp), %rdi
movaps %xmm0, (%rdi)
leaq 0x78(%rsp), %rcx
movq %rax, (%rcx)
movq %r15, 0x8(%rcx)
movq %rdx, 0x10(%rcx)
leaq 0x58(%rax), %rsi
movq %r14, %rdx
callq *0xa0(%rax)
movaps 0x50(%rsp), %xmm0
movaps %xmm0, (%r14)
movaps (%rsp), %xmm0
movaps %xmm0, 0x10(%r14)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, 0x20(%r14)
movaps 0x20(%rsp), %xmm0
movaps %xmm0, 0x40(%r14)
movaps 0x30(%rsp), %xmm0
movaps %xmm0, 0x50(%r14)
movaps 0x40(%rsp), %xmm0
movaps %xmm0, 0x60(%r14)
movaps 0x80(%r14), %xmm0
cmpltps 0x1d22fb2(%rip), %xmm0 # 0x1eeba10
movaps %xmm0, (%rbx)
orq $-0x1, (%r15)
movq %rbx, %rax
addq $0x90, %rsp
popq %rbx
popq %r14
popq %r15
retq
cmpl $0x9134, %esi # imm = 0x9134
je 0x1c8c1c
cmpl $0x9234, %esi # imm = 0x9234
je 0x1c8ac2
cmpl $0xb001, %esi # imm = 0xB001
je 0x1c8b21
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1c8c7e
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movaps (%rsi,%rcx), %xmm1
movaps 0x10(%rsi,%rcx), %xmm2
movaps 0x20(%rsi,%rcx), %xmm11
movaps 0x30(%rsi,%rcx), %xmm4
jmp 0x1c8c7e
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movsd 0x4(%rsi,%rcx), %xmm2
movss (%rsi,%rcx), %xmm1
shufps $0x4c, %xmm2, %xmm1 # xmm1 = xmm1[0,3],xmm2[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x10(%rsi,%rcx), %xmm3
movss 0xc(%rsi,%rcx), %xmm2
shufps $0x4c, %xmm3, %xmm2 # xmm2 = xmm2[0,3],xmm3[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
movsd 0x1c(%rsi,%rcx), %xmm3
movss 0x18(%rsi,%rcx), %xmm11
shufps $0x4c, %xmm3, %xmm11 # xmm11 = xmm11[0,3],xmm3[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
movsd 0x28(%rsi,%rcx), %xmm3
movss 0x24(%rsi,%rcx), %xmm4
shufps $0x4c, %xmm3, %xmm4 # xmm4 = xmm4[0,3],xmm3[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
jmp 0x1c8c7e
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movsd 0x10(%rsi,%rcx), %xmm11
insertps $0x20, 0x8(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0,1],mem[0],xmm11[3]
movsd 0x34(%rsi,%rcx), %xmm3
movss (%rsi,%rcx), %xmm1
movss 0xc(%rsi,%rcx), %xmm2
movlhps %xmm3, %xmm1 # xmm1 = xmm1[0],xmm3[0]
shufps $0xd8, %xmm3, %xmm1 # xmm1 = xmm1[0,2],xmm3[1,3]
movss 0x18(%rsi,%rcx), %xmm4
movsd 0x1c(%rsi,%rcx), %xmm3
movlhps %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0]
shufps $0xd8, %xmm3, %xmm4 # xmm4 = xmm4[0,2],xmm3[1,3]
movss 0x24(%rsi,%rcx), %xmm8
movss 0x28(%rsi,%rcx), %xmm7
movss 0x2c(%rsi,%rcx), %xmm5
movss 0x30(%rsi,%rcx), %xmm6
movaps %xmm7, %xmm3
mulss %xmm7, %xmm3
movaps %xmm8, %xmm9
mulss %xmm8, %xmm9
addss %xmm3, %xmm9
movaps %xmm5, %xmm3
mulss %xmm5, %xmm3
addss %xmm9, %xmm3
movaps %xmm6, %xmm9
mulss %xmm6, %xmm9
addss %xmm3, %xmm9
movaps %xmm9, %xmm3
rsqrtss %xmm9, %xmm3
movss 0x1d23b5a(%rip), %xmm10 # 0x1eec718
mulss %xmm3, %xmm10
mulss 0x1d23b50(%rip), %xmm9 # 0x1eec71c
mulss %xmm3, %xmm9
mulss %xmm3, %xmm3
mulss %xmm9, %xmm3
addss %xmm10, %xmm3
mulss %xmm3, %xmm8
insertps $0x30, %xmm8, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[0]
mulss %xmm3, %xmm7
insertps $0x30, %xmm7, %xmm1 # xmm1 = xmm1[0,1,2],xmm7[0]
mulss %xmm3, %xmm5
mulss %xmm6, %xmm3
insertps $0x10, 0x4(%rsi,%rcx), %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
insertps $0x30, %xmm3, %xmm11 # xmm11 = xmm11[0,1,2],xmm3[0]
insertps $0x20, 0x3c(%rsi,%rcx), %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
insertps $0x30, %xmm5, %xmm2 # xmm2 = xmm2[0,1,2],xmm5[0]
jmp 0x1c8c7e
movq (%rdx), %rsi
imulq 0x10(%rdx), %rcx
movss (%rsi,%rcx), %xmm1
movss 0x4(%rsi,%rcx), %xmm2
movss 0x8(%rsi,%rcx), %xmm11
movss 0xc(%rsi,%rcx), %xmm4
insertps $0x1c, 0x10(%rsi,%rcx), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rcx), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rcx), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rcx), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rcx), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rcx), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
movaps %xmm11, (%rsp)
movaps %xmm4, 0x10(%rsp)
movaps %xmm4, %xmm9
shufps $0xff, %xmm4, %xmm9 # xmm9 = xmm9[3,3],xmm4[3,3]
movaps %xmm1, %xmm10
shufps $0xff, %xmm1, %xmm10 # xmm10 = xmm10[3,3],xmm1[3,3]
movaps %xmm2, %xmm8
shufps $0xff, %xmm2, %xmm8 # xmm8 = xmm8[3,3],xmm2[3,3]
movaps %xmm11, %xmm6
shufps $0xff, %xmm11, %xmm6 # xmm6 = xmm6[3,3],xmm11[3,3]
movaps %xmm10, %xmm11
mulss %xmm10, %xmm11
movaps %xmm6, %xmm15
mulss %xmm9, %xmm15
movaps %xmm10, %xmm7
movaps %xmm10, %xmm13
movaps %xmm8, %xmm3
mulss %xmm9, %xmm3
mulss %xmm9, %xmm10
movaps %xmm9, %xmm14
mulss %xmm9, %xmm14
movaps %xmm14, %xmm9
addss %xmm11, %xmm9
movaps 0x1d239e3(%rip), %xmm4 # 0x1eec6d0
mulss %xmm8, %xmm7
movaps %xmm7, %xmm12
addss %xmm15, %xmm12
subss %xmm15, %xmm7
movaps %xmm8, %xmm5
xorps %xmm4, %xmm5
mulss %xmm8, %xmm5
addss %xmm5, %xmm9
xorps %xmm6, %xmm4
mulss %xmm6, %xmm4
addss %xmm4, %xmm9
mulss %xmm6, %xmm13
subss %xmm11, %xmm14
movaps %xmm8, %xmm11
mulss %xmm8, %xmm11
addss %xmm14, %xmm11
addss %xmm4, %xmm11
movaps %xmm13, %xmm15
subss %xmm3, %xmm15
mulss %xmm6, %xmm8
addss %xmm13, %xmm3
movaps %xmm8, %xmm13
addss %xmm10, %xmm13
subss %xmm10, %xmm8
addss %xmm12, %xmm12
addss %xmm15, %xmm15
addss %xmm5, %xmm14
mulss %xmm6, %xmm6
addss %xmm14, %xmm6
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
movaps 0x1d23976(%rip), %xmm10 # 0x1eec700
mulps %xmm10, %xmm15
movsd 0x1d23959(%rip), %xmm14 # 0x1eec6f0
mulps %xmm14, %xmm12
addps %xmm15, %xmm12
movss 0x1d2396c(%rip), %xmm15 # 0x1eec714
mulps %xmm15, %xmm9
addps %xmm12, %xmm9
addss %xmm13, %xmm13
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
mulps %xmm10, %xmm13
mulps %xmm14, %xmm11
addps %xmm13, %xmm11
addss %xmm7, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm15, %xmm7
addps %xmm11, %xmm7
xorps %xmm11, %xmm11
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm10, %xmm6
movaps %xmm1, %xmm10
shufps $0xe9, %xmm11, %xmm10 # xmm10 = xmm10[1,2],xmm11[2,3]
blendps $0x4, %xmm2, %xmm10 # xmm10 = xmm10[0,1],xmm2[2],xmm10[3]
addss %xmm3, %xmm3
addss %xmm8, %xmm8
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm14, %xmm8
addps %xmm6, %xmm8
mulps %xmm15, %xmm3
addps %xmm8, %xmm3
addps %xmm11, %xmm10
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps (%rsp), %xmm4
movaps %xmm4, %xmm5
movaps %xmm4, %xmm12
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
movaps %xmm4, %xmm14
movaps 0x10(%rsp), %xmm4
movaps %xmm4, %xmm6
movaps %xmm4, %xmm13
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm11, %xmm3
mulps %xmm7, %xmm11
addps %xmm3, %xmm11
mulps %xmm9, %xmm1
addps %xmm11, %xmm1
movaps %xmm2, %xmm8
shufps $0x55, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
mulps %xmm7, %xmm2
addps %xmm3, %xmm2
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm9, %xmm8
addps %xmm2, %xmm8
shufps $0x55, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
mulps %xmm7, %xmm12
addps %xmm14, %xmm12
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm9, %xmm5
addps %xmm12, %xmm5
shufps $0x55, %xmm13, %xmm13 # xmm13 = xmm13[1,1,1,1]
mulps %xmm7, %xmm13
addps %xmm4, %xmm13
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm9, %xmm6
addps %xmm13, %xmm6
addps %xmm10, %xmm6
jmp 0x1c87f9
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersectorKMB<4>::intersect(embree::vboolf_impl<4> const&, embree::sse42::InstanceArrayIntersectorKMB<4>::Precalculations const&, embree::RayHitK<4>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
void InstanceArrayIntersectorKMB<K>::intersect(const vbool<K>& valid_i, const Precalculations& pre, RayHitK<K>& ray, RayQueryContext* context, const Primitive& prim)
{
vbool<K> valid = valid_i;
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
valid &= (ray.mask & instance->mask) != 0;
if (none(valid)) return;
#endif
RTCRayQueryContext* user_context = context->user;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
AffineSpace3vf<K> world2local = instance->getWorld2Local<K>(prim.primID_, valid, ray.time());
const Vec3vf<K> ray_org = ray.org;
const Vec3vf<K> ray_dir = ray.dir;
ray.org = xfmPoint(world2local, ray_org);
ray.dir = xfmVector(world2local, ray_dir);
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.intersect(valid, ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
instance_id_stack::pop(user_context);
}
}
|
pushq %r14
pushq %rbx
subq $0x278, %rsp # imm = 0x278
movq %rdx, %rbx
movq (%rcx), %rax
movl (%r8), %edx
movl 0x4(%r8), %r9d
movq 0x1e8(%rax), %rax
movq (%rax,%r9,8), %rsi
movq 0x58(%rsi), %rax
testq %rax, %rax
jne 0x1c8f0c
movq 0x90(%rsi), %rax
movq 0xa0(%rsi), %r10
imulq %rdx, %r10
movl (%rax,%r10), %eax
movl $0xffffffff, %r10d # imm = 0xFFFFFFFF
cmpq %r10, %rax
je 0x1c8f0a
movq 0x60(%rsi), %r10
movq (%r10,%rax,8), %rax
jmp 0x1c8f0c
xorl %eax, %eax
testq %rax, %rax
je 0x1c98c6
movd 0x34(%rsi), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pand 0x90(%rbx), %xmm0
pxor %xmm4, %xmm4
pcmpeqd %xmm0, %xmm4
pandn (%rdi), %xmm4
movmskps %xmm4, %edi
testl %edi, %edi
je 0x1c98c6
movq 0x8(%rcx), %r14
cmpl $-0x1, (%r14)
jne 0x1c98c6
movl %r9d, (%r14)
movl %edx, 0x4(%r14)
movl (%r8), %edx
movzbl 0x3d(%rsi), %r8d
shll $0x8, %r8d
cmpl $0x100, %r8d # imm = 0x100
movaps %xmm4, 0x250(%rsp)
je 0x1c98d1
movss 0x28(%rsi), %xmm0
movss 0x2c(%rsi), %xmm1
movss 0x30(%rsi), %xmm2
subss %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm0, %xmm3
shufps $0x0, %xmm0, %xmm3 # xmm3 = xmm3[0,0],xmm0[0,0]
movaps 0x70(%rbx), %xmm12
subps %xmm1, %xmm12
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
divps %xmm2, %xmm12
mulps %xmm3, %xmm12
roundps $0x1, %xmm12, %xmm1
addss 0x1d27a16(%rip), %xmm0 # 0x1ef09cc
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm1
xorps %xmm0, %xmm0
maxps %xmm0, %xmm1
subps %xmm1, %xmm12
cvtps2dq %xmm1, %xmm11
movapd %xmm11, 0x230(%rsp)
movzbl %dil, %r8d
bsfq %r8, %r8
movslq 0x230(%rsp,%r8,4), %r8
movd %r8d, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pandn %xmm4, %xmm0
movmskps %xmm0, %r9d
testl %r9d, %r9d
jne 0x1c99db
movq 0x88(%rsi), %rdi
imulq $0x38, %r8, %r8
leaq (%rdi,%r8), %rsi
movl 0x20(%rdi,%r8), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
je 0x1c91e5
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1c9071
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1c90de
cmpl $0x9244, %r9d # imm = 0x9244
jne 0x1c9253
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movaps (%r9,%r10), %xmm4
movaps 0x10(%r9,%r10), %xmm11
movaps 0x20(%r9,%r10), %xmm10
movaps 0x30(%r9,%r10), %xmm8
jmp 0x1c9253
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movsd 0x4(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm4
shufps $0x4c, %xmm1, %xmm4 # xmm4 = xmm4[0,3],xmm1[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm1
movss 0xc(%r9,%r10), %xmm11
shufps $0x4c, %xmm1, %xmm11 # xmm11 = xmm11[0,3],xmm1[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm1
movss 0x18(%r9,%r10), %xmm10
shufps $0x4c, %xmm1, %xmm10 # xmm10 = xmm10[0,3],xmm1[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x28(%r9,%r10), %xmm1
movss 0x24(%r9,%r10), %xmm8
shufps $0x4c, %xmm1, %xmm8 # xmm8 = xmm8[0,3],xmm1[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
jmp 0x1c9253
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movsd 0x10(%r9,%r10), %xmm10
insertps $0x20, 0x8(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],xmm10[3]
movsd 0x34(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm4
movss 0xc(%r9,%r10), %xmm11
movlhps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm4 # xmm4 = xmm4[0,2],xmm1[1,3]
movss 0x18(%r9,%r10), %xmm8
movsd 0x1c(%r9,%r10), %xmm1
movlhps %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm8 # xmm8 = xmm8[0,2],xmm1[1,3]
movss 0x24(%r9,%r10), %xmm5
movss 0x28(%r9,%r10), %xmm3
movss 0x2c(%r9,%r10), %xmm1
movss 0x30(%r9,%r10), %xmm2
movaps %xmm3, %xmm6
mulss %xmm3, %xmm6
movaps %xmm5, %xmm9
mulss %xmm5, %xmm9
addss %xmm6, %xmm9
movaps %xmm1, %xmm6
mulss %xmm1, %xmm6
addss %xmm9, %xmm6
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm6, %xmm9
movaps %xmm9, %xmm6
rsqrtss %xmm9, %xmm6
movss 0x1d23591(%rip), %xmm0 # 0x1eec718
mulss %xmm6, %xmm0
mulss 0x1d23588(%rip), %xmm9 # 0x1eec71c
mulss %xmm6, %xmm9
mulss %xmm6, %xmm6
mulss %xmm9, %xmm6
addss %xmm0, %xmm6
mulss %xmm6, %xmm5
insertps $0x30, %xmm5, %xmm8 # xmm8 = xmm8[0,1,2],xmm5[0]
mulss %xmm6, %xmm3
insertps $0x30, %xmm3, %xmm4 # xmm4 = xmm4[0,1,2],xmm3[0]
mulss %xmm6, %xmm1
mulss %xmm2, %xmm6
insertps $0x10, 0x4(%r9,%r10), %xmm11 # xmm11 = xmm11[0],mem[0],xmm11[2,3]
insertps $0x30, %xmm6, %xmm10 # xmm10 = xmm10[0,1,2],xmm6[0]
insertps $0x20, 0x3c(%r9,%r10), %xmm11 # xmm11 = xmm11[0,1],mem[0],xmm11[3]
insertps $0x30, %xmm1, %xmm11 # xmm11 = xmm11[0,1,2],xmm1[0]
jmp 0x1c9253
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movss (%r9,%r10), %xmm4
movss 0x4(%r9,%r10), %xmm11
movss 0x8(%r9,%r10), %xmm10
movss 0xc(%r9,%r10), %xmm8
insertps $0x1c, 0x10(%r9,%r10), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r9,%r10), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
movaps %xmm4, %xmm7
shufps $0x0, %xmm4, %xmm7 # xmm7 = xmm7[0,0],xmm4[0,0]
movaps %xmm4, %xmm13
shufps $0x55, %xmm4, %xmm13 # xmm13 = xmm13[1,1],xmm4[1,1]
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
movaps %xmm11, %xmm3
shufps $0x0, %xmm11, %xmm3 # xmm3 = xmm3[0,0],xmm11[0,0]
movaps %xmm11, %xmm1
shufps $0x55, %xmm11, %xmm1 # xmm1 = xmm1[1,1],xmm11[1,1]
movaps %xmm1, 0x60(%rsp)
shufps $0xaa, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
movaps %xmm10, %xmm1
shufps $0x0, %xmm10, %xmm1 # xmm1 = xmm1[0,0],xmm10[0,0]
movaps %xmm1, 0x10(%rsp)
movaps %xmm10, %xmm14
shufps $0x55, %xmm10, %xmm14 # xmm14 = xmm14[1,1],xmm10[1,1]
shufps $0xaa, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
movaps %xmm8, %xmm1
shufps $0x0, %xmm8, %xmm1 # xmm1 = xmm1[0,0],xmm8[0,0]
movaps %xmm1, (%rsp)
movaps %xmm8, %xmm1
shufps $0x55, %xmm8, %xmm1 # xmm1 = xmm1[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
movl 0x58(%rdi,%r8), %edi
cmpl $0x9134, %edi # imm = 0x9134
movaps %xmm8, 0x50(%rsp)
movaps %xmm1, 0x40(%rsp)
movaps %xmm11, 0xc0(%rsp)
je 0x1c9497
cmpl $0x9234, %edi # imm = 0x9234
je 0x1c9323
cmpl $0xb001, %edi # imm = 0xB001
je 0x1c9389
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1c9500
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movaps (%rdi,%rdx), %xmm1
movaps 0x10(%rdi,%rdx), %xmm15
movaps 0x20(%rdi,%rdx), %xmm9
movaps 0x30(%rdi,%rdx), %xmm11
jmp 0x1c9500
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movsd 0x4(%rdi,%rdx), %xmm2
movss (%rdi,%rdx), %xmm1
shufps $0x4c, %xmm2, %xmm1 # xmm1 = xmm1[0,3],xmm2[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x10(%rdi,%rdx), %xmm2
movss 0xc(%rdi,%rdx), %xmm15
shufps $0x4c, %xmm2, %xmm15 # xmm15 = xmm15[0,3],xmm2[0,1]
shufps $0x78, %xmm15, %xmm15 # xmm15 = xmm15[0,2,3,1]
movsd 0x1c(%rdi,%rdx), %xmm2
movss 0x18(%rdi,%rdx), %xmm9
shufps $0x4c, %xmm2, %xmm9 # xmm9 = xmm9[0,3],xmm2[0,1]
shufps $0x78, %xmm9, %xmm9 # xmm9 = xmm9[0,2,3,1]
movsd 0x28(%rdi,%rdx), %xmm2
movss 0x24(%rdi,%rdx), %xmm11
shufps $0x4c, %xmm2, %xmm11 # xmm11 = xmm11[0,3],xmm2[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
jmp 0x1c9500
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movsd 0x10(%rdi,%rdx), %xmm9
insertps $0x20, 0x8(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
movsd 0x34(%rdi,%rdx), %xmm2
movss (%rdi,%rdx), %xmm1
movss 0xc(%rdi,%rdx), %xmm15
movlhps %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm1 # xmm1 = xmm1[0,2],xmm2[1,3]
movss 0x18(%rdi,%rdx), %xmm11
movsd 0x1c(%rdi,%rdx), %xmm2
movlhps %xmm2, %xmm11 # xmm11 = xmm11[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm11 # xmm11 = xmm11[0,2],xmm2[1,3]
movss 0x24(%rdi,%rdx), %xmm0
movss 0x28(%rdi,%rdx), %xmm5
movss 0x2c(%rdi,%rdx), %xmm8
movss 0x30(%rdi,%rdx), %xmm6
movss %xmm6, 0xa0(%rsp)
movaps %xmm5, %xmm2
mulss %xmm5, %xmm2
movaps %xmm3, 0x20(%rsp)
movaps %xmm0, %xmm3
mulss %xmm0, %xmm3
addss %xmm2, %xmm3
movaps %xmm8, %xmm2
mulss %xmm8, %xmm2
addss %xmm3, %xmm2
movaps %xmm6, %xmm3
mulss %xmm6, %xmm3
addss %xmm2, %xmm3
movaps %xmm3, %xmm2
rsqrtss %xmm3, %xmm2
movss 0x1d232e7(%rip), %xmm6 # 0x1eec718
mulss %xmm2, %xmm6
mulss 0x1d232df(%rip), %xmm3 # 0x1eec71c
mulss %xmm2, %xmm3
mulss %xmm2, %xmm2
mulss %xmm3, %xmm2
movaps 0x20(%rsp), %xmm3
addss %xmm6, %xmm2
mulss %xmm2, %xmm0
insertps $0x30, %xmm0, %xmm11 # xmm11 = xmm11[0,1,2],xmm0[0]
mulss %xmm2, %xmm5
insertps $0x30, %xmm5, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0]
mulss %xmm2, %xmm8
mulss 0xa0(%rsp), %xmm2
insertps $0x10, 0x4(%rdi,%rdx), %xmm15 # xmm15 = xmm15[0],mem[0],xmm15[2,3]
insertps $0x30, %xmm2, %xmm9 # xmm9 = xmm9[0,1,2],xmm2[0]
insertps $0x20, 0x3c(%rdi,%rdx), %xmm15 # xmm15 = xmm15[0,1],mem[0],xmm15[3]
insertps $0x30, %xmm8, %xmm15 # xmm15 = xmm15[0,1,2],xmm8[0]
jmp 0x1c9500
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movss (%rdi,%rdx), %xmm1
movss 0x4(%rdi,%rdx), %xmm15
movss 0x8(%rdi,%rdx), %xmm9
movss 0xc(%rdi,%rdx), %xmm11
insertps $0x1c, 0x10(%rdi,%rdx), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdi,%rdx), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdi,%rdx), %xmm15 # xmm15 = xmm15[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdi,%rdx), %xmm15 # xmm15 = xmm15[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdi,%rdx), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdi,%rdx), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
movaps %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm2 # xmm2 = xmm2[0,0],xmm1[0,0]
movaps 0x1d23502(%rip), %xmm6 # 0x1eeca10
subps %xmm12, %xmm6
mulps %xmm12, %xmm2
mulps %xmm6, %xmm7
addps %xmm2, %xmm7
movaps %xmm1, %xmm2
shufps $0x55, %xmm1, %xmm2 # xmm2 = xmm2[1,1],xmm1[1,1]
mulps %xmm12, %xmm2
mulps %xmm6, %xmm13
addps %xmm2, %xmm13
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm12, %xmm1
mulps %xmm6, %xmm4
addps %xmm1, %xmm4
movaps %xmm15, %xmm1
shufps $0x0, %xmm15, %xmm1 # xmm1 = xmm1[0,0],xmm15[0,0]
mulps %xmm12, %xmm1
mulps %xmm6, %xmm3
addps %xmm1, %xmm3
movaps %xmm15, %xmm1
shufps $0x55, %xmm15, %xmm1 # xmm1 = xmm1[1,1],xmm15[1,1]
mulps %xmm12, %xmm1
movaps %xmm3, %xmm8
movaps 0x60(%rsp), %xmm3
mulps %xmm6, %xmm3
addps %xmm1, %xmm3
shufps $0xaa, %xmm15, %xmm15 # xmm15 = xmm15[2,2,2,2]
mulps %xmm12, %xmm15
movaps %xmm4, %xmm0
movaps 0xc0(%rsp), %xmm4
mulps %xmm6, %xmm4
addps %xmm15, %xmm4
movaps %xmm9, %xmm1
shufps $0x0, %xmm9, %xmm1 # xmm1 = xmm1[0,0],xmm9[0,0]
mulps %xmm12, %xmm1
movaps 0x10(%rsp), %xmm5
mulps %xmm6, %xmm5
addps %xmm1, %xmm5
movaps %xmm9, %xmm1
shufps $0x55, %xmm9, %xmm1 # xmm1 = xmm1[1,1],xmm9[1,1]
shufps $0xaa, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
mulps %xmm12, %xmm1
mulps %xmm6, %xmm14
addps %xmm1, %xmm14
movaps %xmm11, %xmm1
shufps $0x0, %xmm11, %xmm1 # xmm1 = xmm1[0,0],xmm11[0,0]
mulps %xmm12, %xmm9
mulps %xmm6, %xmm10
addps %xmm9, %xmm10
movaps %xmm11, %xmm2
shufps $0x55, %xmm11, %xmm2 # xmm2 = xmm2[1,1],xmm11[1,1]
shufps $0xaa, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
mulps %xmm12, %xmm1
mulps %xmm12, %xmm2
mulps %xmm12, %xmm11
movaps (%rsp), %xmm9
mulps %xmm6, %xmm9
addps %xmm1, %xmm9
movaps %xmm9, (%rsp)
movaps 0x40(%rsp), %xmm9
mulps %xmm6, %xmm9
addps %xmm2, %xmm9
mulps 0x50(%rsp), %xmm6
addps %xmm11, %xmm6
movaps %xmm4, %xmm1
mulps %xmm14, %xmm1
movaps %xmm3, %xmm2
mulps %xmm10, %xmm2
subps %xmm1, %xmm2
movaps %xmm8, %xmm12
movaps %xmm8, 0x20(%rsp)
movaps %xmm8, %xmm1
mulps %xmm10, %xmm1
movaps %xmm4, %xmm8
mulps %xmm5, %xmm8
subps %xmm1, %xmm8
movaps %xmm3, %xmm1
mulps %xmm5, %xmm1
mulps %xmm14, %xmm12
subps %xmm1, %xmm12
movaps %xmm13, %xmm1
mulps %xmm10, %xmm1
movaps %xmm0, %xmm11
mulps %xmm14, %xmm11
subps %xmm1, %xmm11
movaps %xmm0, %xmm1
mulps %xmm5, %xmm1
mulps %xmm7, %xmm10
subps %xmm1, %xmm10
mulps %xmm7, %xmm14
mulps %xmm13, %xmm5
subps %xmm14, %xmm5
movaps %xmm0, %xmm1
mulps %xmm3, %xmm1
movaps %xmm13, %xmm14
mulps %xmm4, %xmm14
subps %xmm1, %xmm14
mulps %xmm7, %xmm4
movaps %xmm0, %xmm15
movaps 0x20(%rsp), %xmm1
mulps %xmm1, %xmm15
subps %xmm4, %xmm15
mulps %xmm13, %xmm1
mulps %xmm7, %xmm3
subps %xmm1, %xmm3
mulps %xmm12, %xmm0
mulps %xmm8, %xmm13
addps %xmm0, %xmm13
mulps %xmm2, %xmm7
addps %xmm13, %xmm7
movaps %xmm2, %xmm13
divps %xmm7, %xmm13
divps %xmm7, %xmm11
divps %xmm7, %xmm14
divps %xmm7, %xmm8
divps %xmm7, %xmm10
divps %xmm7, %xmm15
divps %xmm7, %xmm12
divps %xmm7, %xmm5
divps %xmm7, %xmm3
movaps %xmm3, %xmm4
movaps %xmm6, %xmm0
mulps %xmm12, %xmm0
movaps %xmm6, %xmm1
movaps %xmm5, 0x10(%rsp)
mulps %xmm5, %xmm1
movaps %xmm9, %xmm2
mulps %xmm8, %xmm2
addps %xmm0, %xmm2
movaps (%rsp), %xmm7
movaps %xmm9, %xmm3
movaps %xmm10, 0x20(%rsp)
mulps %xmm10, %xmm3
addps %xmm1, %xmm3
mulps %xmm4, %xmm6
mulps %xmm15, %xmm9
addps %xmm6, %xmm9
movaps %xmm7, %xmm0
mulps %xmm13, %xmm0
addps %xmm2, %xmm0
movaps %xmm7, %xmm1
mulps %xmm11, %xmm1
addps %xmm3, %xmm1
movaps %xmm7, %xmm10
mulps %xmm14, %xmm10
addps %xmm9, %xmm10
movaps %xmm11, %xmm9
movaps 0x1d22f80(%rip), %xmm2 # 0x1eec6d0
xorps %xmm2, %xmm0
xorps %xmm2, %xmm1
xorps %xmm2, %xmm10
movaps 0x250(%rsp), %xmm6
movaps 0x10(%rbx), %xmm5
movaps %xmm5, 0x60(%rsp)
movaps 0x20(%rbx), %xmm3
movaps %xmm3, (%rsp)
movaps %xmm12, %xmm2
mulps %xmm3, %xmm2
addps %xmm0, %xmm2
movaps 0x10(%rsp), %xmm11
movaps %xmm11, %xmm0
mulps %xmm3, %xmm0
addps %xmm1, %xmm0
movaps %xmm4, %xmm1
mulps %xmm3, %xmm1
addps %xmm10, %xmm1
movaps %xmm8, %xmm3
mulps %xmm5, %xmm3
addps %xmm2, %xmm3
movaps 0x20(%rsp), %xmm7
movaps %xmm7, %xmm2
mulps %xmm5, %xmm2
addps %xmm0, %xmm2
movaps %xmm15, %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
movaps (%rbx), %xmm5
movaps %xmm5, 0x50(%rsp)
movaps %xmm13, %xmm1
mulps %xmm5, %xmm1
addps %xmm3, %xmm1
movaps %xmm9, %xmm3
mulps %xmm5, %xmm3
addps %xmm2, %xmm3
movaps %xmm14, %xmm2
mulps %xmm5, %xmm2
addps %xmm0, %xmm2
movaps 0x40(%rbx), %xmm0
movaps 0x50(%rbx), %xmm5
movaps 0x60(%rbx), %xmm10
movaps %xmm1, (%rbx)
movaps %xmm3, 0x10(%rbx)
movaps %xmm2, 0x20(%rbx)
movaps %xmm10, 0x40(%rsp)
mulps %xmm10, %xmm12
movaps %xmm5, 0xc0(%rsp)
mulps %xmm5, %xmm8
addps %xmm12, %xmm8
mulps %xmm10, %xmm11
mulps %xmm5, %xmm7
addps %xmm11, %xmm7
mulps %xmm10, %xmm4
mulps %xmm5, %xmm15
addps %xmm4, %xmm15
movaps %xmm0, 0xa0(%rsp)
mulps %xmm0, %xmm13
addps %xmm8, %xmm13
mulps %xmm0, %xmm9
addps %xmm7, %xmm9
mulps %xmm0, %xmm14
addps %xmm15, %xmm14
movaps %xmm13, 0x40(%rbx)
movaps %xmm9, 0x50(%rbx)
movaps %xmm14, 0x60(%rbx)
movq 0x10(%rcx), %rdx
leaq 0x260(%rsp), %rdi
movaps %xmm6, (%rdi)
leaq 0x230(%rsp), %rcx
movq %rax, (%rcx)
movq %r14, 0x8(%rcx)
movq %rdx, 0x10(%rcx)
leaq 0x58(%rax), %rsi
movq %rbx, %rdx
callq *0x98(%rax)
movaps 0x50(%rsp), %xmm0
movaps %xmm0, (%rbx)
movaps 0x60(%rsp), %xmm0
movaps %xmm0, 0x10(%rbx)
movaps (%rsp), %xmm0
movaps %xmm0, 0x20(%rbx)
movaps 0xa0(%rsp), %xmm0
movaps %xmm0, 0x40(%rbx)
movaps 0xc0(%rsp), %xmm0
movaps %xmm0, 0x50(%rbx)
movdqa 0x40(%rsp), %xmm0
movdqa %xmm0, 0x60(%rbx)
orq $-0x1, (%r14)
addq $0x278, %rsp # imm = 0x278
popq %rbx
popq %r14
retq
movss 0x28(%rsi), %xmm0
movss 0x2c(%rsi), %xmm1
movss 0x30(%rsi), %xmm2
subss %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm0, %xmm3
shufps $0x0, %xmm0, %xmm3 # xmm3 = xmm3[0,0],xmm0[0,0]
movaps 0x70(%rbx), %xmm12
subps %xmm1, %xmm12
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
divps %xmm2, %xmm12
mulps %xmm3, %xmm12
roundps $0x1, %xmm12, %xmm1
addss 0x1d270b9(%rip), %xmm0 # 0x1ef09cc
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm1
xorps %xmm0, %xmm0
maxps %xmm0, %xmm1
subps %xmm1, %xmm12
cvtps2dq %xmm1, %xmm11
movapd %xmm11, 0x230(%rsp)
movzbl %dil, %r8d
bsfq %r8, %r8
movslq 0x230(%rsp,%r8,4), %r8
movd %r8d, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pandn %xmm4, %xmm0
movmskps %xmm0, %r9d
testl %r9d, %r9d
movaps %xmm12, 0x1d0(%rsp)
jne 0x1cb404
movq 0x88(%rsi), %rdi
imulq $0x38, %r8, %r8
leaq (%rdi,%r8), %rsi
movl 0x20(%rdi,%r8), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
je 0x1ca5cf
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1ca464
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1ca4d1
cmpl $0x9244, %r9d # imm = 0x9244
jne 0x1ca63d
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movaps (%r9,%r10), %xmm2
movaps 0x10(%r9,%r10), %xmm12
movaps 0x20(%r9,%r10), %xmm13
movaps 0x30(%r9,%r10), %xmm8
jmp 0x1ca63d
movaps %xmm12, 0x1a0(%rsp)
testl %edi, %edi
je 0x1ca1df
movq 0x88(%rsi), %rsi
movdqa %xmm4, %xmm14
movzbl %dil, %edi
andl $0xf, %edi
bsfq %rdi, %rdi
movslq %edi, %rdi
movslq 0x230(%rsp,%rdi,4), %rdi
movd %edi, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pand %xmm14, %xmm0
imulq $0x38, %rdi, %r8
leaq (%rsi,%r8), %rdi
movl 0x20(%rsi,%r8), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
movdqa %xmm14, 0x50(%rsp)
movaps %xmm13, 0x20(%rsp)
je 0x1c9c4d
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1c9aa1
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1c9b10
cmpl $0x9244, %r9d # imm = 0x9244
movaps 0x10(%rsp), %xmm14
movaps 0x1c0(%rsp), %xmm13
jne 0x1c9cbb
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movaps (%r9,%r10), %xmm14
movaps 0x10(%r9,%r10), %xmm10
movaps 0x20(%r9,%r10), %xmm13
movaps 0x30(%r9,%r10), %xmm15
jmp 0x1c9cbb
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movsd 0x4(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm14
shufps $0x4c, %xmm1, %xmm14 # xmm14 = xmm14[0,3],xmm1[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm1
movss 0xc(%r9,%r10), %xmm10
shufps $0x4c, %xmm1, %xmm10 # xmm10 = xmm10[0,3],xmm1[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm1
movss 0x18(%r9,%r10), %xmm13
shufps $0x4c, %xmm1, %xmm13 # xmm13 = xmm13[0,3],xmm1[0,1]
shufps $0x78, %xmm13, %xmm13 # xmm13 = xmm13[0,2,3,1]
movsd 0x28(%r9,%r10), %xmm1
movss 0x24(%r9,%r10), %xmm15
shufps $0x4c, %xmm1, %xmm15 # xmm15 = xmm15[0,3],xmm1[0,1]
shufps $0x78, %xmm15, %xmm15 # xmm15 = xmm15[0,2,3,1]
jmp 0x1c9cbb
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movsd 0x10(%r9,%r10), %xmm13
insertps $0x20, 0x8(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],xmm13[3]
movsd 0x34(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm14
movss 0xc(%r9,%r10), %xmm10
movlhps %xmm1, %xmm14 # xmm14 = xmm14[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm14 # xmm14 = xmm14[0,2],xmm1[1,3]
movss 0x18(%r9,%r10), %xmm15
movsd 0x1c(%r9,%r10), %xmm1
movlhps %xmm1, %xmm15 # xmm15 = xmm15[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm15 # xmm15 = xmm15[0,2],xmm1[1,3]
movss 0x24(%r9,%r10), %xmm2
movaps %xmm9, 0x130(%rsp)
movaps %xmm6, 0x120(%rsp)
movss 0x28(%r9,%r10), %xmm6
movss 0x2c(%r9,%r10), %xmm1
movss 0x30(%r9,%r10), %xmm12
movaps %xmm3, (%rsp)
movaps %xmm6, %xmm3
mulss %xmm6, %xmm3
movaps %xmm5, 0x10(%rsp)
movaps %xmm4, %xmm5
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
addss %xmm3, %xmm4
movaps %xmm1, %xmm3
mulss %xmm1, %xmm3
addss %xmm4, %xmm3
movaps %xmm12, %xmm4
mulss %xmm12, %xmm4
addss %xmm3, %xmm4
movaps %xmm4, %xmm3
rsqrtss %xmm4, %xmm3
movaps %xmm3, %xmm9
mulss 0x1d22b42(%rip), %xmm9 # 0x1eec718
mulss 0x1d22b3e(%rip), %xmm4 # 0x1eec71c
mulss %xmm3, %xmm4
mulss %xmm3, %xmm3
mulss %xmm4, %xmm3
movaps %xmm5, %xmm4
movaps 0x10(%rsp), %xmm5
addss %xmm9, %xmm3
mulss %xmm3, %xmm2
insertps $0x30, %xmm2, %xmm15 # xmm15 = xmm15[0,1,2],xmm2[0]
mulss %xmm3, %xmm6
insertps $0x30, %xmm6, %xmm14 # xmm14 = xmm14[0,1,2],xmm6[0]
movaps 0x120(%rsp), %xmm6
movaps 0x130(%rsp), %xmm9
mulss %xmm3, %xmm1
mulss %xmm12, %xmm3
insertps $0x10, 0x4(%r9,%r10), %xmm10 # xmm10 = xmm10[0],mem[0],xmm10[2,3]
insertps $0x30, %xmm3, %xmm13 # xmm13 = xmm13[0,1,2],xmm3[0]
movaps (%rsp), %xmm3
insertps $0x20, 0x3c(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],xmm10[3]
insertps $0x30, %xmm1, %xmm10 # xmm10 = xmm10[0,1,2],xmm1[0]
jmp 0x1c9cbb
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movss (%r9,%r10), %xmm14
movss 0x4(%r9,%r10), %xmm10
movss 0x8(%r9,%r10), %xmm13
movss 0xc(%r9,%r10), %xmm15
insertps $0x1c, 0x10(%r9,%r10), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm13 # xmm13 = xmm13[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r9,%r10), %xmm15 # xmm15 = xmm15[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm15 # xmm15 = xmm15[0,1],mem[0],zero
movaps %xmm14, %xmm1
shufps $0x0, %xmm14, %xmm1 # xmm1 = xmm1[0,0],xmm14[0,0]
movaps 0xc0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xc0(%rsp)
movaps %xmm14, %xmm1
shufps $0x55, %xmm14, %xmm1 # xmm1 = xmm1[1,1],xmm14[1,1]
movaps 0xd0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xd0(%rsp)
movaps %xmm14, 0x10(%rsp)
shufps $0xaa, %xmm14, %xmm14 # xmm14 = xmm14[2,2,2,2]
movaps 0xa0(%rsp), %xmm2
blendvps %xmm0, %xmm14, %xmm2
movaps %xmm2, 0xa0(%rsp)
movaps %xmm10, %xmm1
shufps $0x0, %xmm10, %xmm1 # xmm1 = xmm1[0,0],xmm10[0,0]
movaps 0xf0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xf0(%rsp)
movaps %xmm10, %xmm1
shufps $0x55, %xmm10, %xmm1 # xmm1 = xmm1[1,1],xmm10[1,1]
movaps 0x60(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x60(%rsp)
movaps %xmm10, %xmm1
shufps $0xaa, %xmm10, %xmm1 # xmm1 = xmm1[2,2],xmm10[2,2]
movaps 0xe0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xe0(%rsp)
movaps %xmm13, %xmm1
shufps $0x0, %xmm13, %xmm1 # xmm1 = xmm1[0,0],xmm13[0,0]
blendvps %xmm0, %xmm1, %xmm5
movaps %xmm13, %xmm1
shufps $0x55, %xmm13, %xmm1 # xmm1 = xmm1[1,1],xmm13[1,1]
blendvps %xmm0, %xmm1, %xmm6
movaps %xmm13, 0x1c0(%rsp)
shufps $0xaa, %xmm13, %xmm13 # xmm13 = xmm13[2,2,2,2]
blendvps %xmm0, %xmm13, %xmm4
movaps %xmm15, %xmm1
shufps $0x0, %xmm15, %xmm1 # xmm1 = xmm1[0,0],xmm15[0,0]
blendvps %xmm0, %xmm1, %xmm3
movaps %xmm15, %xmm1
shufps $0x55, %xmm15, %xmm1 # xmm1 = xmm1[1,1],xmm15[1,1]
movaps 0x40(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x40(%rsp)
movaps %xmm15, %xmm1
shufps $0xaa, %xmm15, %xmm1 # xmm1 = xmm1[2,2],xmm15[2,2]
movaps 0xb0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xb0(%rsp)
movdqa 0x50(%rsp), %xmm14
pxor %xmm0, %xmm14
movl 0x58(%rsi,%r8), %r8d
cmpl $0x9134, %r8d # imm = 0x9134
movdqa %xmm14, 0x50(%rsp)
je 0x1ca00f
cmpl $0x9234, %r8d # imm = 0x9234
je 0x1c9e5f
cmpl $0xb001, %r8d # imm = 0xB001
je 0x1c9ecd
cmpl $0x9244, %r8d # imm = 0x9244
movaps 0x20(%rsp), %xmm13
movaps 0x1b0(%rsp), %xmm12
jne 0x1ca07e
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movaps (%r8,%rdi), %xmm13
movaps 0x10(%r8,%rdi), %xmm8
movaps 0x20(%r8,%rdi), %xmm12
movaps 0x30(%r8,%rdi), %xmm7
jmp 0x1ca07e
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movsd 0x4(%r8,%rdi), %xmm1
movss (%r8,%rdi), %xmm13
shufps $0x4c, %xmm1, %xmm13 # xmm13 = xmm13[0,3],xmm1[0,1]
shufps $0x78, %xmm13, %xmm13 # xmm13 = xmm13[0,2,3,1]
movsd 0x10(%r8,%rdi), %xmm1
movss 0xc(%r8,%rdi), %xmm8
shufps $0x4c, %xmm1, %xmm8 # xmm8 = xmm8[0,3],xmm1[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x1c(%r8,%rdi), %xmm1
movss 0x18(%r8,%rdi), %xmm12
shufps $0x4c, %xmm1, %xmm12 # xmm12 = xmm12[0,3],xmm1[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x28(%r8,%rdi), %xmm1
movss 0x24(%r8,%rdi), %xmm7
shufps $0x4c, %xmm1, %xmm7 # xmm7 = xmm7[0,3],xmm1[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
jmp 0x1ca07e
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movsd 0x10(%r8,%rdi), %xmm12
insertps $0x20, 0x8(%r8,%rdi), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
movsd 0x34(%r8,%rdi), %xmm1
movss (%r8,%rdi), %xmm13
movss 0xc(%r8,%rdi), %xmm8
movlhps %xmm1, %xmm13 # xmm13 = xmm13[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm13 # xmm13 = xmm13[0,2],xmm1[1,3]
movss 0x18(%r8,%rdi), %xmm7
movsd 0x1c(%r8,%rdi), %xmm1
movlhps %xmm1, %xmm7 # xmm7 = xmm7[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm7 # xmm7 = xmm7[0,2],xmm1[1,3]
movss 0x24(%r8,%rdi), %xmm2
movaps %xmm9, 0x130(%rsp)
movaps %xmm6, 0x120(%rsp)
movss 0x28(%r8,%rdi), %xmm6
movss 0x2c(%r8,%rdi), %xmm1
movaps %xmm3, (%rsp)
movss 0x30(%r8,%rdi), %xmm14
movaps %xmm4, 0x20(%rsp)
movaps %xmm6, %xmm4
mulss %xmm6, %xmm4
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm4, %xmm9
movaps %xmm1, %xmm4
mulss %xmm1, %xmm4
addss %xmm9, %xmm4
movaps %xmm14, %xmm9
mulss %xmm14, %xmm9
addss %xmm4, %xmm9
movaps %xmm9, %xmm4
rsqrtss %xmm9, %xmm4
movaps %xmm5, %xmm3
movaps %xmm4, %xmm5
mulss 0x1d22781(%rip), %xmm5 # 0x1eec718
mulss 0x1d2277c(%rip), %xmm9 # 0x1eec71c
mulss %xmm4, %xmm9
mulss %xmm4, %xmm4
mulss %xmm9, %xmm4
addss %xmm5, %xmm4
movaps %xmm3, %xmm5
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
mulss %xmm4, %xmm6
insertps $0x30, %xmm6, %xmm13 # xmm13 = xmm13[0,1,2],xmm6[0]
movaps 0x120(%rsp), %xmm6
movaps 0x130(%rsp), %xmm9
mulss %xmm4, %xmm1
mulss %xmm14, %xmm4
movaps (%rsp), %xmm3
insertps $0x10, 0x4(%r8,%rdi), %xmm8 # xmm8 = xmm8[0],mem[0],xmm8[2,3]
insertps $0x30, %xmm4, %xmm12 # xmm12 = xmm12[0,1,2],xmm4[0]
movaps 0x20(%rsp), %xmm4
insertps $0x20, 0x3c(%r8,%rdi), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
insertps $0x30, %xmm1, %xmm8 # xmm8 = xmm8[0,1,2],xmm1[0]
jmp 0x1ca07e
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movss (%r8,%rdi), %xmm13
movss 0x4(%r8,%rdi), %xmm8
movss 0x8(%r8,%rdi), %xmm12
movss 0xc(%r8,%rdi), %xmm7
insertps $0x1c, 0x10(%r8,%rdi), %xmm13 # xmm13 = xmm13[0],mem[0],zero,zero
insertps $0x28, 0x20(%r8,%rdi), %xmm13 # xmm13 = xmm13[0,1],mem[0],zero
insertps $0x1c, 0x14(%r8,%rdi), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x24(%r8,%rdi), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x18(%r8,%rdi), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x28(%r8,%rdi), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r8,%rdi), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r8,%rdi), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
movaps %xmm13, %xmm1
shufps $0x0, %xmm13, %xmm1 # xmm1 = xmm1[0,0],xmm13[0,0]
movaps 0x150(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x150(%rsp)
movaps %xmm13, %xmm1
shufps $0x55, %xmm13, %xmm1 # xmm1 = xmm1[1,1],xmm13[1,1]
movaps 0x160(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x160(%rsp)
movaps %xmm13, %xmm1
shufps $0xaa, %xmm13, %xmm1 # xmm1 = xmm1[2,2],xmm13[2,2]
movaps 0x170(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x170(%rsp)
movaps %xmm8, %xmm1
shufps $0x0, %xmm8, %xmm1 # xmm1 = xmm1[0,0],xmm8[0,0]
movaps 0x140(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x140(%rsp)
movaps %xmm8, %xmm1
shufps $0x55, %xmm8, %xmm1 # xmm1 = xmm1[1,1],xmm8[1,1]
movaps 0x100(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x100(%rsp)
movaps %xmm8, %xmm1
shufps $0xaa, %xmm8, %xmm1 # xmm1 = xmm1[2,2],xmm8[2,2]
movaps 0x110(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x110(%rsp)
movaps %xmm12, %xmm1
shufps $0x0, %xmm12, %xmm1 # xmm1 = xmm1[0,0],xmm12[0,0]
movaps 0x1d0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x1d0(%rsp)
movaps %xmm12, %xmm1
shufps $0x55, %xmm12, %xmm1 # xmm1 = xmm1[1,1],xmm12[1,1]
movaps 0x90(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x90(%rsp)
movaps %xmm12, 0x1b0(%rsp)
shufps $0xaa, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
movaps 0x70(%rsp), %xmm2
blendvps %xmm0, %xmm12, %xmm2
movaps %xmm2, 0x70(%rsp)
movaps %xmm7, %xmm1
shufps $0x0, %xmm7, %xmm1 # xmm1 = xmm1[0,0],xmm7[0,0]
movaps 0x80(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x80(%rsp)
movaps %xmm7, %xmm1
shufps $0x55, %xmm7, %xmm1 # xmm1 = xmm1[1,1],xmm7[1,1]
movaps 0x30(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x30(%rsp)
movaps %xmm7, %xmm1
shufps $0xaa, %xmm7, %xmm1 # xmm1 = xmm1[2,2],xmm7[2,2]
blendvps %xmm0, %xmm1, %xmm9
movaps 0x50(%rsp), %xmm14
movmskps %xmm14, %edi
testl %edi, %edi
jne 0x1c99f8
jmp 0x1ca1df
movaps 0x1d2282a(%rip), %xmm0 # 0x1eeca10
movaps 0x1a0(%rsp), %xmm1
subps %xmm1, %xmm0
movaps 0x150(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps 0xc0(%rsp), %xmm2
mulps %xmm0, %xmm2
addps %xmm7, %xmm2
movaps 0x160(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps %xmm6, %xmm13
movaps 0xd0(%rsp), %xmm6
mulps %xmm0, %xmm6
addps %xmm7, %xmm6
movaps 0x170(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps 0xa0(%rsp), %xmm10
mulps %xmm0, %xmm10
addps %xmm7, %xmm10
movaps 0x140(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps 0xf0(%rsp), %xmm11
mulps %xmm0, %xmm11
addps %xmm7, %xmm11
movaps 0x100(%rsp), %xmm8
mulps %xmm1, %xmm8
movaps %xmm9, %xmm7
movaps %xmm4, %xmm12
movaps 0x60(%rsp), %xmm4
mulps %xmm0, %xmm4
addps %xmm8, %xmm4
movaps 0x110(%rsp), %xmm9
mulps %xmm1, %xmm9
movaps %xmm3, %xmm8
movaps 0xe0(%rsp), %xmm3
mulps %xmm0, %xmm3
addps %xmm9, %xmm3
movaps 0x1d0(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm0, %xmm5
addps %xmm9, %xmm5
movaps 0x90(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm0, %xmm13
addps %xmm9, %xmm13
movaps 0x70(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm0, %xmm12
addps %xmm9, %xmm12
movaps 0x80(%rsp), %xmm15
mulps %xmm1, %xmm15
movaps 0x30(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm1, %xmm7
mulps %xmm0, %xmm8
addps %xmm15, %xmm8
movaps %xmm8, (%rsp)
movaps 0x40(%rsp), %xmm1
mulps %xmm0, %xmm1
addps %xmm9, %xmm1
movaps %xmm1, 0x40(%rsp)
mulps 0xb0(%rsp), %xmm0
addps %xmm7, %xmm0
movaps %xmm3, %xmm1
mulps %xmm13, %xmm1
movaps %xmm12, %xmm7
movaps %xmm5, %xmm12
movaps %xmm4, %xmm5
mulps %xmm7, %xmm5
subps %xmm1, %xmm5
movaps %xmm11, %xmm1
mulps %xmm7, %xmm1
movaps %xmm3, %xmm8
mulps %xmm12, %xmm8
subps %xmm1, %xmm8
movaps %xmm4, %xmm1
mulps %xmm12, %xmm1
movaps %xmm11, %xmm9
mulps %xmm13, %xmm9
subps %xmm1, %xmm9
movaps %xmm9, 0x10(%rsp)
movaps %xmm6, %xmm1
mulps %xmm7, %xmm1
movaps %xmm10, %xmm9
mulps %xmm13, %xmm9
subps %xmm1, %xmm9
movaps %xmm10, %xmm1
mulps %xmm12, %xmm1
mulps %xmm2, %xmm7
subps %xmm1, %xmm7
mulps %xmm2, %xmm13
mulps %xmm6, %xmm12
subps %xmm13, %xmm12
movaps %xmm10, %xmm1
mulps %xmm4, %xmm1
movaps %xmm6, %xmm14
mulps %xmm3, %xmm14
subps %xmm1, %xmm14
mulps %xmm2, %xmm3
movaps %xmm10, %xmm15
mulps %xmm11, %xmm15
subps %xmm3, %xmm15
movaps %xmm5, %xmm13
mulps %xmm6, %xmm11
mulps %xmm2, %xmm4
subps %xmm11, %xmm4
movaps 0x10(%rsp), %xmm3
mulps %xmm3, %xmm10
mulps %xmm8, %xmm6
addps %xmm10, %xmm6
mulps %xmm5, %xmm2
addps %xmm6, %xmm2
divps %xmm2, %xmm13
divps %xmm2, %xmm9
divps %xmm2, %xmm14
divps %xmm2, %xmm8
divps %xmm2, %xmm7
divps %xmm2, %xmm15
divps %xmm2, %xmm3
divps %xmm2, %xmm12
divps %xmm2, %xmm4
movaps %xmm0, %xmm1
mulps %xmm3, %xmm1
movaps %xmm3, %xmm5
movaps %xmm0, %xmm2
movaps %xmm12, 0x10(%rsp)
mulps %xmm12, %xmm2
movaps (%rsp), %xmm10
movaps 0x40(%rsp), %xmm11
movaps %xmm11, %xmm3
mulps %xmm8, %xmm3
addps %xmm1, %xmm3
movaps %xmm11, %xmm6
movaps %xmm7, 0x20(%rsp)
mulps %xmm7, %xmm6
addps %xmm2, %xmm6
mulps %xmm4, %xmm0
mulps %xmm15, %xmm11
addps %xmm0, %xmm11
movaps %xmm10, %xmm0
mulps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps %xmm10, %xmm1
mulps %xmm9, %xmm1
addps %xmm6, %xmm1
mulps %xmm14, %xmm10
addps %xmm11, %xmm10
movaps %xmm5, %xmm12
jmp 0x1c9749
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movsd 0x4(%r9,%r10), %xmm0
movss (%r9,%r10), %xmm2
shufps $0x4c, %xmm0, %xmm2 # xmm2 = xmm2[0,3],xmm0[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm0
movss 0xc(%r9,%r10), %xmm12
shufps $0x4c, %xmm0, %xmm12 # xmm12 = xmm12[0,3],xmm0[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm0
movss 0x18(%r9,%r10), %xmm13
shufps $0x4c, %xmm0, %xmm13 # xmm13 = xmm13[0,3],xmm0[0,1]
shufps $0x78, %xmm13, %xmm13 # xmm13 = xmm13[0,2,3,1]
movsd 0x28(%r9,%r10), %xmm0
movss 0x24(%r9,%r10), %xmm8
shufps $0x4c, %xmm0, %xmm8 # xmm8 = xmm8[0,3],xmm0[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
jmp 0x1ca63d
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movsd 0x10(%r9,%r10), %xmm13
insertps $0x20, 0x8(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],xmm13[3]
movsd 0x34(%r9,%r10), %xmm0
movss (%r9,%r10), %xmm7
movss 0xc(%r9,%r10), %xmm12
movlhps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm7 # xmm7 = xmm7[0,2],xmm0[1,3]
movss 0x18(%r9,%r10), %xmm8
movsd 0x1c(%r9,%r10), %xmm0
movlhps %xmm0, %xmm8 # xmm8 = xmm8[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm8 # xmm8 = xmm8[0,2],xmm0[1,3]
movss 0x24(%r9,%r10), %xmm3
movss 0x28(%r9,%r10), %xmm2
movss 0x2c(%r9,%r10), %xmm0
movss 0x30(%r9,%r10), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d221a7(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d2219f(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm8 # xmm8 = xmm8[0,1,2],xmm3[0]
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
movaps %xmm7, %xmm2
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],xmm12[2,3]
insertps $0x30, %xmm4, %xmm13 # xmm13 = xmm13[0,1,2],xmm4[0]
insertps $0x20, 0x3c(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
insertps $0x30, %xmm0, %xmm12 # xmm12 = xmm12[0,1,2],xmm0[0]
jmp 0x1ca63d
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rdx, %r10
movss (%r9,%r10), %xmm2
movss 0x4(%r9,%r10), %xmm12
movss 0x8(%r9,%r10), %xmm13
movss 0xc(%r9,%r10), %xmm8
insertps $0x1c, 0x10(%r9,%r10), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm13 # xmm13 = xmm13[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r9,%r10), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
movaps %xmm2, %xmm0
movaps %xmm2, %xmm1
shufps $0x0, %xmm2, %xmm1 # xmm1 = xmm1[0,0],xmm2[0,0]
movaps %xmm2, %xmm5
shufps $0x55, %xmm2, %xmm5 # xmm5 = xmm5[1,1],xmm2[1,1]
movaps %xmm2, %xmm3
shufps $0xaa, %xmm2, %xmm3 # xmm3 = xmm3[2,2],xmm2[2,2]
shufps $0xff, %xmm2, %xmm0 # xmm0 = xmm0[3,3],xmm2[3,3]
movaps %xmm0, %xmm15
movaps %xmm12, %xmm2
shufps $0x0, %xmm12, %xmm2 # xmm2 = xmm2[0,0],xmm12[0,0]
movaps %xmm12, %xmm11
shufps $0x55, %xmm12, %xmm11 # xmm11 = xmm11[1,1],xmm12[1,1]
movaps %xmm12, %xmm4
shufps $0xaa, %xmm12, %xmm4 # xmm4 = xmm4[2,2],xmm12[2,2]
shufps $0xff, %xmm12, %xmm12 # xmm12 = xmm12[3,3,3,3]
movaps %xmm13, %xmm0
shufps $0x0, %xmm13, %xmm0 # xmm0 = xmm0[0,0],xmm13[0,0]
movaps %xmm13, %xmm9
shufps $0x55, %xmm13, %xmm9 # xmm9 = xmm9[1,1],xmm13[1,1]
movaps %xmm13, %xmm10
shufps $0xaa, %xmm13, %xmm10 # xmm10 = xmm10[2,2],xmm13[2,2]
shufps $0xff, %xmm13, %xmm13 # xmm13 = xmm13[3,3,3,3]
movaps %xmm8, %xmm6
shufps $0x0, %xmm8, %xmm6 # xmm6 = xmm6[0,0],xmm8[0,0]
movaps %xmm8, %xmm7
shufps $0x55, %xmm8, %xmm7 # xmm7 = xmm7[1,1],xmm8[1,1]
movaps %xmm8, %xmm14
shufps $0xaa, %xmm8, %xmm14 # xmm14 = xmm14[2,2],xmm8[2,2]
shufps $0xff, %xmm8, %xmm8 # xmm8 = xmm8[3,3,3,3]
movl 0x58(%rdi,%r8), %edi
cmpl $0x9134, %edi # imm = 0x9134
movaps %xmm0, 0x20(%rsp)
movaps %xmm1, 0xc0(%rsp)
movaps %xmm2, 0x10(%rsp)
movaps %xmm3, 0x60(%rsp)
movaps %xmm4, 0x50(%rsp)
movaps %xmm5, 0x40(%rsp)
movaps %xmm6, 0xa0(%rsp)
movaps %xmm7, 0xf0(%rsp)
movaps %xmm9, 0xe0(%rsp)
movaps %xmm10, 0xd0(%rsp)
movaps %xmm11, 0xb0(%rsp)
movaps %xmm14, 0x110(%rsp)
je 0x1ca8c0
cmpl $0x9234, %edi # imm = 0x9234
je 0x1ca767
cmpl $0xb001, %edi # imm = 0xB001
je 0x1ca7cd
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1ca929
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movaps (%rdi,%rdx), %xmm10
movaps 0x10(%rdi,%rdx), %xmm9
movaps 0x20(%rdi,%rdx), %xmm7
movaps 0x30(%rdi,%rdx), %xmm11
jmp 0x1ca929
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movsd 0x4(%rdi,%rdx), %xmm0
movss (%rdi,%rdx), %xmm10
shufps $0x4c, %xmm0, %xmm10 # xmm10 = xmm10[0,3],xmm0[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x10(%rdi,%rdx), %xmm0
movss 0xc(%rdi,%rdx), %xmm9
shufps $0x4c, %xmm0, %xmm9 # xmm9 = xmm9[0,3],xmm0[0,1]
shufps $0x78, %xmm9, %xmm9 # xmm9 = xmm9[0,2,3,1]
movsd 0x1c(%rdi,%rdx), %xmm0
movss 0x18(%rdi,%rdx), %xmm7
shufps $0x4c, %xmm0, %xmm7 # xmm7 = xmm7[0,3],xmm0[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
movsd 0x28(%rdi,%rdx), %xmm0
movss 0x24(%rdi,%rdx), %xmm11
shufps $0x4c, %xmm0, %xmm11 # xmm11 = xmm11[0,3],xmm0[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
jmp 0x1ca929
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movsd 0x10(%rdi,%rdx), %xmm7
insertps $0x20, 0x8(%rdi,%rdx), %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3]
movsd 0x34(%rdi,%rdx), %xmm0
movss (%rdi,%rdx), %xmm10
movss 0xc(%rdi,%rdx), %xmm9
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x18(%rdi,%rdx), %xmm11
movsd 0x1c(%rdi,%rdx), %xmm0
movlhps %xmm0, %xmm11 # xmm11 = xmm11[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm11 # xmm11 = xmm11[0,2],xmm0[1,3]
movss 0x24(%rdi,%rdx), %xmm3
movss 0x28(%rdi,%rdx), %xmm2
movss 0x2c(%rdi,%rdx), %xmm0
movss 0x30(%rdi,%rdx), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d21eb3(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d21eab(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm11 # xmm11 = xmm11[0,1,2],xmm3[0]
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm10 # xmm10 = xmm10[0,1,2],xmm2[0]
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0],mem[0],xmm9[2,3]
insertps $0x30, %xmm4, %xmm7 # xmm7 = xmm7[0,1,2],xmm4[0]
insertps $0x20, 0x3c(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
insertps $0x30, %xmm0, %xmm9 # xmm9 = xmm9[0,1,2],xmm0[0]
jmp 0x1ca929
movq 0x38(%rsi), %rdi
imulq 0x48(%rsi), %rdx
movss (%rdi,%rdx), %xmm10
movss 0x4(%rdi,%rdx), %xmm9
movss 0x8(%rdi,%rdx), %xmm7
movss 0xc(%rdi,%rdx), %xmm11
insertps $0x1c, 0x10(%rdi,%rdx), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdi,%rdx), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdi,%rdx), %xmm9 # xmm9 = xmm9[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdi,%rdx), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdi,%rdx), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdi,%rdx), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdi,%rdx), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
movaps %xmm10, 0x1a0(%rsp)
movaps %xmm10, 0x1b0(%rsp)
movaps %xmm10, 0x160(%rsp)
shufps $0xff, %xmm10, %xmm10 # xmm10 = xmm10[3,3,3,3]
movaps %xmm11, 0x100(%rsp)
movaps %xmm11, 0x140(%rsp)
movaps %xmm11, 0x170(%rsp)
shufps $0xff, %xmm11, %xmm11 # xmm11 = xmm11[3,3,3,3]
movaps %xmm8, %xmm0
mulps %xmm11, %xmm0
movaps %xmm15, %xmm1
movaps %xmm15, 0x80(%rsp)
mulps %xmm10, %xmm1
addps %xmm0, %xmm1
movaps %xmm9, 0x210(%rsp)
movaps %xmm9, 0x220(%rsp)
movaps %xmm9, 0x120(%rsp)
shufps $0xff, %xmm9, %xmm9 # xmm9 = xmm9[3,3,3,3]
movaps %xmm12, 0x90(%rsp)
mulps %xmm9, %xmm12
addps %xmm1, %xmm12
movaps %xmm7, 0x130(%rsp)
movaps %xmm7, 0x150(%rsp)
movaps %xmm7, 0x1c0(%rsp)
shufps $0xff, %xmm7, %xmm7 # xmm7 = xmm7[3,3,3,3]
movaps %xmm13, %xmm3
mulps %xmm7, %xmm3
addps %xmm12, %xmm3
movaps 0x1d21cec(%rip), %xmm6 # 0x1eec6d0
movaps %xmm3, %xmm14
xorps %xmm6, %xmm14
movaps %xmm3, %xmm0
cmpltps %xmm14, %xmm0
movaps %xmm11, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm11
movaps %xmm10, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm10
movaps %xmm10, 0x200(%rsp)
movaps %xmm9, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm9
movaps %xmm7, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm7
movaps %xmm7, (%rsp)
maxps %xmm3, %xmm14
andps 0x1d21c82(%rip), %xmm3 # 0x1eec6c0
movaps 0x1d2718b(%rip), %xmm0 # 0x1ef1bd0
mulps %xmm3, %xmm0
addps 0x1d27191(%rip), %xmm0 # 0x1ef1be0
mulps %xmm3, %xmm0
addps 0x1d27197(%rip), %xmm0 # 0x1ef1bf0
mulps %xmm3, %xmm0
addps 0x1d2719d(%rip), %xmm0 # 0x1ef1c00
mulps %xmm3, %xmm0
addps 0x1d271a3(%rip), %xmm0 # 0x1ef1c10
mulps %xmm3, %xmm0
addps 0x1d271a9(%rip), %xmm0 # 0x1ef1c20
movaps 0x1d21f91(%rip), %xmm10 # 0x1eeca10
movaps %xmm10, %xmm2
subps %xmm3, %xmm2
sqrtps %xmm2, %xmm2
mulps %xmm0, %xmm2
movaps 0x1d2719d(%rip), %xmm4 # 0x1ef1c30
movaps %xmm4, %xmm0
subps %xmm2, %xmm0
xorps %xmm7, %xmm7
maxps %xmm0, %xmm7
xorps %xmm2, %xmm2
movaps %xmm14, %xmm0
cmpltps %xmm2, %xmm0
movaps %xmm7, %xmm5
xorps %xmm6, %xmm5
blendvps %xmm0, %xmm5, %xmm7
movaps %xmm4, %xmm5
subps %xmm7, %xmm5
movaps %xmm3, %xmm0
cmpnleps %xmm10, %xmm0
blendvps %xmm0, 0x1d27174(%rip), %xmm5 # 0x1ef1c40
movaps %xmm13, 0x70(%rsp)
movaps 0x1d0(%rsp), %xmm13
mulps %xmm13, %xmm5
movaps 0x1d2716a(%rip), %xmm0 # 0x1ef1c50
mulps %xmm5, %xmm0
roundps $0x1, %xmm0, %xmm3
cvtps2dq %xmm3, %xmm0
mulps %xmm4, %xmm3
subps %xmm3, %xmm5
movaps %xmm9, %xmm15
movaps %xmm9, 0x1e0(%rsp)
movapd 0x1d27151(%rip), %xmm9 # 0x1ef1c60
andpd %xmm0, %xmm9
movaps %xmm11, %xmm1
movaps %xmm11, 0x190(%rsp)
movaps %xmm5, %xmm11
mulps %xmm5, %xmm11
movaps 0x1d2715f(%rip), %xmm12 # 0x1ef1c90
mulps %xmm11, %xmm12
addps 0x1d27163(%rip), %xmm12 # 0x1ef1ca0
movaps 0x1d2716c(%rip), %xmm7 # 0x1ef1cb0
mulps %xmm11, %xmm7
addps 0x1d27171(%rip), %xmm7 # 0x1ef1cc0
mulps %xmm11, %xmm12
addps 0x1d27175(%rip), %xmm12 # 0x1ef1cd0
movapd 0x1d21fbd(%rip), %xmm3 # 0x1eecb20
andpd %xmm3, %xmm0
mulps %xmm11, %xmm7
addps 0x1d2716e(%rip), %xmm7 # 0x1ef1ce0
mulps %xmm11, %xmm12
addps 0x1d27172(%rip), %xmm12 # 0x1ef1cf0
movapd %xmm0, %xmm4
pcmpeqd %xmm2, %xmm4
mulps %xmm11, %xmm7
addps 0x1d2716f(%rip), %xmm7 # 0x1ef1d00
psubd %xmm0, %xmm2
mulps %xmm11, %xmm12
addps 0x1d2716f(%rip), %xmm12 # 0x1ef1d10
mulps %xmm11, %xmm7
addps 0x1d21f34(%rip), %xmm7 # 0x1eecae0
mulps %xmm11, %xmm12
mulps %xmm11, %xmm7
addps %xmm10, %xmm12
mulps %xmm5, %xmm12
addps %xmm10, %xmm7
movaps %xmm12, %xmm5
movdqa %xmm2, %xmm0
blendvps %xmm0, %xmm7, %xmm5
movdqa %xmm4, %xmm0
blendvps %xmm0, %xmm7, %xmm12
movapd %xmm9, %xmm0
pcmpgtd %xmm3, %xmm0
movaps %xmm5, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm5
movaps %xmm5, 0x1f0(%rsp)
pcmpeqd %xmm0, %xmm0
paddd %xmm9, %xmm0
pminud %xmm0, %xmm3
pcmpeqd %xmm0, %xmm3
xorps %xmm12, %xmm6
movdqa %xmm3, %xmm0
blendvps %xmm0, %xmm6, %xmm12
movaps %xmm8, %xmm7
mulps %xmm14, %xmm7
subps %xmm1, %xmm7
movaps 0x80(%rsp), %xmm3
mulps %xmm14, %xmm3
movaps 0x200(%rsp), %xmm1
subps %xmm1, %xmm3
movaps %xmm7, %xmm0
mulps %xmm7, %xmm0
movaps %xmm3, %xmm2
mulps %xmm3, %xmm2
addps %xmm0, %xmm2
movaps 0x90(%rsp), %xmm4
mulps %xmm14, %xmm4
subps %xmm15, %xmm4
movaps %xmm4, %xmm0
mulps %xmm4, %xmm0
addps %xmm2, %xmm0
movaps 0x70(%rsp), %xmm6
mulps %xmm14, %xmm6
subps (%rsp), %xmm6
movaps %xmm6, %xmm2
mulps %xmm6, %xmm2
addps %xmm0, %xmm2
rsqrtps %xmm2, %xmm9
mulps 0x1d21a62(%rip), %xmm2 # 0x1eec6e0
mulps %xmm9, %xmm2
movaps %xmm9, %xmm15
mulps %xmm9, %xmm9
mulps %xmm2, %xmm9
movaps 0x1d2708a(%rip), %xmm11 # 0x1ef1d20
mulps %xmm11, %xmm15
subps %xmm9, %xmm15
mulps %xmm15, %xmm7
mulps %xmm5, %xmm7
movaps %xmm8, %xmm0
mulps %xmm12, %xmm0
subps %xmm7, %xmm0
movaps %xmm0, 0x30(%rsp)
movaps 0x190(%rsp), %xmm2
subps %xmm8, %xmm2
mulps %xmm13, %xmm2
addps %xmm8, %xmm2
movaps 0x80(%rsp), %xmm0
subps %xmm0, %xmm1
mulps %xmm13, %xmm1
addps %xmm0, %xmm1
movaps %xmm2, %xmm7
mulps %xmm2, %xmm7
movaps %xmm1, %xmm8
mulps %xmm1, %xmm8
addps %xmm7, %xmm8
movaps 0x90(%rsp), %xmm9
movaps 0x1e0(%rsp), %xmm5
subps %xmm9, %xmm5
mulps %xmm13, %xmm5
addps %xmm9, %xmm5
movaps %xmm5, %xmm7
mulps %xmm5, %xmm7
addps %xmm8, %xmm7
movaps (%rsp), %xmm8
movaps 0x70(%rsp), %xmm0
subps %xmm0, %xmm8
mulps %xmm13, %xmm8
addps %xmm0, %xmm8
movaps %xmm8, (%rsp)
mulps %xmm8, %xmm8
addps %xmm7, %xmm8
rsqrtps %xmm8, %xmm7
mulps 0x1d2199d(%rip), %xmm8 # 0x1eec6e0
mulps %xmm7, %xmm11
mulps %xmm7, %xmm8
mulps %xmm7, %xmm7
mulps %xmm8, %xmm7
subps %xmm7, %xmm11
mulps %xmm11, %xmm2
cmpnleps 0x1d26fcd(%rip), %xmm14 # 0x1ef1d30
movaps %xmm14, %xmm0
movaps 0x30(%rsp), %xmm7
blendvps %xmm0, %xmm2, %xmm7
movaps %xmm7, 0x30(%rsp)
mulps %xmm15, %xmm3
movaps 0x1f0(%rsp), %xmm2
mulps %xmm2, %xmm3
movaps 0x80(%rsp), %xmm7
mulps %xmm12, %xmm7
subps %xmm3, %xmm7
mulps %xmm11, %xmm1
blendvps %xmm0, %xmm1, %xmm7
mulps %xmm15, %xmm4
mulps %xmm2, %xmm4
movaps %xmm9, %xmm8
mulps %xmm12, %xmm8
subps %xmm4, %xmm8
mulps %xmm11, %xmm5
blendvps %xmm0, %xmm5, %xmm8
mulps %xmm6, %xmm15
mulps %xmm2, %xmm15
mulps 0x70(%rsp), %xmm12
subps %xmm15, %xmm12
mulps (%rsp), %xmm11
blendvps %xmm0, %xmm11, %xmm12
movaps 0x1a0(%rsp), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm13, %xmm10
mulps %xmm13, %xmm0
movaps %xmm0, %xmm1
movaps 0xc0(%rsp), %xmm0
mulps %xmm10, %xmm0
addps %xmm1, %xmm0
movaps 0x1b0(%rsp), %xmm1
shufps $0x55, %xmm1, %xmm1 # xmm1 = xmm1[1,1,1,1]
mulps %xmm13, %xmm1
movaps 0x40(%rsp), %xmm3
mulps %xmm10, %xmm3
addps %xmm1, %xmm3
movaps %xmm3, 0x40(%rsp)
movaps 0x160(%rsp), %xmm1
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x60(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x60(%rsp)
movaps 0x210(%rsp), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x10(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x10(%rsp)
movaps 0x220(%rsp), %xmm1
shufps $0x55, %xmm1, %xmm1 # xmm1 = xmm1[1,1,1,1]
mulps %xmm13, %xmm1
movaps 0xb0(%rsp), %xmm2
mulps %xmm10, %xmm2
addps %xmm1, %xmm2
movaps %xmm2, 0xb0(%rsp)
movaps 0x120(%rsp), %xmm1
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x50(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x50(%rsp)
movaps 0x130(%rsp), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x20(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x20(%rsp)
movaps 0x150(%rsp), %xmm11
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
movaps 0x1c0(%rsp), %xmm9
shufps $0xaa, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
movaps 0x100(%rsp), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps 0x140(%rsp), %xmm3
shufps $0x55, %xmm3, %xmm3 # xmm3 = xmm3[1,1,1,1]
movaps 0x170(%rsp), %xmm1
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm13, %xmm11
mulps %xmm13, %xmm9
movaps 0xe0(%rsp), %xmm14
mulps %xmm10, %xmm14
addps %xmm11, %xmm14
movaps %xmm14, 0xe0(%rsp)
movaps 0xd0(%rsp), %xmm11
mulps %xmm10, %xmm11
addps %xmm9, %xmm11
movaps %xmm11, 0xd0(%rsp)
mulps %xmm13, %xmm6
mulps %xmm13, %xmm3
mulps %xmm13, %xmm1
movaps 0xa0(%rsp), %xmm9
mulps %xmm10, %xmm9
addps %xmm6, %xmm9
movaps %xmm9, 0xa0(%rsp)
movaps 0xf0(%rsp), %xmm6
mulps %xmm10, %xmm6
addps %xmm3, %xmm6
movaps %xmm6, 0xf0(%rsp)
mulps 0x110(%rsp), %xmm10
addps %xmm1, %xmm10
movaps %xmm10, 0x90(%rsp)
movaps %xmm7, %xmm3
movaps %xmm7, %xmm15
movaps 0x30(%rsp), %xmm1
movaps %xmm1, %xmm6
movaps %xmm7, %xmm9
movaps %xmm1, %xmm10
movaps %xmm1, %xmm2
mulps %xmm1, %xmm7
mulps %xmm1, %xmm2
mulps %xmm3, %xmm3
movaps %xmm2, %xmm14
addps %xmm3, %xmm14
mulps %xmm8, %xmm15
mulps %xmm12, %xmm6
movaps %xmm15, %xmm1
addps %xmm6, %xmm1
subps %xmm6, %xmm15
movaps %xmm12, %xmm11
mulps %xmm12, %xmm9
mulps %xmm8, %xmm10
mulps %xmm8, %xmm12
movaps %xmm8, %xmm13
mulps %xmm8, %xmm13
subps %xmm13, %xmm14
subps %xmm3, %xmm2
movaps %xmm9, %xmm6
subps %xmm10, %xmm6
addps %xmm9, %xmm10
movaps %xmm10, %xmm8
movaps %xmm12, %xmm10
addps %xmm7, %xmm10
subps %xmm7, %xmm12
movaps %xmm13, %xmm3
addps %xmm2, %xmm3
subps %xmm13, %xmm2
movaps %xmm3, %xmm13
mulps %xmm11, %xmm11
subps %xmm11, %xmm14
subps %xmm11, %xmm13
addps %xmm11, %xmm2
addps %xmm1, %xmm1
addps %xmm6, %xmm6
xorps %xmm3, %xmm3
movaps %xmm6, %xmm7
mulps %xmm3, %xmm7
movaps %xmm1, %xmm9
mulps %xmm3, %xmm9
addps %xmm9, %xmm6
addps %xmm7, %xmm9
addps %xmm1, %xmm7
addps %xmm14, %xmm9
movaps %xmm9, %xmm5
movaps %xmm9, (%rsp)
mulps %xmm3, %xmm14
addps %xmm14, %xmm7
addps %xmm6, %xmm14
addps %xmm10, %xmm10
movaps %xmm10, %xmm9
mulps %xmm3, %xmm9
movaps %xmm13, %xmm1
mulps %xmm3, %xmm1
addps %xmm9, %xmm13
addps %xmm1, %xmm9
addps %xmm10, %xmm1
addps %xmm15, %xmm15
addps %xmm15, %xmm9
mulps %xmm3, %xmm15
addps %xmm15, %xmm13
addps %xmm1, %xmm15
addps %xmm12, %xmm12
movaps %xmm2, %xmm10
mulps %xmm3, %xmm10
movaps %xmm12, %xmm1
mulps %xmm3, %xmm1
movaps %xmm1, %xmm4
addps %xmm10, %xmm4
addps %xmm12, %xmm10
addps %xmm2, %xmm1
addps %xmm8, %xmm8
addps %xmm8, %xmm4
movaps %xmm4, %xmm2
movaps %xmm4, 0x80(%rsp)
mulps %xmm3, %xmm8
addps %xmm8, %xmm10
addps %xmm1, %xmm8
mulps %xmm3, %xmm2
movaps %xmm9, %xmm1
mulps %xmm3, %xmm1
addps %xmm2, %xmm1
movaps %xmm0, %xmm6
mulps %xmm5, %xmm6
addps %xmm1, %xmm6
movaps %xmm10, %xmm12
mulps %xmm3, %xmm12
movaps %xmm13, %xmm4
mulps %xmm3, %xmm13
addps %xmm12, %xmm13
movaps %xmm0, %xmm11
mulps %xmm7, %xmm11
addps %xmm13, %xmm11
movaps 0x40(%rsp), %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x40(%rsp)
movaps 0x60(%rsp), %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x60(%rsp)
movaps 0x50(%rsp), %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x50(%rsp)
movaps %xmm8, %xmm13
mulps %xmm3, %xmm13
movaps %xmm15, 0x70(%rsp)
mulps %xmm15, %xmm3
addps %xmm13, %xmm3
mulps %xmm14, %xmm0
movaps %xmm14, 0x30(%rsp)
addps %xmm3, %xmm0
movaps %xmm0, 0xc0(%rsp)
movaps 0xb0(%rsp), %xmm3
movaps %xmm3, %xmm1
mulps %xmm9, %xmm1
addps %xmm2, %xmm1
movaps %xmm3, %xmm2
mulps %xmm4, %xmm2
addps %xmm12, %xmm2
mulps %xmm15, %xmm3
addps %xmm13, %xmm3
movaps 0x10(%rsp), %xmm0
movaps %xmm0, %xmm5
movaps (%rsp), %xmm15
mulps %xmm15, %xmm5
addps %xmm1, %xmm5
movaps %xmm5, 0xb0(%rsp)
movaps %xmm0, %xmm12
mulps %xmm7, %xmm12
addps %xmm2, %xmm12
mulps %xmm14, %xmm0
addps %xmm3, %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0xd0(%rsp), %xmm5
movaps %xmm5, %xmm2
mulps 0x80(%rsp), %xmm2
movaps 0xe0(%rsp), %xmm3
movaps %xmm3, %xmm13
mulps %xmm9, %xmm13
addps %xmm2, %xmm13
movaps %xmm5, %xmm2
mulps %xmm10, %xmm2
movaps %xmm3, %xmm14
mulps %xmm4, %xmm14
addps %xmm2, %xmm14
mulps %xmm8, %xmm5
mulps 0x70(%rsp), %xmm3
addps %xmm5, %xmm3
movaps 0x20(%rsp), %xmm0
movaps %xmm0, %xmm5
mulps %xmm15, %xmm5
addps %xmm13, %xmm5
movaps %xmm0, %xmm1
mulps %xmm7, %xmm1
addps %xmm14, %xmm1
movaps 0x30(%rsp), %xmm13
mulps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps 0x90(%rsp), %xmm2
movaps 0x80(%rsp), %xmm3
mulps %xmm2, %xmm3
mulps %xmm2, %xmm10
mulps %xmm2, %xmm8
movaps 0xf0(%rsp), %xmm2
mulps %xmm2, %xmm9
addps %xmm3, %xmm9
mulps %xmm2, %xmm4
addps %xmm10, %xmm4
movaps 0x70(%rsp), %xmm3
mulps %xmm2, %xmm3
addps %xmm8, %xmm3
movaps %xmm3, %xmm8
movaps 0xa0(%rsp), %xmm2
mulps %xmm2, %xmm15
addps %xmm9, %xmm15
addps 0x40(%rsp), %xmm15
movaps %xmm15, (%rsp)
mulps %xmm2, %xmm7
addps %xmm4, %xmm7
addps 0x60(%rsp), %xmm7
movaps %xmm13, %xmm3
mulps %xmm2, %xmm3
addps %xmm8, %xmm3
addps 0x50(%rsp), %xmm3
movaps %xmm3, 0x30(%rsp)
movaps 0x10(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm1, %xmm2
movaps %xmm12, %xmm13
mulps %xmm0, %xmm13
subps %xmm2, %xmm13
movaps 0xb0(%rsp), %xmm10
movaps %xmm10, %xmm2
mulps %xmm0, %xmm2
movaps %xmm4, %xmm8
mulps %xmm5, %xmm8
subps %xmm2, %xmm8
movaps %xmm12, %xmm2
mulps %xmm5, %xmm2
mulps %xmm1, %xmm10
subps %xmm2, %xmm10
movaps %xmm0, %xmm2
mulps %xmm11, %xmm2
movaps %xmm1, %xmm9
movaps 0xc0(%rsp), %xmm3
mulps %xmm3, %xmm9
subps %xmm2, %xmm9
movaps %xmm5, %xmm2
mulps %xmm3, %xmm2
mulps %xmm6, %xmm0
subps %xmm2, %xmm0
mulps %xmm6, %xmm1
mulps %xmm11, %xmm5
subps %xmm1, %xmm5
movaps %xmm3, %xmm2
mulps %xmm12, %xmm2
movaps %xmm11, %xmm14
mulps %xmm4, %xmm14
subps %xmm2, %xmm14
mulps %xmm6, %xmm4
movaps %xmm3, %xmm2
movaps %xmm3, %xmm15
movaps 0xb0(%rsp), %xmm1
mulps %xmm1, %xmm15
subps %xmm4, %xmm15
mulps %xmm11, %xmm1
mulps %xmm6, %xmm12
subps %xmm1, %xmm12
movaps %xmm12, %xmm4
mulps %xmm10, %xmm2
mulps %xmm8, %xmm11
addps %xmm2, %xmm11
mulps %xmm13, %xmm6
addps %xmm11, %xmm6
divps %xmm6, %xmm13
divps %xmm6, %xmm9
divps %xmm6, %xmm14
divps %xmm6, %xmm8
divps %xmm6, %xmm0
divps %xmm6, %xmm15
divps %xmm6, %xmm10
divps %xmm6, %xmm5
divps %xmm6, %xmm4
movaps 0x30(%rsp), %xmm3
movaps %xmm3, %xmm1
mulps %xmm10, %xmm1
movaps %xmm3, %xmm2
movaps %xmm3, %xmm11
movaps %xmm5, 0x10(%rsp)
mulps %xmm5, %xmm2
movaps %xmm7, %xmm3
mulps %xmm8, %xmm3
addps %xmm1, %xmm3
movaps %xmm7, %xmm6
movaps %xmm0, 0x20(%rsp)
mulps %xmm0, %xmm6
addps %xmm2, %xmm6
mulps %xmm4, %xmm11
mulps %xmm15, %xmm7
addps %xmm11, %xmm7
movaps %xmm10, %xmm12
movaps (%rsp), %xmm10
movaps %xmm10, %xmm0
mulps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps %xmm10, %xmm1
mulps %xmm9, %xmm1
addps %xmm6, %xmm1
mulps %xmm14, %xmm10
addps %xmm7, %xmm10
jmp 0x1c9749
testl %edi, %edi
je 0x1cbca8
movq 0x88(%rsi), %rsi
movaps 0x250(%rsp), %xmm0
movaps %xmm0, (%rsp)
movzbl %dil, %edi
andl $0xf, %edi
bsfq %rdi, %rdi
movslq %edi, %rdi
movslq 0x230(%rsp,%rdi,4), %rdi
movd %edi, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pand (%rsp), %xmm0
imulq $0x38, %rdi, %r8
leaq (%rsi,%r8), %rdi
movl 0x20(%rsi,%r8), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
je 0x1cb527
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1cb4b4
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1cb5a2
cmpl $0x9244, %r9d # imm = 0x9244
jne 0x1cb6c8
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movaps (%r9,%r10), %xmm8
movaps 0x10(%r9,%r10), %xmm12
movaps 0x20(%r9,%r10), %xmm2
movaps %xmm2, 0x180(%rsp)
movaps 0x30(%r9,%r10), %xmm2
jmp 0x1cb6c8
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movsd 0x4(%r9,%r10), %xmm2
movss (%r9,%r10), %xmm8
shufps $0x4c, %xmm2, %xmm8 # xmm8 = xmm8[0,3],xmm2[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm2
movss 0xc(%r9,%r10), %xmm12
shufps $0x4c, %xmm2, %xmm12 # xmm12 = xmm12[0,3],xmm2[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm2
movss 0x18(%r9,%r10), %xmm5
shufps $0x4c, %xmm2, %xmm5 # xmm5 = xmm5[0,3],xmm2[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
movaps %xmm5, 0x180(%rsp)
movsd 0x28(%r9,%r10), %xmm5
movss 0x24(%r9,%r10), %xmm2
shufps $0x4c, %xmm5, %xmm2 # xmm2 = xmm2[0,3],xmm5[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x1cb6c8
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movss (%r9,%r10), %xmm8
movss 0x4(%r9,%r10), %xmm12
movss 0x8(%r9,%r10), %xmm5
movss 0xc(%r9,%r10), %xmm2
insertps $0x1c, 0x10(%r9,%r10), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
movaps %xmm5, 0x180(%rsp)
insertps $0x1c, 0x1c(%r9,%r10), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
jmp 0x1cb6c8
movq (%rdi), %r9
movq 0x10(%rdi), %r10
imulq %rdx, %r10
movsd 0x10(%r9,%r10), %xmm6
insertps $0x20, 0x8(%r9,%r10), %xmm6 # xmm6 = xmm6[0,1],mem[0],xmm6[3]
movsd 0x34(%r9,%r10), %xmm2
movss (%r9,%r10), %xmm8
movss 0xc(%r9,%r10), %xmm12
movlhps %xmm2, %xmm8 # xmm8 = xmm8[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm8 # xmm8 = xmm8[0,2],xmm2[1,3]
movss 0x18(%r9,%r10), %xmm2
movsd 0x1c(%r9,%r10), %xmm5
movlhps %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm2 # xmm2 = xmm2[0,2],xmm5[1,3]
movss 0x24(%r9,%r10), %xmm10
movss 0x28(%r9,%r10), %xmm9
movss 0x2c(%r9,%r10), %xmm5
movss 0x30(%r9,%r10), %xmm7
movss %xmm7, 0x180(%rsp)
movaps %xmm9, %xmm13
mulss %xmm9, %xmm13
movaps %xmm10, %xmm15
mulss %xmm10, %xmm15
addss %xmm13, %xmm15
movaps %xmm5, %xmm13
mulss %xmm5, %xmm13
addss %xmm15, %xmm13
movaps %xmm7, %xmm15
mulss %xmm7, %xmm15
addss %xmm13, %xmm15
movaps %xmm15, %xmm13
rsqrtss %xmm15, %xmm13
movaps %xmm13, %xmm7
mulss 0x1d210bc(%rip), %xmm7 # 0x1eec718
mulss 0x1d210b7(%rip), %xmm15 # 0x1eec71c
mulss %xmm13, %xmm15
mulss %xmm13, %xmm13
mulss %xmm15, %xmm13
addss %xmm7, %xmm13
mulss %xmm13, %xmm10
insertps $0x30, %xmm10, %xmm2 # xmm2 = xmm2[0,1,2],xmm10[0]
mulss %xmm13, %xmm9
insertps $0x30, %xmm9, %xmm8 # xmm8 = xmm8[0,1,2],xmm9[0]
mulss %xmm13, %xmm5
mulss 0x180(%rsp), %xmm13
insertps $0x10, 0x4(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],xmm12[2,3]
insertps $0x30, %xmm13, %xmm6 # xmm6 = xmm6[0,1,2],xmm13[0]
movaps %xmm6, 0x180(%rsp)
insertps $0x20, 0x3c(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
insertps $0x30, %xmm5, %xmm12 # xmm12 = xmm12[0,1,2],xmm5[0]
movaps %xmm8, %xmm5
shufps $0x0, %xmm8, %xmm5 # xmm5 = xmm5[0,0],xmm8[0,0]
movaps 0xc0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xc0(%rsp)
movaps %xmm8, %xmm5
shufps $0x55, %xmm8, %xmm5 # xmm5 = xmm5[1,1],xmm8[1,1]
movaps 0x40(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x40(%rsp)
movaps %xmm8, %xmm5
shufps $0xaa, %xmm8, %xmm5 # xmm5 = xmm5[2,2],xmm8[2,2]
movaps 0x60(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x60(%rsp)
movaps %xmm8, %xmm5
shufps $0xff, %xmm8, %xmm5 # xmm5 = xmm5[3,3],xmm8[3,3]
movaps 0x90(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x90(%rsp)
movaps %xmm12, %xmm5
shufps $0x0, %xmm12, %xmm5 # xmm5 = xmm5[0,0],xmm12[0,0]
movaps 0x10(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x10(%rsp)
movaps %xmm12, %xmm5
shufps $0x55, %xmm12, %xmm5 # xmm5 = xmm5[1,1],xmm12[1,1]
movaps 0xb0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xb0(%rsp)
movaps %xmm12, %xmm5
shufps $0xaa, %xmm12, %xmm5 # xmm5 = xmm5[2,2],xmm12[2,2]
movaps 0x50(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x50(%rsp)
movaps %xmm12, %xmm5
shufps $0xff, %xmm12, %xmm5 # xmm5 = xmm5[3,3],xmm12[3,3]
movaps 0x70(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x70(%rsp)
movaps 0x180(%rsp), %xmm7
movaps %xmm7, %xmm5
shufps $0x0, %xmm7, %xmm5 # xmm5 = xmm5[0,0],xmm7[0,0]
movaps 0x20(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x20(%rsp)
movaps %xmm7, %xmm5
shufps $0x55, %xmm7, %xmm5 # xmm5 = xmm5[1,1],xmm7[1,1]
movaps 0xe0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xe0(%rsp)
movaps %xmm7, %xmm5
shufps $0xaa, %xmm7, %xmm5 # xmm5 = xmm5[2,2],xmm7[2,2]
movaps 0xd0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xd0(%rsp)
movaps %xmm7, %xmm5
shufps $0xff, %xmm7, %xmm5 # xmm5 = xmm5[3,3],xmm7[3,3]
movaps 0x80(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x80(%rsp)
movaps %xmm2, %xmm5
shufps $0x0, %xmm2, %xmm5 # xmm5 = xmm5[0,0],xmm2[0,0]
movaps 0xa0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xa0(%rsp)
movaps %xmm2, %xmm5
shufps $0x55, %xmm2, %xmm5 # xmm5 = xmm5[1,1],xmm2[1,1]
movaps 0xf0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xf0(%rsp)
movaps %xmm2, %xmm5
shufps $0xaa, %xmm2, %xmm5 # xmm5 = xmm5[2,2],xmm2[2,2]
movaps 0x160(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x160(%rsp)
movaps %xmm2, %xmm5
shufps $0xff, %xmm2, %xmm5 # xmm5 = xmm5[3,3],xmm2[3,3]
movaps 0x140(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x140(%rsp)
movdqa (%rsp), %xmm5
pxor %xmm0, %xmm5
movdqa %xmm5, (%rsp)
movl 0x58(%rsi,%r8), %r8d
cmpl $0x9134, %r8d # imm = 0x9134
je 0x1cb951
cmpl $0x9234, %r8d # imm = 0x9234
je 0x1cb8e7
cmpl $0xb001, %r8d # imm = 0xB001
je 0x1cb9c5
cmpl $0x9244, %r8d # imm = 0x9244
jne 0x1cbad5
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movaps (%r8,%rdi), %xmm14
movaps 0x10(%r8,%rdi), %xmm3
movaps 0x20(%r8,%rdi), %xmm1
movaps 0x30(%r8,%rdi), %xmm4
jmp 0x1cbad5
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movsd 0x4(%r8,%rdi), %xmm1
movss (%r8,%rdi), %xmm14
shufps $0x4c, %xmm1, %xmm14 # xmm14 = xmm14[0,3],xmm1[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%r8,%rdi), %xmm1
movss 0xc(%r8,%rdi), %xmm3
shufps $0x4c, %xmm1, %xmm3 # xmm3 = xmm3[0,3],xmm1[0,1]
shufps $0x78, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
movsd 0x1c(%r8,%rdi), %xmm4
movss 0x18(%r8,%rdi), %xmm1
shufps $0x4c, %xmm4, %xmm1 # xmm1 = xmm1[0,3],xmm4[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x28(%r8,%rdi), %xmm5
movss 0x24(%r8,%rdi), %xmm4
shufps $0x4c, %xmm5, %xmm4 # xmm4 = xmm4[0,3],xmm5[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
jmp 0x1cbad5
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movss (%r8,%rdi), %xmm14
movss 0x4(%r8,%rdi), %xmm3
movss 0x8(%r8,%rdi), %xmm1
movss 0xc(%r8,%rdi), %xmm4
insertps $0x1c, 0x10(%r8,%rdi), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%r8,%rdi), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%r8,%rdi), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x24(%r8,%rdi), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
insertps $0x1c, 0x18(%r8,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x28(%r8,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r8,%rdi), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r8,%rdi), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
jmp 0x1cbad5
movq 0x38(%rdi), %r8
movq 0x48(%rdi), %rdi
imulq %rdx, %rdi
movsd 0x10(%r8,%rdi), %xmm1
insertps $0x20, 0x8(%r8,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
movsd 0x34(%r8,%rdi), %xmm4
movss (%r8,%rdi), %xmm14
movss 0xc(%r8,%rdi), %xmm3
movlhps %xmm4, %xmm14 # xmm14 = xmm14[0],xmm4[0]
shufps $0xd8, %xmm4, %xmm14 # xmm14 = xmm14[0,2],xmm4[1,3]
movss 0x18(%r8,%rdi), %xmm4
movsd 0x1c(%r8,%rdi), %xmm5
movlhps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm4 # xmm4 = xmm4[0,2],xmm5[1,3]
movss 0x24(%r8,%rdi), %xmm10
movss 0x28(%r8,%rdi), %xmm9
movss 0x2c(%r8,%rdi), %xmm5
movss 0x30(%r8,%rdi), %xmm7
movaps %xmm9, %xmm13
mulss %xmm9, %xmm13
movaps %xmm10, %xmm15
mulss %xmm10, %xmm15
addss %xmm13, %xmm15
movaps %xmm5, %xmm13
mulss %xmm5, %xmm13
addss %xmm15, %xmm13
movaps %xmm7, %xmm15
mulss %xmm7, %xmm15
addss %xmm13, %xmm15
movaps %xmm15, %xmm13
rsqrtss %xmm15, %xmm13
movaps %xmm13, %xmm6
mulss 0x1d20ca1(%rip), %xmm6 # 0x1eec718
mulss 0x1d20c9c(%rip), %xmm15 # 0x1eec71c
mulss %xmm13, %xmm15
mulss %xmm13, %xmm13
mulss %xmm15, %xmm13
addss %xmm6, %xmm13
mulss %xmm13, %xmm10
insertps $0x30, %xmm10, %xmm4 # xmm4 = xmm4[0,1,2],xmm10[0]
mulss %xmm13, %xmm9
insertps $0x30, %xmm9, %xmm14 # xmm14 = xmm14[0,1,2],xmm9[0]
mulss %xmm13, %xmm5
mulss %xmm7, %xmm13
insertps $0x10, 0x4(%r8,%rdi), %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
insertps $0x30, %xmm13, %xmm1 # xmm1 = xmm1[0,1,2],xmm13[0]
insertps $0x20, 0x3c(%r8,%rdi), %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
insertps $0x30, %xmm5, %xmm3 # xmm3 = xmm3[0,1,2],xmm5[0]
movaps %xmm14, %xmm5
shufps $0x0, %xmm14, %xmm5 # xmm5 = xmm5[0,0],xmm14[0,0]
movaps 0x1e0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1e0(%rsp)
movaps %xmm14, %xmm5
shufps $0x55, %xmm14, %xmm5 # xmm5 = xmm5[1,1],xmm14[1,1]
movaps 0x1f0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1f0(%rsp)
movaps %xmm14, %xmm5
shufps $0xaa, %xmm14, %xmm5 # xmm5 = xmm5[2,2],xmm14[2,2]
movaps 0x200(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x200(%rsp)
movaps %xmm14, %xmm5
shufps $0xff, %xmm14, %xmm5 # xmm5 = xmm5[3,3],xmm14[3,3]
movaps 0x100(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x100(%rsp)
movaps %xmm3, %xmm5
shufps $0x0, %xmm3, %xmm5 # xmm5 = xmm5[0,0],xmm3[0,0]
movaps 0x210(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x210(%rsp)
movaps %xmm3, %xmm5
shufps $0x55, %xmm3, %xmm5 # xmm5 = xmm5[1,1],xmm3[1,1]
movaps 0x220(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x220(%rsp)
movaps %xmm3, %xmm5
shufps $0xaa, %xmm3, %xmm5 # xmm5 = xmm5[2,2],xmm3[2,2]
movaps 0x1a0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1a0(%rsp)
movaps %xmm3, %xmm5
shufps $0xff, %xmm3, %xmm5 # xmm5 = xmm5[3,3],xmm3[3,3]
movaps 0x110(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x110(%rsp)
movaps %xmm1, %xmm5
shufps $0x0, %xmm1, %xmm5 # xmm5 = xmm5[0,0],xmm1[0,0]
movaps 0x120(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x120(%rsp)
movaps %xmm1, %xmm5
shufps $0x55, %xmm1, %xmm5 # xmm5 = xmm5[1,1],xmm1[1,1]
movaps 0x130(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x130(%rsp)
movaps %xmm1, %xmm5
shufps $0xaa, %xmm1, %xmm5 # xmm5 = xmm5[2,2],xmm1[2,2]
movaps 0x1b0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1b0(%rsp)
movaps %xmm1, %xmm5
shufps $0xff, %xmm1, %xmm5 # xmm5 = xmm5[3,3],xmm1[3,3]
movaps 0x30(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x30(%rsp)
movaps %xmm4, %xmm5
shufps $0x0, %xmm4, %xmm5 # xmm5 = xmm5[0,0],xmm4[0,0]
movaps 0x1c0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1c0(%rsp)
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps 0x150(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x150(%rsp)
movaps %xmm4, %xmm5
shufps $0xaa, %xmm4, %xmm5 # xmm5 = xmm5[2,2],xmm4[2,2]
movaps 0x170(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x170(%rsp)
movaps %xmm4, %xmm5
shufps $0xff, %xmm4, %xmm5 # xmm5 = xmm5[3,3],xmm4[3,3]
movaps 0x190(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x190(%rsp)
movaps (%rsp), %xmm0
movmskps %xmm0, %edi
testl %edi, %edi
jne 0x1cb41f
jmp 0x1cbca8
movaps 0x190(%rsp), %xmm15
movaps %xmm15, %xmm0
mulps 0x140(%rsp), %xmm0
movaps 0x100(%rsp), %xmm7
movaps %xmm7, %xmm1
movaps %xmm7, %xmm6
mulps 0x90(%rsp), %xmm1
addps %xmm0, %xmm1
movaps 0x110(%rsp), %xmm14
movaps %xmm14, %xmm0
mulps 0x70(%rsp), %xmm0
addps %xmm1, %xmm0
movaps 0x30(%rsp), %xmm1
movaps %xmm1, %xmm3
movaps %xmm1, %xmm4
mulps 0x80(%rsp), %xmm3
addps %xmm0, %xmm3
movaps 0x1d209c8(%rip), %xmm1 # 0x1eec6d0
movaps %xmm3, %xmm9
xorps %xmm1, %xmm9
movaps %xmm3, %xmm0
cmpltps %xmm9, %xmm0
movaps %xmm15, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm15
movaps %xmm7, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm6
movaps %xmm6, 0x100(%rsp)
movaps %xmm14, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm14
movaps %xmm14, 0x110(%rsp)
movaps %xmm4, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm4
movaps %xmm4, 0x30(%rsp)
maxps %xmm3, %xmm9
andps 0x1d20957(%rip), %xmm3 # 0x1eec6c0
movaps 0x1d25e60(%rip), %xmm0 # 0x1ef1bd0
mulps %xmm3, %xmm0
addps 0x1d25e66(%rip), %xmm0 # 0x1ef1be0
mulps %xmm3, %xmm0
addps 0x1d25e6c(%rip), %xmm0 # 0x1ef1bf0
mulps %xmm3, %xmm0
addps 0x1d25e72(%rip), %xmm0 # 0x1ef1c00
mulps %xmm3, %xmm0
addps 0x1d25e78(%rip), %xmm0 # 0x1ef1c10
mulps %xmm3, %xmm0
addps 0x1d25e7e(%rip), %xmm0 # 0x1ef1c20
movaps 0x1d20c66(%rip), %xmm11 # 0x1eeca10
movaps %xmm11, %xmm2
subps %xmm3, %xmm2
sqrtps %xmm2, %xmm2
mulps %xmm0, %xmm2
movaps 0x1d25e72(%rip), %xmm4 # 0x1ef1c30
movaps %xmm4, %xmm0
subps %xmm2, %xmm0
xorps %xmm5, %xmm5
maxps %xmm0, %xmm5
xorps %xmm2, %xmm2
movaps %xmm9, %xmm0
cmpltps %xmm2, %xmm0
xorps %xmm6, %xmm6
movaps %xmm5, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm5
movaps %xmm4, %xmm2
subps %xmm5, %xmm2
movaps %xmm3, %xmm0
cmpnleps %xmm11, %xmm0
blendvps %xmm0, 0x1d25e46(%rip), %xmm2 # 0x1ef1c40
movaps 0x1d0(%rsp), %xmm10
mulps %xmm10, %xmm2
movaps 0x1d25e42(%rip), %xmm0 # 0x1ef1c50
mulps %xmm2, %xmm0
roundps $0x1, %xmm0, %xmm3
cvtps2dq %xmm3, %xmm0
mulps %xmm4, %xmm3
subps %xmm3, %xmm2
movapd 0x1d25e37(%rip), %xmm3 # 0x1ef1c60
andpd %xmm0, %xmm3
movaps %xmm2, %xmm5
mulps %xmm2, %xmm5
movaps 0x1d25e55(%rip), %xmm12 # 0x1ef1c90
mulps %xmm5, %xmm12
addps 0x1d25e59(%rip), %xmm12 # 0x1ef1ca0
movaps 0x1d25e62(%rip), %xmm4 # 0x1ef1cb0
mulps %xmm5, %xmm4
addps 0x1d25e68(%rip), %xmm4 # 0x1ef1cc0
mulps %xmm5, %xmm12
addps 0x1d25e6c(%rip), %xmm12 # 0x1ef1cd0
movapd 0x1d20cb3(%rip), %xmm8 # 0x1eecb20
andpd %xmm8, %xmm0
mulps %xmm5, %xmm4
addps 0x1d25e64(%rip), %xmm4 # 0x1ef1ce0
mulps %xmm5, %xmm12
addps 0x1d25e68(%rip), %xmm12 # 0x1ef1cf0
movapd %xmm0, %xmm7
pcmpeqd %xmm6, %xmm7
mulps %xmm5, %xmm4
addps 0x1d25e66(%rip), %xmm4 # 0x1ef1d00
psubd %xmm0, %xmm6
mulps %xmm5, %xmm12
addps 0x1d25e66(%rip), %xmm12 # 0x1ef1d10
mulps %xmm5, %xmm4
addps 0x1d20c2c(%rip), %xmm4 # 0x1eecae0
mulps %xmm5, %xmm12
mulps %xmm5, %xmm4
addps %xmm11, %xmm12
mulps %xmm2, %xmm12
addps %xmm11, %xmm4
movaps %xmm12, %xmm13
movdqa %xmm6, %xmm0
blendvps %xmm0, %xmm4, %xmm13
movdqa %xmm7, %xmm0
blendvps %xmm0, %xmm4, %xmm12
movapd %xmm3, %xmm0
pcmpgtd %xmm8, %xmm0
movaps %xmm13, %xmm4
xorps %xmm1, %xmm4
blendvps %xmm0, %xmm4, %xmm13
pcmpeqd %xmm0, %xmm0
paddd %xmm3, %xmm0
pminud %xmm0, %xmm8
pcmpeqd %xmm0, %xmm8
xorps %xmm12, %xmm1
movdqa %xmm8, %xmm0
blendvps %xmm0, %xmm1, %xmm12
movaps 0x140(%rsp), %xmm2
movaps %xmm2, %xmm3
mulps %xmm9, %xmm3
subps %xmm15, %xmm3
movaps 0x90(%rsp), %xmm4
mulps %xmm9, %xmm4
movaps %xmm9, 0x190(%rsp)
movaps 0x100(%rsp), %xmm7
subps %xmm7, %xmm4
movaps %xmm3, %xmm0
mulps %xmm3, %xmm0
movaps %xmm4, %xmm1
mulps %xmm4, %xmm1
addps %xmm0, %xmm1
movaps 0x70(%rsp), %xmm5
mulps %xmm9, %xmm5
subps %xmm14, %xmm5
movaps %xmm5, %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
movaps 0x80(%rsp), %xmm1
mulps %xmm9, %xmm1
subps 0x30(%rsp), %xmm1
movaps %xmm1, (%rsp)
mulps %xmm1, %xmm1
addps %xmm0, %xmm1
rsqrtps %xmm1, %xmm6
mulps 0x1d2074c(%rip), %xmm1 # 0x1eec6e0
mulps %xmm6, %xmm1
movaps %xmm6, %xmm8
mulps %xmm6, %xmm6
mulps %xmm1, %xmm6
movaps 0x1d25d77(%rip), %xmm9 # 0x1ef1d20
mulps %xmm9, %xmm8
subps %xmm6, %xmm8
mulps %xmm8, %xmm3
mulps %xmm13, %xmm3
movaps %xmm2, %xmm14
mulps %xmm12, %xmm14
subps %xmm3, %xmm14
subps %xmm2, %xmm15
mulps %xmm10, %xmm15
addps %xmm2, %xmm15
movaps %xmm7, %xmm2
movaps 0x90(%rsp), %xmm0
subps %xmm0, %xmm2
mulps %xmm10, %xmm2
addps %xmm0, %xmm2
movaps %xmm15, %xmm3
mulps %xmm15, %xmm3
movaps %xmm2, %xmm6
mulps %xmm2, %xmm6
addps %xmm3, %xmm6
movaps 0x110(%rsp), %xmm7
movaps 0x70(%rsp), %xmm1
subps %xmm1, %xmm7
mulps %xmm10, %xmm7
addps %xmm1, %xmm7
movaps %xmm7, %xmm3
mulps %xmm7, %xmm3
addps %xmm6, %xmm3
movaps 0x30(%rsp), %xmm1
movaps 0x80(%rsp), %xmm0
subps %xmm0, %xmm1
mulps %xmm10, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, 0x30(%rsp)
mulps %xmm1, %xmm1
addps %xmm3, %xmm1
rsqrtps %xmm1, %xmm3
mulps 0x1d2069d(%rip), %xmm1 # 0x1eec6e0
mulps %xmm3, %xmm9
mulps %xmm3, %xmm1
mulps %xmm3, %xmm3
mulps %xmm1, %xmm3
subps %xmm3, %xmm9
mulps %xmm9, %xmm15
movaps 0x190(%rsp), %xmm1
cmpnleps 0x1d25cc8(%rip), %xmm1 # 0x1ef1d30
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm15, %xmm14
mulps %xmm8, %xmm4
mulps %xmm13, %xmm4
movaps 0x90(%rsp), %xmm3
mulps %xmm12, %xmm3
subps %xmm4, %xmm3
mulps %xmm9, %xmm2
blendvps %xmm0, %xmm2, %xmm3
mulps %xmm8, %xmm5
mulps %xmm13, %xmm5
movaps 0x70(%rsp), %xmm6
mulps %xmm12, %xmm6
subps %xmm5, %xmm6
mulps %xmm9, %xmm7
blendvps %xmm0, %xmm7, %xmm6
mulps (%rsp), %xmm8
mulps %xmm13, %xmm8
mulps 0x80(%rsp), %xmm12
subps %xmm8, %xmm12
mulps 0x30(%rsp), %xmm9
blendvps %xmm0, %xmm9, %xmm12
subps %xmm10, %xmm11
movaps 0x1e0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xc0(%rsp), %xmm7
mulps %xmm11, %xmm7
addps %xmm0, %xmm7
movaps 0x1f0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x40(%rsp), %xmm1
mulps %xmm11, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, 0x40(%rsp)
movaps 0x200(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x60(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x60(%rsp)
movaps 0x210(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x10(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x10(%rsp)
movaps 0x220(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xb0(%rsp), %xmm1
mulps %xmm11, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, 0xb0(%rsp)
movaps 0x1a0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x50(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x50(%rsp)
movaps 0x120(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x20(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x20(%rsp)
movaps 0x130(%rsp), %xmm2
mulps %xmm10, %xmm2
movaps 0x1b0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xe0(%rsp), %xmm4
mulps %xmm11, %xmm4
addps %xmm2, %xmm4
movaps %xmm4, 0xe0(%rsp)
movaps 0xd0(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0xd0(%rsp)
movaps 0x1c0(%rsp), %xmm4
mulps %xmm10, %xmm4
movaps 0x150(%rsp), %xmm2
mulps %xmm10, %xmm2
movaps 0x170(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xa0(%rsp), %xmm9
mulps %xmm11, %xmm9
addps %xmm4, %xmm9
movaps %xmm9, 0xa0(%rsp)
movaps 0xf0(%rsp), %xmm8
mulps %xmm11, %xmm8
addps %xmm2, %xmm8
movaps %xmm8, 0xf0(%rsp)
mulps 0x160(%rsp), %xmm11
addps %xmm0, %xmm11
movaps %xmm7, %xmm0
movaps %xmm11, 0x90(%rsp)
movaps %xmm3, %xmm7
movaps %xmm3, %xmm15
movaps %xmm14, %xmm9
movaps %xmm3, %xmm8
movaps %xmm14, %xmm10
mulps %xmm14, %xmm3
movaps %xmm14, %xmm2
mulps %xmm14, %xmm2
mulps %xmm7, %xmm7
movaps %xmm2, %xmm14
addps %xmm7, %xmm14
mulps %xmm6, %xmm15
mulps %xmm12, %xmm9
movaps %xmm15, %xmm1
addps %xmm9, %xmm1
subps %xmm9, %xmm15
movaps %xmm12, %xmm9
mulps %xmm12, %xmm8
mulps %xmm6, %xmm10
mulps %xmm6, %xmm12
movaps %xmm6, %xmm11
mulps %xmm6, %xmm11
subps %xmm11, %xmm14
subps %xmm7, %xmm2
movaps %xmm8, %xmm6
subps %xmm10, %xmm6
addps %xmm8, %xmm10
movaps %xmm10, %xmm8
movaps %xmm12, %xmm10
addps %xmm3, %xmm10
subps %xmm3, %xmm12
movaps %xmm11, %xmm13
addps %xmm2, %xmm13
subps %xmm11, %xmm2
mulps %xmm9, %xmm9
subps %xmm9, %xmm14
subps %xmm9, %xmm13
addps %xmm9, %xmm2
jmp 0x1cb03c
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersectorKMB<4>::occluded(embree::vboolf_impl<4> const&, embree::sse42::InstanceArrayIntersectorKMB<4>::Precalculations const&, embree::RayK<4>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
vbool<K> InstanceArrayIntersectorKMB<K>::occluded(const vbool<K>& valid_i, const Precalculations& pre, RayK<K>& ray, RayQueryContext* context, const Primitive& prim)
{
vbool<K> valid = valid_i;
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return false;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
valid &= (ray.mask & instance->mask) != 0;
if (none(valid)) return false;
#endif
RTCRayQueryContext* user_context = context->user;
vbool<K> occluded = false;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
AffineSpace3vf<K> world2local = instance->getWorld2Local<K>(prim.primID_, valid, ray.time());
const Vec3vf<K> ray_org = ray.org;
const Vec3vf<K> ray_dir = ray.dir;
ray.org = xfmPoint(world2local, ray_org);
ray.dir = xfmVector(world2local, ray_dir);
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.occluded(valid, ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
occluded = ray.tfar < 0.0f;
instance_id_stack::pop(user_context);
}
return occluded;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x270, %rsp # imm = 0x270
movq %rcx, %r14
movq %rdi, %rbx
movq (%r8), %rax
movl (%r9), %ecx
movl 0x4(%r9), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rdx
movq 0x58(%rdx), %rax
testq %rax, %rax
jne 0x1cc351
movq 0x90(%rdx), %rax
movq 0xa0(%rdx), %r10
imulq %rcx, %r10
movl (%rax,%r10), %eax
movl $0xffffffff, %r10d # imm = 0xFFFFFFFF
cmpq %r10, %rax
je 0x1cc34f
movq 0x60(%rdx), %r10
movq (%r10,%rax,8), %rax
jmp 0x1cc351
xorl %eax, %eax
testq %rax, %rax
je 0x1cc4c1
movd 0x34(%rdx), %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pand 0x90(%r14), %xmm0
pxor %xmm4, %xmm4
pcmpeqd %xmm0, %xmm4
pandn (%rsi), %xmm4
movmskps %xmm4, %esi
testl %esi, %esi
je 0x1cc4c1
movq 0x8(%r8), %r15
leaq 0x1f83bf1(%rip), %r10 # 0x214ff80
movaps (%r10), %xmm0
movaps %xmm0, (%rbx)
cmpl $-0x1, (%r15)
jne 0x1ccd45
movl %edi, (%r15)
movl %ecx, 0x4(%r15)
movl (%r9), %ecx
movzbl 0x3d(%rdx), %edi
shll $0x8, %edi
cmpl $0x100, %edi # imm = 0x100
movaps %xmm4, 0x250(%rsp)
je 0x1ccd55
movss 0x28(%rdx), %xmm0
movss 0x2c(%rdx), %xmm1
movss 0x30(%rdx), %xmm2
subss %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm0, %xmm3
shufps $0x0, %xmm0, %xmm3 # xmm3 = xmm3[0,0],xmm0[0,0]
movaps 0x70(%r14), %xmm12
subps %xmm1, %xmm12
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
divps %xmm2, %xmm12
mulps %xmm3, %xmm12
roundps $0x1, %xmm12, %xmm1
addss 0x1d245c5(%rip), %xmm0 # 0x1ef09cc
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm1
xorps %xmm0, %xmm0
maxps %xmm0, %xmm1
subps %xmm1, %xmm12
cvtps2dq %xmm1, %xmm11
movapd %xmm11, 0x230(%rsp)
movzbl %sil, %edi
bsfq %rdi, %rdi
movslq 0x230(%rsp,%rdi,4), %rdi
movd %edi, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pandn %xmm4, %xmm0
movmskps %xmm0, %r9d
testl %r9d, %r9d
jne 0x1cce5e
movq 0x88(%rdx), %rsi
imulq $0x38, %rdi, %rdi
leaq (%rsi,%rdi), %rdx
movl 0x20(%rsi,%rdi), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
je 0x1cc647
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1cc4d3
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1cc540
cmpl $0x9244, %r9d # imm = 0x9244
jne 0x1cc6b5
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movaps (%r9,%r10), %xmm4
movaps 0x10(%r9,%r10), %xmm11
movaps 0x20(%r9,%r10), %xmm10
movaps 0x30(%r9,%r10), %xmm8
jmp 0x1cc6b5
leaq 0x1f83ab8(%rip), %rax # 0x214ff80
movaps (%rax), %xmm0
movaps %xmm0, (%rbx)
jmp 0x1ccd45
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movsd 0x4(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm4
shufps $0x4c, %xmm1, %xmm4 # xmm4 = xmm4[0,3],xmm1[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm1
movss 0xc(%r9,%r10), %xmm11
shufps $0x4c, %xmm1, %xmm11 # xmm11 = xmm11[0,3],xmm1[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm1
movss 0x18(%r9,%r10), %xmm10
shufps $0x4c, %xmm1, %xmm10 # xmm10 = xmm10[0,3],xmm1[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x28(%r9,%r10), %xmm1
movss 0x24(%r9,%r10), %xmm8
shufps $0x4c, %xmm1, %xmm8 # xmm8 = xmm8[0,3],xmm1[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
jmp 0x1cc6b5
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movsd 0x10(%r9,%r10), %xmm10
insertps $0x20, 0x8(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],xmm10[3]
movsd 0x34(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm4
movss 0xc(%r9,%r10), %xmm11
movlhps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm4 # xmm4 = xmm4[0,2],xmm1[1,3]
movss 0x18(%r9,%r10), %xmm8
movsd 0x1c(%r9,%r10), %xmm1
movlhps %xmm1, %xmm8 # xmm8 = xmm8[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm8 # xmm8 = xmm8[0,2],xmm1[1,3]
movss 0x24(%r9,%r10), %xmm5
movss 0x28(%r9,%r10), %xmm3
movss 0x2c(%r9,%r10), %xmm1
movss 0x30(%r9,%r10), %xmm2
movaps %xmm3, %xmm6
mulss %xmm3, %xmm6
movaps %xmm5, %xmm9
mulss %xmm5, %xmm9
addss %xmm6, %xmm9
movaps %xmm1, %xmm6
mulss %xmm1, %xmm6
addss %xmm9, %xmm6
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm6, %xmm9
movaps %xmm9, %xmm6
rsqrtss %xmm9, %xmm6
movss 0x1d2012f(%rip), %xmm0 # 0x1eec718
mulss %xmm6, %xmm0
mulss 0x1d20126(%rip), %xmm9 # 0x1eec71c
mulss %xmm6, %xmm9
mulss %xmm6, %xmm6
mulss %xmm9, %xmm6
addss %xmm0, %xmm6
mulss %xmm6, %xmm5
insertps $0x30, %xmm5, %xmm8 # xmm8 = xmm8[0,1,2],xmm5[0]
mulss %xmm6, %xmm3
insertps $0x30, %xmm3, %xmm4 # xmm4 = xmm4[0,1,2],xmm3[0]
mulss %xmm6, %xmm1
mulss %xmm2, %xmm6
insertps $0x10, 0x4(%r9,%r10), %xmm11 # xmm11 = xmm11[0],mem[0],xmm11[2,3]
insertps $0x30, %xmm6, %xmm10 # xmm10 = xmm10[0,1,2],xmm6[0]
insertps $0x20, 0x3c(%r9,%r10), %xmm11 # xmm11 = xmm11[0,1],mem[0],xmm11[3]
insertps $0x30, %xmm1, %xmm11 # xmm11 = xmm11[0,1,2],xmm1[0]
jmp 0x1cc6b5
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movss (%r9,%r10), %xmm4
movss 0x4(%r9,%r10), %xmm11
movss 0x8(%r9,%r10), %xmm10
movss 0xc(%r9,%r10), %xmm8
insertps $0x1c, 0x10(%r9,%r10), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r9,%r10), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
movaps %xmm4, %xmm7
shufps $0x0, %xmm4, %xmm7 # xmm7 = xmm7[0,0],xmm4[0,0]
movaps %xmm4, %xmm13
shufps $0x55, %xmm4, %xmm13 # xmm13 = xmm13[1,1],xmm4[1,1]
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
movaps %xmm11, %xmm3
shufps $0x0, %xmm11, %xmm3 # xmm3 = xmm3[0,0],xmm11[0,0]
movaps %xmm11, %xmm1
shufps $0x55, %xmm11, %xmm1 # xmm1 = xmm1[1,1],xmm11[1,1]
movaps %xmm1, 0x60(%rsp)
shufps $0xaa, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
movaps %xmm10, %xmm1
shufps $0x0, %xmm10, %xmm1 # xmm1 = xmm1[0,0],xmm10[0,0]
movaps %xmm1, 0x10(%rsp)
movaps %xmm10, %xmm14
shufps $0x55, %xmm10, %xmm14 # xmm14 = xmm14[1,1],xmm10[1,1]
shufps $0xaa, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
movaps %xmm8, %xmm1
shufps $0x0, %xmm8, %xmm1 # xmm1 = xmm1[0,0],xmm8[0,0]
movaps %xmm1, (%rsp)
movaps %xmm8, %xmm1
shufps $0x55, %xmm8, %xmm1 # xmm1 = xmm1[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
movl 0x58(%rsi,%rdi), %esi
cmpl $0x9134, %esi # imm = 0x9134
movaps %xmm8, 0x50(%rsp)
movaps %xmm1, 0x40(%rsp)
movaps %xmm11, 0xc0(%rsp)
je 0x1cc8f8
cmpl $0x9234, %esi # imm = 0x9234
je 0x1cc784
cmpl $0xb001, %esi # imm = 0xB001
je 0x1cc7ea
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1cc961
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movaps (%rsi,%rcx), %xmm1
movaps 0x10(%rsi,%rcx), %xmm15
movaps 0x20(%rsi,%rcx), %xmm9
movaps 0x30(%rsi,%rcx), %xmm11
jmp 0x1cc961
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movsd 0x4(%rsi,%rcx), %xmm2
movss (%rsi,%rcx), %xmm1
shufps $0x4c, %xmm2, %xmm1 # xmm1 = xmm1[0,3],xmm2[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x10(%rsi,%rcx), %xmm2
movss 0xc(%rsi,%rcx), %xmm15
shufps $0x4c, %xmm2, %xmm15 # xmm15 = xmm15[0,3],xmm2[0,1]
shufps $0x78, %xmm15, %xmm15 # xmm15 = xmm15[0,2,3,1]
movsd 0x1c(%rsi,%rcx), %xmm2
movss 0x18(%rsi,%rcx), %xmm9
shufps $0x4c, %xmm2, %xmm9 # xmm9 = xmm9[0,3],xmm2[0,1]
shufps $0x78, %xmm9, %xmm9 # xmm9 = xmm9[0,2,3,1]
movsd 0x28(%rsi,%rcx), %xmm2
movss 0x24(%rsi,%rcx), %xmm11
shufps $0x4c, %xmm2, %xmm11 # xmm11 = xmm11[0,3],xmm2[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
jmp 0x1cc961
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movsd 0x10(%rsi,%rcx), %xmm9
insertps $0x20, 0x8(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
movsd 0x34(%rsi,%rcx), %xmm2
movss (%rsi,%rcx), %xmm1
movss 0xc(%rsi,%rcx), %xmm15
movlhps %xmm2, %xmm1 # xmm1 = xmm1[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm1 # xmm1 = xmm1[0,2],xmm2[1,3]
movss 0x18(%rsi,%rcx), %xmm11
movsd 0x1c(%rsi,%rcx), %xmm2
movlhps %xmm2, %xmm11 # xmm11 = xmm11[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm11 # xmm11 = xmm11[0,2],xmm2[1,3]
movss 0x24(%rsi,%rcx), %xmm0
movss 0x28(%rsi,%rcx), %xmm5
movss 0x2c(%rsi,%rcx), %xmm8
movss 0x30(%rsi,%rcx), %xmm6
movss %xmm6, 0xa0(%rsp)
movaps %xmm5, %xmm2
mulss %xmm5, %xmm2
movaps %xmm3, 0x20(%rsp)
movaps %xmm0, %xmm3
mulss %xmm0, %xmm3
addss %xmm2, %xmm3
movaps %xmm8, %xmm2
mulss %xmm8, %xmm2
addss %xmm3, %xmm2
movaps %xmm6, %xmm3
mulss %xmm6, %xmm3
addss %xmm2, %xmm3
movaps %xmm3, %xmm2
rsqrtss %xmm3, %xmm2
movss 0x1d1fe86(%rip), %xmm6 # 0x1eec718
mulss %xmm2, %xmm6
mulss 0x1d1fe7e(%rip), %xmm3 # 0x1eec71c
mulss %xmm2, %xmm3
mulss %xmm2, %xmm2
mulss %xmm3, %xmm2
movaps 0x20(%rsp), %xmm3
addss %xmm6, %xmm2
mulss %xmm2, %xmm0
insertps $0x30, %xmm0, %xmm11 # xmm11 = xmm11[0,1,2],xmm0[0]
mulss %xmm2, %xmm5
insertps $0x30, %xmm5, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0]
mulss %xmm2, %xmm8
mulss 0xa0(%rsp), %xmm2
insertps $0x10, 0x4(%rsi,%rcx), %xmm15 # xmm15 = xmm15[0],mem[0],xmm15[2,3]
insertps $0x30, %xmm2, %xmm9 # xmm9 = xmm9[0,1,2],xmm2[0]
insertps $0x20, 0x3c(%rsi,%rcx), %xmm15 # xmm15 = xmm15[0,1],mem[0],xmm15[3]
insertps $0x30, %xmm8, %xmm15 # xmm15 = xmm15[0,1,2],xmm8[0]
jmp 0x1cc961
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movss (%rsi,%rcx), %xmm1
movss 0x4(%rsi,%rcx), %xmm15
movss 0x8(%rsi,%rcx), %xmm9
movss 0xc(%rsi,%rcx), %xmm11
insertps $0x1c, 0x10(%rsi,%rcx), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rcx), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rcx), %xmm15 # xmm15 = xmm15[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rcx), %xmm15 # xmm15 = xmm15[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
movaps %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm2 # xmm2 = xmm2[0,0],xmm1[0,0]
movaps 0x1d200a1(%rip), %xmm6 # 0x1eeca10
subps %xmm12, %xmm6
mulps %xmm12, %xmm2
mulps %xmm6, %xmm7
addps %xmm2, %xmm7
movaps %xmm1, %xmm2
shufps $0x55, %xmm1, %xmm2 # xmm2 = xmm2[1,1],xmm1[1,1]
mulps %xmm12, %xmm2
mulps %xmm6, %xmm13
addps %xmm2, %xmm13
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm12, %xmm1
mulps %xmm6, %xmm4
addps %xmm1, %xmm4
movaps %xmm15, %xmm1
shufps $0x0, %xmm15, %xmm1 # xmm1 = xmm1[0,0],xmm15[0,0]
mulps %xmm12, %xmm1
mulps %xmm6, %xmm3
addps %xmm1, %xmm3
movaps %xmm15, %xmm1
shufps $0x55, %xmm15, %xmm1 # xmm1 = xmm1[1,1],xmm15[1,1]
mulps %xmm12, %xmm1
movaps %xmm3, %xmm8
movaps 0x60(%rsp), %xmm3
mulps %xmm6, %xmm3
addps %xmm1, %xmm3
shufps $0xaa, %xmm15, %xmm15 # xmm15 = xmm15[2,2,2,2]
mulps %xmm12, %xmm15
movaps %xmm4, %xmm0
movaps 0xc0(%rsp), %xmm4
mulps %xmm6, %xmm4
addps %xmm15, %xmm4
movaps %xmm9, %xmm1
shufps $0x0, %xmm9, %xmm1 # xmm1 = xmm1[0,0],xmm9[0,0]
mulps %xmm12, %xmm1
movaps 0x10(%rsp), %xmm5
mulps %xmm6, %xmm5
addps %xmm1, %xmm5
movaps %xmm9, %xmm1
shufps $0x55, %xmm9, %xmm1 # xmm1 = xmm1[1,1],xmm9[1,1]
shufps $0xaa, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
mulps %xmm12, %xmm1
mulps %xmm6, %xmm14
addps %xmm1, %xmm14
movaps %xmm11, %xmm1
shufps $0x0, %xmm11, %xmm1 # xmm1 = xmm1[0,0],xmm11[0,0]
mulps %xmm12, %xmm9
mulps %xmm6, %xmm10
addps %xmm9, %xmm10
movaps %xmm11, %xmm2
shufps $0x55, %xmm11, %xmm2 # xmm2 = xmm2[1,1],xmm11[1,1]
shufps $0xaa, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
mulps %xmm12, %xmm1
mulps %xmm12, %xmm2
mulps %xmm12, %xmm11
movaps (%rsp), %xmm9
mulps %xmm6, %xmm9
addps %xmm1, %xmm9
movaps %xmm9, (%rsp)
movaps 0x40(%rsp), %xmm9
mulps %xmm6, %xmm9
addps %xmm2, %xmm9
mulps 0x50(%rsp), %xmm6
addps %xmm11, %xmm6
movaps %xmm4, %xmm1
mulps %xmm14, %xmm1
movaps %xmm3, %xmm2
mulps %xmm10, %xmm2
subps %xmm1, %xmm2
movaps %xmm8, %xmm12
movaps %xmm8, 0x20(%rsp)
movaps %xmm8, %xmm1
mulps %xmm10, %xmm1
movaps %xmm4, %xmm8
mulps %xmm5, %xmm8
subps %xmm1, %xmm8
movaps %xmm3, %xmm1
mulps %xmm5, %xmm1
mulps %xmm14, %xmm12
subps %xmm1, %xmm12
movaps %xmm13, %xmm1
mulps %xmm10, %xmm1
movaps %xmm0, %xmm11
mulps %xmm14, %xmm11
subps %xmm1, %xmm11
movaps %xmm0, %xmm1
mulps %xmm5, %xmm1
mulps %xmm7, %xmm10
subps %xmm1, %xmm10
mulps %xmm7, %xmm14
mulps %xmm13, %xmm5
subps %xmm14, %xmm5
movaps %xmm0, %xmm1
mulps %xmm3, %xmm1
movaps %xmm13, %xmm14
mulps %xmm4, %xmm14
subps %xmm1, %xmm14
mulps %xmm7, %xmm4
movaps %xmm0, %xmm15
movaps 0x20(%rsp), %xmm1
mulps %xmm1, %xmm15
subps %xmm4, %xmm15
mulps %xmm13, %xmm1
mulps %xmm7, %xmm3
subps %xmm1, %xmm3
mulps %xmm12, %xmm0
mulps %xmm8, %xmm13
addps %xmm0, %xmm13
mulps %xmm2, %xmm7
addps %xmm13, %xmm7
movaps %xmm2, %xmm13
divps %xmm7, %xmm13
divps %xmm7, %xmm11
divps %xmm7, %xmm14
divps %xmm7, %xmm8
divps %xmm7, %xmm10
divps %xmm7, %xmm15
divps %xmm7, %xmm12
divps %xmm7, %xmm5
divps %xmm7, %xmm3
movaps %xmm3, %xmm4
movaps %xmm6, %xmm0
mulps %xmm12, %xmm0
movaps %xmm6, %xmm1
movaps %xmm5, 0x10(%rsp)
mulps %xmm5, %xmm1
movaps %xmm9, %xmm2
mulps %xmm8, %xmm2
addps %xmm0, %xmm2
movaps (%rsp), %xmm7
movaps %xmm9, %xmm3
movaps %xmm10, 0x20(%rsp)
mulps %xmm10, %xmm3
addps %xmm1, %xmm3
mulps %xmm4, %xmm6
mulps %xmm15, %xmm9
addps %xmm6, %xmm9
movaps %xmm7, %xmm0
mulps %xmm13, %xmm0
addps %xmm2, %xmm0
movaps %xmm7, %xmm1
mulps %xmm11, %xmm1
addps %xmm3, %xmm1
movaps %xmm7, %xmm10
mulps %xmm14, %xmm10
addps %xmm9, %xmm10
movaps %xmm11, %xmm9
movaps 0x1d1fb1f(%rip), %xmm2 # 0x1eec6d0
xorps %xmm2, %xmm0
xorps %xmm2, %xmm1
xorps %xmm2, %xmm10
movaps 0x250(%rsp), %xmm6
movaps 0x10(%r14), %xmm5
movaps %xmm5, 0x60(%rsp)
movaps 0x20(%r14), %xmm3
movaps %xmm3, (%rsp)
movaps %xmm12, %xmm2
mulps %xmm3, %xmm2
addps %xmm0, %xmm2
movaps 0x10(%rsp), %xmm11
movaps %xmm11, %xmm0
mulps %xmm3, %xmm0
addps %xmm1, %xmm0
movaps %xmm4, %xmm1
mulps %xmm3, %xmm1
addps %xmm10, %xmm1
movaps %xmm8, %xmm3
mulps %xmm5, %xmm3
addps %xmm2, %xmm3
movaps 0x20(%rsp), %xmm7
movaps %xmm7, %xmm2
mulps %xmm5, %xmm2
addps %xmm0, %xmm2
movaps %xmm15, %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
movaps (%r14), %xmm5
movaps %xmm5, 0x50(%rsp)
movaps %xmm13, %xmm1
mulps %xmm5, %xmm1
addps %xmm3, %xmm1
movaps %xmm9, %xmm3
mulps %xmm5, %xmm3
addps %xmm2, %xmm3
movaps %xmm14, %xmm2
mulps %xmm5, %xmm2
addps %xmm0, %xmm2
movaps 0x40(%r14), %xmm0
movaps 0x50(%r14), %xmm5
movaps 0x60(%r14), %xmm10
movaps %xmm1, (%r14)
movaps %xmm3, 0x10(%r14)
movaps %xmm2, 0x20(%r14)
movaps %xmm10, 0x40(%rsp)
mulps %xmm10, %xmm12
movaps %xmm5, 0xc0(%rsp)
mulps %xmm5, %xmm8
addps %xmm12, %xmm8
mulps %xmm10, %xmm11
mulps %xmm5, %xmm7
addps %xmm11, %xmm7
mulps %xmm10, %xmm4
mulps %xmm5, %xmm15
addps %xmm4, %xmm15
movaps %xmm0, 0xa0(%rsp)
mulps %xmm0, %xmm13
addps %xmm8, %xmm13
mulps %xmm0, %xmm9
addps %xmm7, %xmm9
mulps %xmm0, %xmm14
addps %xmm15, %xmm14
movaps %xmm13, 0x40(%r14)
movaps %xmm9, 0x50(%r14)
movaps %xmm14, 0x60(%r14)
movq 0x10(%r8), %rdx
leaq 0x260(%rsp), %rdi
movaps %xmm6, (%rdi)
leaq 0x230(%rsp), %rcx
movq %rax, (%rcx)
movq %r15, 0x8(%rcx)
movq %rdx, 0x10(%rcx)
leaq 0x58(%rax), %rsi
movq %r14, %rdx
callq *0xa0(%rax)
movaps 0x50(%rsp), %xmm0
movaps %xmm0, (%r14)
movaps 0x60(%rsp), %xmm0
movaps %xmm0, 0x10(%r14)
movaps (%rsp), %xmm0
movaps %xmm0, 0x20(%r14)
movaps 0xa0(%rsp), %xmm0
movaps %xmm0, 0x40(%r14)
movaps 0xc0(%rsp), %xmm0
movaps %xmm0, 0x50(%r14)
movaps 0x40(%rsp), %xmm0
movaps %xmm0, 0x60(%r14)
movaps 0x80(%r14), %xmm0
xorps %xmm1, %xmm1
cmpltps %xmm1, %xmm0
movaps %xmm0, (%rbx)
orq $-0x1, (%r15)
movq %rbx, %rax
addq $0x270, %rsp # imm = 0x270
popq %rbx
popq %r14
popq %r15
retq
movss 0x28(%rdx), %xmm0
movss 0x2c(%rdx), %xmm1
movss 0x30(%rdx), %xmm2
subss %xmm1, %xmm2
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm0, %xmm3
shufps $0x0, %xmm0, %xmm3 # xmm3 = xmm3[0,0],xmm0[0,0]
movaps 0x70(%r14), %xmm12
subps %xmm1, %xmm12
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
divps %xmm2, %xmm12
mulps %xmm3, %xmm12
roundps $0x1, %xmm12, %xmm1
addss 0x1d23c35(%rip), %xmm0 # 0x1ef09cc
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm1
xorps %xmm0, %xmm0
maxps %xmm0, %xmm1
subps %xmm1, %xmm12
cvtps2dq %xmm1, %xmm11
movapd %xmm11, 0x230(%rsp)
movzbl %sil, %edi
bsfq %rdi, %rdi
movslq 0x230(%rsp,%rdi,4), %rdi
movd %edi, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pandn %xmm4, %xmm0
movmskps %xmm0, %r9d
testl %r9d, %r9d
movaps %xmm12, 0x1d0(%rsp)
jne 0x1ce872
movq 0x88(%rdx), %rsi
imulq $0x38, %rdi, %rdi
leaq (%rsi,%rdi), %rdx
movl 0x20(%rsi,%rdi), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
je 0x1cda3e
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1cd8d3
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1cd940
cmpl $0x9244, %r9d # imm = 0x9244
jne 0x1cdaac
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movaps (%r9,%r10), %xmm2
movaps 0x10(%r9,%r10), %xmm12
movaps 0x20(%r9,%r10), %xmm13
movaps 0x30(%r9,%r10), %xmm8
jmp 0x1cdaac
movaps %xmm12, 0x1a0(%rsp)
testl %esi, %esi
je 0x1cd64e
movq 0x88(%rdx), %rdx
movdqa %xmm4, %xmm14
movzbl %sil, %esi
andl $0xf, %esi
bsfq %rsi, %rsi
movslq %esi, %rsi
movslq 0x230(%rsp,%rsi,4), %rsi
movd %esi, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pand %xmm14, %xmm0
imulq $0x38, %rsi, %rdi
leaq (%rdx,%rdi), %rsi
movl 0x20(%rdx,%rdi), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
movdqa %xmm14, 0x50(%rsp)
movaps %xmm13, 0x20(%rsp)
je 0x1cd0d0
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1ccf24
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1ccf93
cmpl $0x9244, %r9d # imm = 0x9244
movaps 0x10(%rsp), %xmm14
movaps 0x1c0(%rsp), %xmm13
jne 0x1cd13e
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movaps (%r9,%r10), %xmm14
movaps 0x10(%r9,%r10), %xmm10
movaps 0x20(%r9,%r10), %xmm13
movaps 0x30(%r9,%r10), %xmm15
jmp 0x1cd13e
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movsd 0x4(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm14
shufps $0x4c, %xmm1, %xmm14 # xmm14 = xmm14[0,3],xmm1[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm1
movss 0xc(%r9,%r10), %xmm10
shufps $0x4c, %xmm1, %xmm10 # xmm10 = xmm10[0,3],xmm1[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm1
movss 0x18(%r9,%r10), %xmm13
shufps $0x4c, %xmm1, %xmm13 # xmm13 = xmm13[0,3],xmm1[0,1]
shufps $0x78, %xmm13, %xmm13 # xmm13 = xmm13[0,2,3,1]
movsd 0x28(%r9,%r10), %xmm1
movss 0x24(%r9,%r10), %xmm15
shufps $0x4c, %xmm1, %xmm15 # xmm15 = xmm15[0,3],xmm1[0,1]
shufps $0x78, %xmm15, %xmm15 # xmm15 = xmm15[0,2,3,1]
jmp 0x1cd13e
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movsd 0x10(%r9,%r10), %xmm13
insertps $0x20, 0x8(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],xmm13[3]
movsd 0x34(%r9,%r10), %xmm1
movss (%r9,%r10), %xmm14
movss 0xc(%r9,%r10), %xmm10
movlhps %xmm1, %xmm14 # xmm14 = xmm14[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm14 # xmm14 = xmm14[0,2],xmm1[1,3]
movss 0x18(%r9,%r10), %xmm15
movsd 0x1c(%r9,%r10), %xmm1
movlhps %xmm1, %xmm15 # xmm15 = xmm15[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm15 # xmm15 = xmm15[0,2],xmm1[1,3]
movss 0x24(%r9,%r10), %xmm2
movaps %xmm9, 0x130(%rsp)
movaps %xmm6, 0x120(%rsp)
movss 0x28(%r9,%r10), %xmm6
movss 0x2c(%r9,%r10), %xmm1
movss 0x30(%r9,%r10), %xmm12
movaps %xmm3, (%rsp)
movaps %xmm6, %xmm3
mulss %xmm6, %xmm3
movaps %xmm5, 0x10(%rsp)
movaps %xmm4, %xmm5
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
addss %xmm3, %xmm4
movaps %xmm1, %xmm3
mulss %xmm1, %xmm3
addss %xmm4, %xmm3
movaps %xmm12, %xmm4
mulss %xmm12, %xmm4
addss %xmm3, %xmm4
movaps %xmm4, %xmm3
rsqrtss %xmm4, %xmm3
movaps %xmm3, %xmm9
mulss 0x1d1f6bf(%rip), %xmm9 # 0x1eec718
mulss 0x1d1f6bb(%rip), %xmm4 # 0x1eec71c
mulss %xmm3, %xmm4
mulss %xmm3, %xmm3
mulss %xmm4, %xmm3
movaps %xmm5, %xmm4
movaps 0x10(%rsp), %xmm5
addss %xmm9, %xmm3
mulss %xmm3, %xmm2
insertps $0x30, %xmm2, %xmm15 # xmm15 = xmm15[0,1,2],xmm2[0]
mulss %xmm3, %xmm6
insertps $0x30, %xmm6, %xmm14 # xmm14 = xmm14[0,1,2],xmm6[0]
movaps 0x120(%rsp), %xmm6
movaps 0x130(%rsp), %xmm9
mulss %xmm3, %xmm1
mulss %xmm12, %xmm3
insertps $0x10, 0x4(%r9,%r10), %xmm10 # xmm10 = xmm10[0],mem[0],xmm10[2,3]
insertps $0x30, %xmm3, %xmm13 # xmm13 = xmm13[0,1,2],xmm3[0]
movaps (%rsp), %xmm3
insertps $0x20, 0x3c(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],xmm10[3]
insertps $0x30, %xmm1, %xmm10 # xmm10 = xmm10[0,1,2],xmm1[0]
jmp 0x1cd13e
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movss (%r9,%r10), %xmm14
movss 0x4(%r9,%r10), %xmm10
movss 0x8(%r9,%r10), %xmm13
movss 0xc(%r9,%r10), %xmm15
insertps $0x1c, 0x10(%r9,%r10), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm13 # xmm13 = xmm13[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r9,%r10), %xmm15 # xmm15 = xmm15[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm15 # xmm15 = xmm15[0,1],mem[0],zero
movaps %xmm14, %xmm1
shufps $0x0, %xmm14, %xmm1 # xmm1 = xmm1[0,0],xmm14[0,0]
movaps 0xc0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xc0(%rsp)
movaps %xmm14, %xmm1
shufps $0x55, %xmm14, %xmm1 # xmm1 = xmm1[1,1],xmm14[1,1]
movaps 0xd0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xd0(%rsp)
movaps %xmm14, 0x10(%rsp)
shufps $0xaa, %xmm14, %xmm14 # xmm14 = xmm14[2,2,2,2]
movaps 0xa0(%rsp), %xmm2
blendvps %xmm0, %xmm14, %xmm2
movaps %xmm2, 0xa0(%rsp)
movaps %xmm10, %xmm1
shufps $0x0, %xmm10, %xmm1 # xmm1 = xmm1[0,0],xmm10[0,0]
movaps 0xf0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xf0(%rsp)
movaps %xmm10, %xmm1
shufps $0x55, %xmm10, %xmm1 # xmm1 = xmm1[1,1],xmm10[1,1]
movaps 0x60(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x60(%rsp)
movaps %xmm10, %xmm1
shufps $0xaa, %xmm10, %xmm1 # xmm1 = xmm1[2,2],xmm10[2,2]
movaps 0xe0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xe0(%rsp)
movaps %xmm13, %xmm1
shufps $0x0, %xmm13, %xmm1 # xmm1 = xmm1[0,0],xmm13[0,0]
blendvps %xmm0, %xmm1, %xmm5
movaps %xmm13, %xmm1
shufps $0x55, %xmm13, %xmm1 # xmm1 = xmm1[1,1],xmm13[1,1]
blendvps %xmm0, %xmm1, %xmm6
movaps %xmm13, 0x1c0(%rsp)
shufps $0xaa, %xmm13, %xmm13 # xmm13 = xmm13[2,2,2,2]
blendvps %xmm0, %xmm13, %xmm4
movaps %xmm15, %xmm1
shufps $0x0, %xmm15, %xmm1 # xmm1 = xmm1[0,0],xmm15[0,0]
blendvps %xmm0, %xmm1, %xmm3
movaps %xmm15, %xmm1
shufps $0x55, %xmm15, %xmm1 # xmm1 = xmm1[1,1],xmm15[1,1]
movaps 0x40(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x40(%rsp)
movaps %xmm15, %xmm1
shufps $0xaa, %xmm15, %xmm1 # xmm1 = xmm1[2,2],xmm15[2,2]
movaps 0xb0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0xb0(%rsp)
movdqa 0x50(%rsp), %xmm14
pxor %xmm0, %xmm14
movl 0x58(%rdx,%rdi), %edi
cmpl $0x9134, %edi # imm = 0x9134
movdqa %xmm14, 0x50(%rsp)
je 0x1cd481
cmpl $0x9234, %edi # imm = 0x9234
je 0x1cd2dc
cmpl $0xb001, %edi # imm = 0xB001
je 0x1cd345
cmpl $0x9244, %edi # imm = 0x9244
movaps 0x20(%rsp), %xmm13
movaps 0x1b0(%rsp), %xmm12
jne 0x1cd4ed
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movaps (%rdi,%rsi), %xmm13
movaps 0x10(%rdi,%rsi), %xmm8
movaps 0x20(%rdi,%rsi), %xmm12
movaps 0x30(%rdi,%rsi), %xmm7
jmp 0x1cd4ed
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movsd 0x4(%rdi,%rsi), %xmm1
movss (%rdi,%rsi), %xmm13
shufps $0x4c, %xmm1, %xmm13 # xmm13 = xmm13[0,3],xmm1[0,1]
shufps $0x78, %xmm13, %xmm13 # xmm13 = xmm13[0,2,3,1]
movsd 0x10(%rdi,%rsi), %xmm1
movss 0xc(%rdi,%rsi), %xmm8
shufps $0x4c, %xmm1, %xmm8 # xmm8 = xmm8[0,3],xmm1[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x1c(%rdi,%rsi), %xmm1
movss 0x18(%rdi,%rsi), %xmm12
shufps $0x4c, %xmm1, %xmm12 # xmm12 = xmm12[0,3],xmm1[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x28(%rdi,%rsi), %xmm1
movss 0x24(%rdi,%rsi), %xmm7
shufps $0x4c, %xmm1, %xmm7 # xmm7 = xmm7[0,3],xmm1[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
jmp 0x1cd4ed
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movsd 0x10(%rdi,%rsi), %xmm12
insertps $0x20, 0x8(%rdi,%rsi), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
movsd 0x34(%rdi,%rsi), %xmm1
movss (%rdi,%rsi), %xmm13
movss 0xc(%rdi,%rsi), %xmm8
movlhps %xmm1, %xmm13 # xmm13 = xmm13[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm13 # xmm13 = xmm13[0,2],xmm1[1,3]
movss 0x18(%rdi,%rsi), %xmm7
movsd 0x1c(%rdi,%rsi), %xmm1
movlhps %xmm1, %xmm7 # xmm7 = xmm7[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm7 # xmm7 = xmm7[0,2],xmm1[1,3]
movss 0x24(%rdi,%rsi), %xmm2
movaps %xmm9, 0x130(%rsp)
movaps %xmm6, 0x120(%rsp)
movss 0x28(%rdi,%rsi), %xmm6
movss 0x2c(%rdi,%rsi), %xmm1
movaps %xmm3, (%rsp)
movss 0x30(%rdi,%rsi), %xmm14
movaps %xmm4, 0x20(%rsp)
movaps %xmm6, %xmm4
mulss %xmm6, %xmm4
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm4, %xmm9
movaps %xmm1, %xmm4
mulss %xmm1, %xmm4
addss %xmm9, %xmm4
movaps %xmm14, %xmm9
mulss %xmm14, %xmm9
addss %xmm4, %xmm9
movaps %xmm9, %xmm4
rsqrtss %xmm9, %xmm4
movaps %xmm5, %xmm3
movaps %xmm4, %xmm5
mulss 0x1d1f30f(%rip), %xmm5 # 0x1eec718
mulss 0x1d1f30a(%rip), %xmm9 # 0x1eec71c
mulss %xmm4, %xmm9
mulss %xmm4, %xmm4
mulss %xmm9, %xmm4
addss %xmm5, %xmm4
movaps %xmm3, %xmm5
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
mulss %xmm4, %xmm6
insertps $0x30, %xmm6, %xmm13 # xmm13 = xmm13[0,1,2],xmm6[0]
movaps 0x120(%rsp), %xmm6
movaps 0x130(%rsp), %xmm9
mulss %xmm4, %xmm1
mulss %xmm14, %xmm4
movaps (%rsp), %xmm3
insertps $0x10, 0x4(%rdi,%rsi), %xmm8 # xmm8 = xmm8[0],mem[0],xmm8[2,3]
insertps $0x30, %xmm4, %xmm12 # xmm12 = xmm12[0,1,2],xmm4[0]
movaps 0x20(%rsp), %xmm4
insertps $0x20, 0x3c(%rdi,%rsi), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
insertps $0x30, %xmm1, %xmm8 # xmm8 = xmm8[0,1,2],xmm1[0]
jmp 0x1cd4ed
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movss (%rdi,%rsi), %xmm13
movss 0x4(%rdi,%rsi), %xmm8
movss 0x8(%rdi,%rsi), %xmm12
movss 0xc(%rdi,%rsi), %xmm7
insertps $0x1c, 0x10(%rdi,%rsi), %xmm13 # xmm13 = xmm13[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdi,%rsi), %xmm13 # xmm13 = xmm13[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdi,%rsi), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdi,%rsi), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdi,%rsi), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdi,%rsi), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdi,%rsi), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdi,%rsi), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
movaps %xmm13, %xmm1
shufps $0x0, %xmm13, %xmm1 # xmm1 = xmm1[0,0],xmm13[0,0]
movaps 0x150(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x150(%rsp)
movaps %xmm13, %xmm1
shufps $0x55, %xmm13, %xmm1 # xmm1 = xmm1[1,1],xmm13[1,1]
movaps 0x160(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x160(%rsp)
movaps %xmm13, %xmm1
shufps $0xaa, %xmm13, %xmm1 # xmm1 = xmm1[2,2],xmm13[2,2]
movaps 0x170(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x170(%rsp)
movaps %xmm8, %xmm1
shufps $0x0, %xmm8, %xmm1 # xmm1 = xmm1[0,0],xmm8[0,0]
movaps 0x140(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x140(%rsp)
movaps %xmm8, %xmm1
shufps $0x55, %xmm8, %xmm1 # xmm1 = xmm1[1,1],xmm8[1,1]
movaps 0x100(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x100(%rsp)
movaps %xmm8, %xmm1
shufps $0xaa, %xmm8, %xmm1 # xmm1 = xmm1[2,2],xmm8[2,2]
movaps 0x110(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x110(%rsp)
movaps %xmm12, %xmm1
shufps $0x0, %xmm12, %xmm1 # xmm1 = xmm1[0,0],xmm12[0,0]
movaps 0x1d0(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x1d0(%rsp)
movaps %xmm12, %xmm1
shufps $0x55, %xmm12, %xmm1 # xmm1 = xmm1[1,1],xmm12[1,1]
movaps 0x90(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x90(%rsp)
movaps %xmm12, 0x1b0(%rsp)
shufps $0xaa, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
movaps 0x70(%rsp), %xmm2
blendvps %xmm0, %xmm12, %xmm2
movaps %xmm2, 0x70(%rsp)
movaps %xmm7, %xmm1
shufps $0x0, %xmm7, %xmm1 # xmm1 = xmm1[0,0],xmm7[0,0]
movaps 0x80(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x80(%rsp)
movaps %xmm7, %xmm1
shufps $0x55, %xmm7, %xmm1 # xmm1 = xmm1[1,1],xmm7[1,1]
movaps 0x30(%rsp), %xmm2
blendvps %xmm0, %xmm1, %xmm2
movaps %xmm2, 0x30(%rsp)
movaps %xmm7, %xmm1
shufps $0xaa, %xmm7, %xmm1 # xmm1 = xmm1[2,2],xmm7[2,2]
blendvps %xmm0, %xmm1, %xmm9
movaps 0x50(%rsp), %xmm14
movmskps %xmm14, %esi
testl %esi, %esi
jne 0x1cce7b
jmp 0x1cd64e
movaps 0x1d1f3bb(%rip), %xmm0 # 0x1eeca10
movaps 0x1a0(%rsp), %xmm1
subps %xmm1, %xmm0
movaps 0x150(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps 0xc0(%rsp), %xmm2
mulps %xmm0, %xmm2
addps %xmm7, %xmm2
movaps 0x160(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps %xmm6, %xmm13
movaps 0xd0(%rsp), %xmm6
mulps %xmm0, %xmm6
addps %xmm7, %xmm6
movaps 0x170(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps 0xa0(%rsp), %xmm10
mulps %xmm0, %xmm10
addps %xmm7, %xmm10
movaps 0x140(%rsp), %xmm7
mulps %xmm1, %xmm7
movaps 0xf0(%rsp), %xmm11
mulps %xmm0, %xmm11
addps %xmm7, %xmm11
movaps 0x100(%rsp), %xmm8
mulps %xmm1, %xmm8
movaps %xmm9, %xmm7
movaps %xmm4, %xmm12
movaps 0x60(%rsp), %xmm4
mulps %xmm0, %xmm4
addps %xmm8, %xmm4
movaps 0x110(%rsp), %xmm9
mulps %xmm1, %xmm9
movaps %xmm3, %xmm8
movaps 0xe0(%rsp), %xmm3
mulps %xmm0, %xmm3
addps %xmm9, %xmm3
movaps 0x1d0(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm0, %xmm5
addps %xmm9, %xmm5
movaps 0x90(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm0, %xmm13
addps %xmm9, %xmm13
movaps 0x70(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm0, %xmm12
addps %xmm9, %xmm12
movaps 0x80(%rsp), %xmm15
mulps %xmm1, %xmm15
movaps 0x30(%rsp), %xmm9
mulps %xmm1, %xmm9
mulps %xmm1, %xmm7
mulps %xmm0, %xmm8
addps %xmm15, %xmm8
movaps %xmm8, (%rsp)
movaps 0x40(%rsp), %xmm1
mulps %xmm0, %xmm1
addps %xmm9, %xmm1
movaps %xmm1, 0x40(%rsp)
mulps 0xb0(%rsp), %xmm0
addps %xmm7, %xmm0
movaps %xmm3, %xmm1
mulps %xmm13, %xmm1
movaps %xmm12, %xmm7
movaps %xmm5, %xmm12
movaps %xmm4, %xmm5
mulps %xmm7, %xmm5
subps %xmm1, %xmm5
movaps %xmm11, %xmm1
mulps %xmm7, %xmm1
movaps %xmm3, %xmm8
mulps %xmm12, %xmm8
subps %xmm1, %xmm8
movaps %xmm4, %xmm1
mulps %xmm12, %xmm1
movaps %xmm11, %xmm9
mulps %xmm13, %xmm9
subps %xmm1, %xmm9
movaps %xmm9, 0x10(%rsp)
movaps %xmm6, %xmm1
mulps %xmm7, %xmm1
movaps %xmm10, %xmm9
mulps %xmm13, %xmm9
subps %xmm1, %xmm9
movaps %xmm10, %xmm1
mulps %xmm12, %xmm1
mulps %xmm2, %xmm7
subps %xmm1, %xmm7
mulps %xmm2, %xmm13
mulps %xmm6, %xmm12
subps %xmm13, %xmm12
movaps %xmm10, %xmm1
mulps %xmm4, %xmm1
movaps %xmm6, %xmm14
mulps %xmm3, %xmm14
subps %xmm1, %xmm14
mulps %xmm2, %xmm3
movaps %xmm10, %xmm15
mulps %xmm11, %xmm15
subps %xmm3, %xmm15
movaps %xmm5, %xmm13
mulps %xmm6, %xmm11
mulps %xmm2, %xmm4
subps %xmm11, %xmm4
movaps 0x10(%rsp), %xmm3
mulps %xmm3, %xmm10
mulps %xmm8, %xmm6
addps %xmm10, %xmm6
mulps %xmm5, %xmm2
addps %xmm6, %xmm2
divps %xmm2, %xmm13
divps %xmm2, %xmm9
divps %xmm2, %xmm14
divps %xmm2, %xmm8
divps %xmm2, %xmm7
divps %xmm2, %xmm15
divps %xmm2, %xmm3
divps %xmm2, %xmm12
divps %xmm2, %xmm4
movaps %xmm0, %xmm1
mulps %xmm3, %xmm1
movaps %xmm3, %xmm5
movaps %xmm0, %xmm2
movaps %xmm12, 0x10(%rsp)
mulps %xmm12, %xmm2
movaps (%rsp), %xmm10
movaps 0x40(%rsp), %xmm11
movaps %xmm11, %xmm3
mulps %xmm8, %xmm3
addps %xmm1, %xmm3
movaps %xmm11, %xmm6
movaps %xmm7, 0x20(%rsp)
mulps %xmm7, %xmm6
addps %xmm2, %xmm6
mulps %xmm4, %xmm0
mulps %xmm15, %xmm11
addps %xmm0, %xmm11
movaps %xmm10, %xmm0
mulps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps %xmm10, %xmm1
mulps %xmm9, %xmm1
addps %xmm6, %xmm1
mulps %xmm14, %xmm10
addps %xmm11, %xmm10
movaps %xmm5, %xmm12
jmp 0x1ccbaa
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movsd 0x4(%r9,%r10), %xmm0
movss (%r9,%r10), %xmm2
shufps $0x4c, %xmm0, %xmm2 # xmm2 = xmm2[0,3],xmm0[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm0
movss 0xc(%r9,%r10), %xmm12
shufps $0x4c, %xmm0, %xmm12 # xmm12 = xmm12[0,3],xmm0[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm0
movss 0x18(%r9,%r10), %xmm13
shufps $0x4c, %xmm0, %xmm13 # xmm13 = xmm13[0,3],xmm0[0,1]
shufps $0x78, %xmm13, %xmm13 # xmm13 = xmm13[0,2,3,1]
movsd 0x28(%r9,%r10), %xmm0
movss 0x24(%r9,%r10), %xmm8
shufps $0x4c, %xmm0, %xmm8 # xmm8 = xmm8[0,3],xmm0[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
jmp 0x1cdaac
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movsd 0x10(%r9,%r10), %xmm13
insertps $0x20, 0x8(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],xmm13[3]
movsd 0x34(%r9,%r10), %xmm0
movss (%r9,%r10), %xmm7
movss 0xc(%r9,%r10), %xmm12
movlhps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm7 # xmm7 = xmm7[0,2],xmm0[1,3]
movss 0x18(%r9,%r10), %xmm8
movsd 0x1c(%r9,%r10), %xmm0
movlhps %xmm0, %xmm8 # xmm8 = xmm8[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm8 # xmm8 = xmm8[0,2],xmm0[1,3]
movss 0x24(%r9,%r10), %xmm3
movss 0x28(%r9,%r10), %xmm2
movss 0x2c(%r9,%r10), %xmm0
movss 0x30(%r9,%r10), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d1ed38(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d1ed30(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm8 # xmm8 = xmm8[0,1,2],xmm3[0]
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
movaps %xmm7, %xmm2
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],xmm12[2,3]
insertps $0x30, %xmm4, %xmm13 # xmm13 = xmm13[0,1,2],xmm4[0]
insertps $0x20, 0x3c(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
insertps $0x30, %xmm0, %xmm12 # xmm12 = xmm12[0,1,2],xmm0[0]
jmp 0x1cdaac
movq (%rdx), %r9
movq 0x10(%rdx), %r10
imulq %rcx, %r10
movss (%r9,%r10), %xmm2
movss 0x4(%r9,%r10), %xmm12
movss 0x8(%r9,%r10), %xmm13
movss 0xc(%r9,%r10), %xmm8
insertps $0x1c, 0x10(%r9,%r10), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm13 # xmm13 = xmm13[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm13 # xmm13 = xmm13[0,1],mem[0],zero
insertps $0x1c, 0x1c(%r9,%r10), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
movaps %xmm2, %xmm0
movaps %xmm2, %xmm1
shufps $0x0, %xmm2, %xmm1 # xmm1 = xmm1[0,0],xmm2[0,0]
movaps %xmm2, %xmm5
shufps $0x55, %xmm2, %xmm5 # xmm5 = xmm5[1,1],xmm2[1,1]
movaps %xmm2, %xmm3
shufps $0xaa, %xmm2, %xmm3 # xmm3 = xmm3[2,2],xmm2[2,2]
shufps $0xff, %xmm2, %xmm0 # xmm0 = xmm0[3,3],xmm2[3,3]
movaps %xmm0, %xmm15
movaps %xmm12, %xmm2
shufps $0x0, %xmm12, %xmm2 # xmm2 = xmm2[0,0],xmm12[0,0]
movaps %xmm12, %xmm11
shufps $0x55, %xmm12, %xmm11 # xmm11 = xmm11[1,1],xmm12[1,1]
movaps %xmm12, %xmm4
shufps $0xaa, %xmm12, %xmm4 # xmm4 = xmm4[2,2],xmm12[2,2]
shufps $0xff, %xmm12, %xmm12 # xmm12 = xmm12[3,3,3,3]
movaps %xmm13, %xmm0
shufps $0x0, %xmm13, %xmm0 # xmm0 = xmm0[0,0],xmm13[0,0]
movaps %xmm13, %xmm9
shufps $0x55, %xmm13, %xmm9 # xmm9 = xmm9[1,1],xmm13[1,1]
movaps %xmm13, %xmm10
shufps $0xaa, %xmm13, %xmm10 # xmm10 = xmm10[2,2],xmm13[2,2]
shufps $0xff, %xmm13, %xmm13 # xmm13 = xmm13[3,3,3,3]
movaps %xmm8, %xmm6
shufps $0x0, %xmm8, %xmm6 # xmm6 = xmm6[0,0],xmm8[0,0]
movaps %xmm8, %xmm7
shufps $0x55, %xmm8, %xmm7 # xmm7 = xmm7[1,1],xmm8[1,1]
movaps %xmm8, %xmm14
shufps $0xaa, %xmm8, %xmm14 # xmm14 = xmm14[2,2],xmm8[2,2]
shufps $0xff, %xmm8, %xmm8 # xmm8 = xmm8[3,3,3,3]
movl 0x58(%rsi,%rdi), %esi
cmpl $0x9134, %esi # imm = 0x9134
movaps %xmm0, 0x20(%rsp)
movaps %xmm1, 0xc0(%rsp)
movaps %xmm2, 0x10(%rsp)
movaps %xmm3, 0x60(%rsp)
movaps %xmm4, 0x50(%rsp)
movaps %xmm5, 0x40(%rsp)
movaps %xmm6, 0xa0(%rsp)
movaps %xmm7, 0xf0(%rsp)
movaps %xmm9, 0xe0(%rsp)
movaps %xmm10, 0xd0(%rsp)
movaps %xmm11, 0xb0(%rsp)
movaps %xmm14, 0x110(%rsp)
je 0x1cdd2e
cmpl $0x9234, %esi # imm = 0x9234
je 0x1cdbd5
cmpl $0xb001, %esi # imm = 0xB001
je 0x1cdc3b
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1cdd97
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movaps (%rsi,%rcx), %xmm10
movaps 0x10(%rsi,%rcx), %xmm9
movaps 0x20(%rsi,%rcx), %xmm7
movaps 0x30(%rsi,%rcx), %xmm11
jmp 0x1cdd97
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movsd 0x4(%rsi,%rcx), %xmm0
movss (%rsi,%rcx), %xmm10
shufps $0x4c, %xmm0, %xmm10 # xmm10 = xmm10[0,3],xmm0[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x10(%rsi,%rcx), %xmm0
movss 0xc(%rsi,%rcx), %xmm9
shufps $0x4c, %xmm0, %xmm9 # xmm9 = xmm9[0,3],xmm0[0,1]
shufps $0x78, %xmm9, %xmm9 # xmm9 = xmm9[0,2,3,1]
movsd 0x1c(%rsi,%rcx), %xmm0
movss 0x18(%rsi,%rcx), %xmm7
shufps $0x4c, %xmm0, %xmm7 # xmm7 = xmm7[0,3],xmm0[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
movsd 0x28(%rsi,%rcx), %xmm0
movss 0x24(%rsi,%rcx), %xmm11
shufps $0x4c, %xmm0, %xmm11 # xmm11 = xmm11[0,3],xmm0[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
jmp 0x1cdd97
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movsd 0x10(%rsi,%rcx), %xmm7
insertps $0x20, 0x8(%rsi,%rcx), %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3]
movsd 0x34(%rsi,%rcx), %xmm0
movss (%rsi,%rcx), %xmm10
movss 0xc(%rsi,%rcx), %xmm9
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x18(%rsi,%rcx), %xmm11
movsd 0x1c(%rsi,%rcx), %xmm0
movlhps %xmm0, %xmm11 # xmm11 = xmm11[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm11 # xmm11 = xmm11[0,2],xmm0[1,3]
movss 0x24(%rsi,%rcx), %xmm3
movss 0x28(%rsi,%rcx), %xmm2
movss 0x2c(%rsi,%rcx), %xmm0
movss 0x30(%rsi,%rcx), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d1ea45(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d1ea3d(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm11 # xmm11 = xmm11[0,1,2],xmm3[0]
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm10 # xmm10 = xmm10[0,1,2],xmm2[0]
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0],mem[0],xmm9[2,3]
insertps $0x30, %xmm4, %xmm7 # xmm7 = xmm7[0,1,2],xmm4[0]
insertps $0x20, 0x3c(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
insertps $0x30, %xmm0, %xmm9 # xmm9 = xmm9[0,1,2],xmm0[0]
jmp 0x1cdd97
movq 0x38(%rdx), %rsi
imulq 0x48(%rdx), %rcx
movss (%rsi,%rcx), %xmm10
movss 0x4(%rsi,%rcx), %xmm9
movss 0x8(%rsi,%rcx), %xmm7
movss 0xc(%rsi,%rcx), %xmm11
insertps $0x1c, 0x10(%rsi,%rcx), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rcx), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rcx), %xmm9 # xmm9 = xmm9[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rcx), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rcx), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rcx), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
movaps %xmm10, 0x1a0(%rsp)
movaps %xmm10, 0x1b0(%rsp)
movaps %xmm10, 0x160(%rsp)
shufps $0xff, %xmm10, %xmm10 # xmm10 = xmm10[3,3,3,3]
movaps %xmm11, 0x100(%rsp)
movaps %xmm11, 0x140(%rsp)
movaps %xmm11, 0x170(%rsp)
shufps $0xff, %xmm11, %xmm11 # xmm11 = xmm11[3,3,3,3]
movaps %xmm8, %xmm0
mulps %xmm11, %xmm0
movaps %xmm15, %xmm1
movaps %xmm15, 0x80(%rsp)
mulps %xmm10, %xmm1
addps %xmm0, %xmm1
movaps %xmm9, 0x210(%rsp)
movaps %xmm9, 0x220(%rsp)
movaps %xmm9, 0x120(%rsp)
shufps $0xff, %xmm9, %xmm9 # xmm9 = xmm9[3,3,3,3]
movaps %xmm12, 0x90(%rsp)
mulps %xmm9, %xmm12
addps %xmm1, %xmm12
movaps %xmm7, 0x130(%rsp)
movaps %xmm7, 0x150(%rsp)
movaps %xmm7, 0x1c0(%rsp)
shufps $0xff, %xmm7, %xmm7 # xmm7 = xmm7[3,3,3,3]
movaps %xmm13, %xmm3
mulps %xmm7, %xmm3
addps %xmm12, %xmm3
movaps 0x1d1e87e(%rip), %xmm6 # 0x1eec6d0
movaps %xmm3, %xmm14
xorps %xmm6, %xmm14
movaps %xmm3, %xmm0
cmpltps %xmm14, %xmm0
movaps %xmm11, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm11
movaps %xmm10, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm10
movaps %xmm10, 0x200(%rsp)
movaps %xmm9, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm9
movaps %xmm7, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm7
movaps %xmm7, (%rsp)
maxps %xmm3, %xmm14
andps 0x1d1e814(%rip), %xmm3 # 0x1eec6c0
movaps 0x1d23d1d(%rip), %xmm0 # 0x1ef1bd0
mulps %xmm3, %xmm0
addps 0x1d23d23(%rip), %xmm0 # 0x1ef1be0
mulps %xmm3, %xmm0
addps 0x1d23d29(%rip), %xmm0 # 0x1ef1bf0
mulps %xmm3, %xmm0
addps 0x1d23d2f(%rip), %xmm0 # 0x1ef1c00
mulps %xmm3, %xmm0
addps 0x1d23d35(%rip), %xmm0 # 0x1ef1c10
mulps %xmm3, %xmm0
addps 0x1d23d3b(%rip), %xmm0 # 0x1ef1c20
movaps 0x1d1eb23(%rip), %xmm10 # 0x1eeca10
movaps %xmm10, %xmm2
subps %xmm3, %xmm2
sqrtps %xmm2, %xmm2
mulps %xmm0, %xmm2
movaps 0x1d23d2f(%rip), %xmm4 # 0x1ef1c30
movaps %xmm4, %xmm0
subps %xmm2, %xmm0
xorps %xmm7, %xmm7
maxps %xmm0, %xmm7
xorps %xmm2, %xmm2
movaps %xmm14, %xmm0
cmpltps %xmm2, %xmm0
movaps %xmm7, %xmm5
xorps %xmm6, %xmm5
blendvps %xmm0, %xmm5, %xmm7
movaps %xmm4, %xmm5
subps %xmm7, %xmm5
movaps %xmm3, %xmm0
cmpnleps %xmm10, %xmm0
blendvps %xmm0, 0x1d23d06(%rip), %xmm5 # 0x1ef1c40
movaps %xmm13, 0x70(%rsp)
movaps 0x1d0(%rsp), %xmm13
mulps %xmm13, %xmm5
movaps 0x1d23cfc(%rip), %xmm0 # 0x1ef1c50
mulps %xmm5, %xmm0
roundps $0x1, %xmm0, %xmm3
cvtps2dq %xmm3, %xmm0
mulps %xmm4, %xmm3
subps %xmm3, %xmm5
movaps %xmm9, %xmm15
movaps %xmm9, 0x1e0(%rsp)
movapd 0x1d23ce3(%rip), %xmm9 # 0x1ef1c60
andpd %xmm0, %xmm9
movaps %xmm11, %xmm1
movaps %xmm11, 0x190(%rsp)
movaps %xmm5, %xmm11
mulps %xmm5, %xmm11
movaps 0x1d23cf1(%rip), %xmm12 # 0x1ef1c90
mulps %xmm11, %xmm12
addps 0x1d23cf5(%rip), %xmm12 # 0x1ef1ca0
movaps 0x1d23cfe(%rip), %xmm7 # 0x1ef1cb0
mulps %xmm11, %xmm7
addps 0x1d23d03(%rip), %xmm7 # 0x1ef1cc0
mulps %xmm11, %xmm12
addps 0x1d23d07(%rip), %xmm12 # 0x1ef1cd0
movapd 0x1d1eb4f(%rip), %xmm3 # 0x1eecb20
andpd %xmm3, %xmm0
mulps %xmm11, %xmm7
addps 0x1d23d00(%rip), %xmm7 # 0x1ef1ce0
mulps %xmm11, %xmm12
addps 0x1d23d04(%rip), %xmm12 # 0x1ef1cf0
movapd %xmm0, %xmm4
pcmpeqd %xmm2, %xmm4
mulps %xmm11, %xmm7
addps 0x1d23d01(%rip), %xmm7 # 0x1ef1d00
psubd %xmm0, %xmm2
mulps %xmm11, %xmm12
addps 0x1d23d01(%rip), %xmm12 # 0x1ef1d10
mulps %xmm11, %xmm7
addps 0x1d1eac6(%rip), %xmm7 # 0x1eecae0
mulps %xmm11, %xmm12
mulps %xmm11, %xmm7
addps %xmm10, %xmm12
mulps %xmm5, %xmm12
addps %xmm10, %xmm7
movaps %xmm12, %xmm5
movdqa %xmm2, %xmm0
blendvps %xmm0, %xmm7, %xmm5
movdqa %xmm4, %xmm0
blendvps %xmm0, %xmm7, %xmm12
movapd %xmm9, %xmm0
pcmpgtd %xmm3, %xmm0
movaps %xmm5, %xmm2
xorps %xmm6, %xmm2
blendvps %xmm0, %xmm2, %xmm5
movaps %xmm5, 0x1f0(%rsp)
pcmpeqd %xmm0, %xmm0
paddd %xmm9, %xmm0
pminud %xmm0, %xmm3
pcmpeqd %xmm0, %xmm3
xorps %xmm12, %xmm6
movdqa %xmm3, %xmm0
blendvps %xmm0, %xmm6, %xmm12
movaps %xmm8, %xmm7
mulps %xmm14, %xmm7
subps %xmm1, %xmm7
movaps 0x80(%rsp), %xmm3
mulps %xmm14, %xmm3
movaps 0x200(%rsp), %xmm1
subps %xmm1, %xmm3
movaps %xmm7, %xmm0
mulps %xmm7, %xmm0
movaps %xmm3, %xmm2
mulps %xmm3, %xmm2
addps %xmm0, %xmm2
movaps 0x90(%rsp), %xmm4
mulps %xmm14, %xmm4
subps %xmm15, %xmm4
movaps %xmm4, %xmm0
mulps %xmm4, %xmm0
addps %xmm2, %xmm0
movaps 0x70(%rsp), %xmm6
mulps %xmm14, %xmm6
subps (%rsp), %xmm6
movaps %xmm6, %xmm2
mulps %xmm6, %xmm2
addps %xmm0, %xmm2
rsqrtps %xmm2, %xmm9
mulps 0x1d1e5f4(%rip), %xmm2 # 0x1eec6e0
mulps %xmm9, %xmm2
movaps %xmm9, %xmm15
mulps %xmm9, %xmm9
mulps %xmm2, %xmm9
movaps 0x1d23c1c(%rip), %xmm11 # 0x1ef1d20
mulps %xmm11, %xmm15
subps %xmm9, %xmm15
mulps %xmm15, %xmm7
mulps %xmm5, %xmm7
movaps %xmm8, %xmm0
mulps %xmm12, %xmm0
subps %xmm7, %xmm0
movaps %xmm0, 0x30(%rsp)
movaps 0x190(%rsp), %xmm2
subps %xmm8, %xmm2
mulps %xmm13, %xmm2
addps %xmm8, %xmm2
movaps 0x80(%rsp), %xmm0
subps %xmm0, %xmm1
mulps %xmm13, %xmm1
addps %xmm0, %xmm1
movaps %xmm2, %xmm7
mulps %xmm2, %xmm7
movaps %xmm1, %xmm8
mulps %xmm1, %xmm8
addps %xmm7, %xmm8
movaps 0x90(%rsp), %xmm9
movaps 0x1e0(%rsp), %xmm5
subps %xmm9, %xmm5
mulps %xmm13, %xmm5
addps %xmm9, %xmm5
movaps %xmm5, %xmm7
mulps %xmm5, %xmm7
addps %xmm8, %xmm7
movaps (%rsp), %xmm8
movaps 0x70(%rsp), %xmm0
subps %xmm0, %xmm8
mulps %xmm13, %xmm8
addps %xmm0, %xmm8
movaps %xmm8, (%rsp)
mulps %xmm8, %xmm8
addps %xmm7, %xmm8
rsqrtps %xmm8, %xmm7
mulps 0x1d1e52f(%rip), %xmm8 # 0x1eec6e0
mulps %xmm7, %xmm11
mulps %xmm7, %xmm8
mulps %xmm7, %xmm7
mulps %xmm8, %xmm7
subps %xmm7, %xmm11
mulps %xmm11, %xmm2
cmpnleps 0x1d23b5f(%rip), %xmm14 # 0x1ef1d30
movaps %xmm14, %xmm0
movaps 0x30(%rsp), %xmm7
blendvps %xmm0, %xmm2, %xmm7
movaps %xmm7, 0x30(%rsp)
mulps %xmm15, %xmm3
movaps 0x1f0(%rsp), %xmm2
mulps %xmm2, %xmm3
movaps 0x80(%rsp), %xmm7
mulps %xmm12, %xmm7
subps %xmm3, %xmm7
mulps %xmm11, %xmm1
blendvps %xmm0, %xmm1, %xmm7
mulps %xmm15, %xmm4
mulps %xmm2, %xmm4
movaps %xmm9, %xmm8
mulps %xmm12, %xmm8
subps %xmm4, %xmm8
mulps %xmm11, %xmm5
blendvps %xmm0, %xmm5, %xmm8
mulps %xmm6, %xmm15
mulps %xmm2, %xmm15
mulps 0x70(%rsp), %xmm12
subps %xmm15, %xmm12
mulps (%rsp), %xmm11
blendvps %xmm0, %xmm11, %xmm12
movaps 0x1a0(%rsp), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
subps %xmm13, %xmm10
mulps %xmm13, %xmm0
movaps %xmm0, %xmm1
movaps 0xc0(%rsp), %xmm0
mulps %xmm10, %xmm0
addps %xmm1, %xmm0
movaps 0x1b0(%rsp), %xmm1
shufps $0x55, %xmm1, %xmm1 # xmm1 = xmm1[1,1,1,1]
mulps %xmm13, %xmm1
movaps 0x40(%rsp), %xmm3
mulps %xmm10, %xmm3
addps %xmm1, %xmm3
movaps %xmm3, 0x40(%rsp)
movaps 0x160(%rsp), %xmm1
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x60(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x60(%rsp)
movaps 0x210(%rsp), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x10(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x10(%rsp)
movaps 0x220(%rsp), %xmm1
shufps $0x55, %xmm1, %xmm1 # xmm1 = xmm1[1,1,1,1]
mulps %xmm13, %xmm1
movaps 0xb0(%rsp), %xmm2
mulps %xmm10, %xmm2
addps %xmm1, %xmm2
movaps %xmm2, 0xb0(%rsp)
movaps 0x120(%rsp), %xmm1
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x50(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x50(%rsp)
movaps 0x130(%rsp), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm13, %xmm1
movaps %xmm1, %xmm3
movaps 0x20(%rsp), %xmm1
mulps %xmm10, %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x20(%rsp)
movaps 0x150(%rsp), %xmm11
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
movaps 0x1c0(%rsp), %xmm9
shufps $0xaa, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
movaps 0x100(%rsp), %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
movaps 0x140(%rsp), %xmm3
shufps $0x55, %xmm3, %xmm3 # xmm3 = xmm3[1,1,1,1]
movaps 0x170(%rsp), %xmm1
shufps $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
mulps %xmm13, %xmm11
mulps %xmm13, %xmm9
movaps 0xe0(%rsp), %xmm14
mulps %xmm10, %xmm14
addps %xmm11, %xmm14
movaps %xmm14, 0xe0(%rsp)
movaps 0xd0(%rsp), %xmm11
mulps %xmm10, %xmm11
addps %xmm9, %xmm11
movaps %xmm11, 0xd0(%rsp)
mulps %xmm13, %xmm6
mulps %xmm13, %xmm3
mulps %xmm13, %xmm1
movaps 0xa0(%rsp), %xmm9
mulps %xmm10, %xmm9
addps %xmm6, %xmm9
movaps %xmm9, 0xa0(%rsp)
movaps 0xf0(%rsp), %xmm6
mulps %xmm10, %xmm6
addps %xmm3, %xmm6
movaps %xmm6, 0xf0(%rsp)
mulps 0x110(%rsp), %xmm10
addps %xmm1, %xmm10
movaps %xmm10, 0x90(%rsp)
movaps %xmm7, %xmm3
movaps %xmm7, %xmm15
movaps 0x30(%rsp), %xmm1
movaps %xmm1, %xmm6
movaps %xmm7, %xmm9
movaps %xmm1, %xmm10
movaps %xmm1, %xmm2
mulps %xmm1, %xmm7
mulps %xmm1, %xmm2
mulps %xmm3, %xmm3
movaps %xmm2, %xmm14
addps %xmm3, %xmm14
mulps %xmm8, %xmm15
mulps %xmm12, %xmm6
movaps %xmm15, %xmm1
addps %xmm6, %xmm1
subps %xmm6, %xmm15
movaps %xmm12, %xmm11
mulps %xmm12, %xmm9
mulps %xmm8, %xmm10
mulps %xmm8, %xmm12
movaps %xmm8, %xmm13
mulps %xmm8, %xmm13
subps %xmm13, %xmm14
subps %xmm3, %xmm2
movaps %xmm9, %xmm6
subps %xmm10, %xmm6
addps %xmm9, %xmm10
movaps %xmm10, %xmm8
movaps %xmm12, %xmm10
addps %xmm7, %xmm10
subps %xmm7, %xmm12
movaps %xmm13, %xmm3
addps %xmm2, %xmm3
subps %xmm13, %xmm2
movaps %xmm3, %xmm13
mulps %xmm11, %xmm11
subps %xmm11, %xmm14
subps %xmm11, %xmm13
addps %xmm11, %xmm2
addps %xmm1, %xmm1
addps %xmm6, %xmm6
xorps %xmm3, %xmm3
movaps %xmm6, %xmm7
mulps %xmm3, %xmm7
movaps %xmm1, %xmm9
mulps %xmm3, %xmm9
addps %xmm9, %xmm6
addps %xmm7, %xmm9
addps %xmm1, %xmm7
addps %xmm14, %xmm9
movaps %xmm9, %xmm5
movaps %xmm9, (%rsp)
mulps %xmm3, %xmm14
addps %xmm14, %xmm7
addps %xmm6, %xmm14
addps %xmm10, %xmm10
movaps %xmm10, %xmm9
mulps %xmm3, %xmm9
movaps %xmm13, %xmm1
mulps %xmm3, %xmm1
addps %xmm9, %xmm13
addps %xmm1, %xmm9
addps %xmm10, %xmm1
addps %xmm15, %xmm15
addps %xmm15, %xmm9
mulps %xmm3, %xmm15
addps %xmm15, %xmm13
addps %xmm1, %xmm15
addps %xmm12, %xmm12
movaps %xmm2, %xmm10
mulps %xmm3, %xmm10
movaps %xmm12, %xmm1
mulps %xmm3, %xmm1
movaps %xmm1, %xmm4
addps %xmm10, %xmm4
addps %xmm12, %xmm10
addps %xmm2, %xmm1
addps %xmm8, %xmm8
addps %xmm8, %xmm4
movaps %xmm4, %xmm2
movaps %xmm4, 0x80(%rsp)
mulps %xmm3, %xmm8
addps %xmm8, %xmm10
addps %xmm1, %xmm8
mulps %xmm3, %xmm2
movaps %xmm9, %xmm1
mulps %xmm3, %xmm1
addps %xmm2, %xmm1
movaps %xmm0, %xmm6
mulps %xmm5, %xmm6
addps %xmm1, %xmm6
movaps %xmm10, %xmm12
mulps %xmm3, %xmm12
movaps %xmm13, %xmm4
mulps %xmm3, %xmm13
addps %xmm12, %xmm13
movaps %xmm0, %xmm11
mulps %xmm7, %xmm11
addps %xmm13, %xmm11
movaps 0x40(%rsp), %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x40(%rsp)
movaps 0x60(%rsp), %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x60(%rsp)
movaps 0x50(%rsp), %xmm1
addps %xmm3, %xmm1
movaps %xmm1, 0x50(%rsp)
movaps %xmm8, %xmm13
mulps %xmm3, %xmm13
movaps %xmm15, 0x70(%rsp)
mulps %xmm15, %xmm3
addps %xmm13, %xmm3
mulps %xmm14, %xmm0
movaps %xmm14, 0x30(%rsp)
addps %xmm3, %xmm0
movaps %xmm0, 0xc0(%rsp)
movaps 0xb0(%rsp), %xmm3
movaps %xmm3, %xmm1
mulps %xmm9, %xmm1
addps %xmm2, %xmm1
movaps %xmm3, %xmm2
mulps %xmm4, %xmm2
addps %xmm12, %xmm2
mulps %xmm15, %xmm3
addps %xmm13, %xmm3
movaps 0x10(%rsp), %xmm0
movaps %xmm0, %xmm5
movaps (%rsp), %xmm15
mulps %xmm15, %xmm5
addps %xmm1, %xmm5
movaps %xmm5, 0xb0(%rsp)
movaps %xmm0, %xmm12
mulps %xmm7, %xmm12
addps %xmm2, %xmm12
mulps %xmm14, %xmm0
addps %xmm3, %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0xd0(%rsp), %xmm5
movaps %xmm5, %xmm2
mulps 0x80(%rsp), %xmm2
movaps 0xe0(%rsp), %xmm3
movaps %xmm3, %xmm13
mulps %xmm9, %xmm13
addps %xmm2, %xmm13
movaps %xmm5, %xmm2
mulps %xmm10, %xmm2
movaps %xmm3, %xmm14
mulps %xmm4, %xmm14
addps %xmm2, %xmm14
mulps %xmm8, %xmm5
mulps 0x70(%rsp), %xmm3
addps %xmm5, %xmm3
movaps 0x20(%rsp), %xmm0
movaps %xmm0, %xmm5
mulps %xmm15, %xmm5
addps %xmm13, %xmm5
movaps %xmm0, %xmm1
mulps %xmm7, %xmm1
addps %xmm14, %xmm1
movaps 0x30(%rsp), %xmm13
mulps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps 0x90(%rsp), %xmm2
movaps 0x80(%rsp), %xmm3
mulps %xmm2, %xmm3
mulps %xmm2, %xmm10
mulps %xmm2, %xmm8
movaps 0xf0(%rsp), %xmm2
mulps %xmm2, %xmm9
addps %xmm3, %xmm9
mulps %xmm2, %xmm4
addps %xmm10, %xmm4
movaps 0x70(%rsp), %xmm3
mulps %xmm2, %xmm3
addps %xmm8, %xmm3
movaps %xmm3, %xmm8
movaps 0xa0(%rsp), %xmm2
mulps %xmm2, %xmm15
addps %xmm9, %xmm15
addps 0x40(%rsp), %xmm15
movaps %xmm15, (%rsp)
mulps %xmm2, %xmm7
addps %xmm4, %xmm7
addps 0x60(%rsp), %xmm7
movaps %xmm13, %xmm3
mulps %xmm2, %xmm3
addps %xmm8, %xmm3
addps 0x50(%rsp), %xmm3
movaps %xmm3, 0x30(%rsp)
movaps 0x10(%rsp), %xmm4
movaps %xmm4, %xmm2
mulps %xmm1, %xmm2
movaps %xmm12, %xmm13
mulps %xmm0, %xmm13
subps %xmm2, %xmm13
movaps 0xb0(%rsp), %xmm10
movaps %xmm10, %xmm2
mulps %xmm0, %xmm2
movaps %xmm4, %xmm8
mulps %xmm5, %xmm8
subps %xmm2, %xmm8
movaps %xmm12, %xmm2
mulps %xmm5, %xmm2
mulps %xmm1, %xmm10
subps %xmm2, %xmm10
movaps %xmm0, %xmm2
mulps %xmm11, %xmm2
movaps %xmm1, %xmm9
movaps 0xc0(%rsp), %xmm3
mulps %xmm3, %xmm9
subps %xmm2, %xmm9
movaps %xmm5, %xmm2
mulps %xmm3, %xmm2
mulps %xmm6, %xmm0
subps %xmm2, %xmm0
mulps %xmm6, %xmm1
mulps %xmm11, %xmm5
subps %xmm1, %xmm5
movaps %xmm3, %xmm2
mulps %xmm12, %xmm2
movaps %xmm11, %xmm14
mulps %xmm4, %xmm14
subps %xmm2, %xmm14
mulps %xmm6, %xmm4
movaps %xmm3, %xmm2
movaps %xmm3, %xmm15
movaps 0xb0(%rsp), %xmm1
mulps %xmm1, %xmm15
subps %xmm4, %xmm15
mulps %xmm11, %xmm1
mulps %xmm6, %xmm12
subps %xmm1, %xmm12
movaps %xmm12, %xmm4
mulps %xmm10, %xmm2
mulps %xmm8, %xmm11
addps %xmm2, %xmm11
mulps %xmm13, %xmm6
addps %xmm11, %xmm6
divps %xmm6, %xmm13
divps %xmm6, %xmm9
divps %xmm6, %xmm14
divps %xmm6, %xmm8
divps %xmm6, %xmm0
divps %xmm6, %xmm15
divps %xmm6, %xmm10
divps %xmm6, %xmm5
divps %xmm6, %xmm4
movaps 0x30(%rsp), %xmm3
movaps %xmm3, %xmm1
mulps %xmm10, %xmm1
movaps %xmm3, %xmm2
movaps %xmm3, %xmm11
movaps %xmm5, 0x10(%rsp)
mulps %xmm5, %xmm2
movaps %xmm7, %xmm3
mulps %xmm8, %xmm3
addps %xmm1, %xmm3
movaps %xmm7, %xmm6
movaps %xmm0, 0x20(%rsp)
mulps %xmm0, %xmm6
addps %xmm2, %xmm6
mulps %xmm4, %xmm11
mulps %xmm15, %xmm7
addps %xmm11, %xmm7
movaps %xmm10, %xmm12
movaps (%rsp), %xmm10
movaps %xmm10, %xmm0
mulps %xmm13, %xmm0
addps %xmm3, %xmm0
movaps %xmm10, %xmm1
mulps %xmm9, %xmm1
addps %xmm6, %xmm1
mulps %xmm14, %xmm10
addps %xmm7, %xmm10
jmp 0x1ccbaa
testl %esi, %esi
je 0x1cf0f4
movq 0x88(%rdx), %rdx
movaps 0x250(%rsp), %xmm0
movaps %xmm0, (%rsp)
movzbl %sil, %esi
andl $0xf, %esi
bsfq %rsi, %rsi
movslq %esi, %rsi
movslq 0x230(%rsp,%rsi,4), %rsi
movd %esi, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpeqd %xmm11, %xmm0
pand (%rsp), %xmm0
imulq $0x38, %rsi, %rdi
leaq (%rdx,%rdi), %rsi
movl 0x20(%rdx,%rdi), %r9d
cmpl $0x9134, %r9d # imm = 0x9134
je 0x1ce995
cmpl $0x9234, %r9d # imm = 0x9234
je 0x1ce922
cmpl $0xb001, %r9d # imm = 0xB001
je 0x1cea10
cmpl $0x9244, %r9d # imm = 0x9244
jne 0x1ceb36
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movaps (%r9,%r10), %xmm8
movaps 0x10(%r9,%r10), %xmm12
movaps 0x20(%r9,%r10), %xmm2
movaps %xmm2, 0x180(%rsp)
movaps 0x30(%r9,%r10), %xmm2
jmp 0x1ceb36
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movsd 0x4(%r9,%r10), %xmm2
movss (%r9,%r10), %xmm8
shufps $0x4c, %xmm2, %xmm8 # xmm8 = xmm8[0,3],xmm2[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x10(%r9,%r10), %xmm2
movss 0xc(%r9,%r10), %xmm12
shufps $0x4c, %xmm2, %xmm12 # xmm12 = xmm12[0,3],xmm2[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x1c(%r9,%r10), %xmm2
movss 0x18(%r9,%r10), %xmm5
shufps $0x4c, %xmm2, %xmm5 # xmm5 = xmm5[0,3],xmm2[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
movaps %xmm5, 0x180(%rsp)
movsd 0x28(%r9,%r10), %xmm5
movss 0x24(%r9,%r10), %xmm2
shufps $0x4c, %xmm5, %xmm2 # xmm2 = xmm2[0,3],xmm5[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x1ceb36
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movss (%r9,%r10), %xmm8
movss 0x4(%r9,%r10), %xmm12
movss 0x8(%r9,%r10), %xmm5
movss 0xc(%r9,%r10), %xmm2
insertps $0x1c, 0x10(%r9,%r10), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x20(%r9,%r10), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x14(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x24(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x18(%r9,%r10), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x28(%r9,%r10), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
movaps %xmm5, 0x180(%rsp)
insertps $0x1c, 0x1c(%r9,%r10), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x2c(%r9,%r10), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
jmp 0x1ceb36
movq (%rsi), %r9
movq 0x10(%rsi), %r10
imulq %rcx, %r10
movsd 0x10(%r9,%r10), %xmm6
insertps $0x20, 0x8(%r9,%r10), %xmm6 # xmm6 = xmm6[0,1],mem[0],xmm6[3]
movsd 0x34(%r9,%r10), %xmm2
movss (%r9,%r10), %xmm8
movss 0xc(%r9,%r10), %xmm12
movlhps %xmm2, %xmm8 # xmm8 = xmm8[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm8 # xmm8 = xmm8[0,2],xmm2[1,3]
movss 0x18(%r9,%r10), %xmm2
movsd 0x1c(%r9,%r10), %xmm5
movlhps %xmm5, %xmm2 # xmm2 = xmm2[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm2 # xmm2 = xmm2[0,2],xmm5[1,3]
movss 0x24(%r9,%r10), %xmm10
movss 0x28(%r9,%r10), %xmm9
movss 0x2c(%r9,%r10), %xmm5
movss 0x30(%r9,%r10), %xmm7
movss %xmm7, 0x180(%rsp)
movaps %xmm9, %xmm13
mulss %xmm9, %xmm13
movaps %xmm10, %xmm15
mulss %xmm10, %xmm15
addss %xmm13, %xmm15
movaps %xmm5, %xmm13
mulss %xmm5, %xmm13
addss %xmm15, %xmm13
movaps %xmm7, %xmm15
mulss %xmm7, %xmm15
addss %xmm13, %xmm15
movaps %xmm15, %xmm13
rsqrtss %xmm15, %xmm13
movaps %xmm13, %xmm7
mulss 0x1d1dc4e(%rip), %xmm7 # 0x1eec718
mulss 0x1d1dc49(%rip), %xmm15 # 0x1eec71c
mulss %xmm13, %xmm15
mulss %xmm13, %xmm13
mulss %xmm15, %xmm13
addss %xmm7, %xmm13
mulss %xmm13, %xmm10
insertps $0x30, %xmm10, %xmm2 # xmm2 = xmm2[0,1,2],xmm10[0]
mulss %xmm13, %xmm9
insertps $0x30, %xmm9, %xmm8 # xmm8 = xmm8[0,1,2],xmm9[0]
mulss %xmm13, %xmm5
mulss 0x180(%rsp), %xmm13
insertps $0x10, 0x4(%r9,%r10), %xmm12 # xmm12 = xmm12[0],mem[0],xmm12[2,3]
insertps $0x30, %xmm13, %xmm6 # xmm6 = xmm6[0,1,2],xmm13[0]
movaps %xmm6, 0x180(%rsp)
insertps $0x20, 0x3c(%r9,%r10), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
insertps $0x30, %xmm5, %xmm12 # xmm12 = xmm12[0,1,2],xmm5[0]
movaps %xmm8, %xmm5
shufps $0x0, %xmm8, %xmm5 # xmm5 = xmm5[0,0],xmm8[0,0]
movaps 0xc0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xc0(%rsp)
movaps %xmm8, %xmm5
shufps $0x55, %xmm8, %xmm5 # xmm5 = xmm5[1,1],xmm8[1,1]
movaps 0x40(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x40(%rsp)
movaps %xmm8, %xmm5
shufps $0xaa, %xmm8, %xmm5 # xmm5 = xmm5[2,2],xmm8[2,2]
movaps 0x60(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x60(%rsp)
movaps %xmm8, %xmm5
shufps $0xff, %xmm8, %xmm5 # xmm5 = xmm5[3,3],xmm8[3,3]
movaps 0x90(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x90(%rsp)
movaps %xmm12, %xmm5
shufps $0x0, %xmm12, %xmm5 # xmm5 = xmm5[0,0],xmm12[0,0]
movaps 0x10(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x10(%rsp)
movaps %xmm12, %xmm5
shufps $0x55, %xmm12, %xmm5 # xmm5 = xmm5[1,1],xmm12[1,1]
movaps 0xb0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xb0(%rsp)
movaps %xmm12, %xmm5
shufps $0xaa, %xmm12, %xmm5 # xmm5 = xmm5[2,2],xmm12[2,2]
movaps 0x50(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x50(%rsp)
movaps %xmm12, %xmm5
shufps $0xff, %xmm12, %xmm5 # xmm5 = xmm5[3,3],xmm12[3,3]
movaps 0x70(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x70(%rsp)
movaps 0x180(%rsp), %xmm7
movaps %xmm7, %xmm5
shufps $0x0, %xmm7, %xmm5 # xmm5 = xmm5[0,0],xmm7[0,0]
movaps 0x20(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x20(%rsp)
movaps %xmm7, %xmm5
shufps $0x55, %xmm7, %xmm5 # xmm5 = xmm5[1,1],xmm7[1,1]
movaps 0xe0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xe0(%rsp)
movaps %xmm7, %xmm5
shufps $0xaa, %xmm7, %xmm5 # xmm5 = xmm5[2,2],xmm7[2,2]
movaps 0xd0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xd0(%rsp)
movaps %xmm7, %xmm5
shufps $0xff, %xmm7, %xmm5 # xmm5 = xmm5[3,3],xmm7[3,3]
movaps 0x80(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x80(%rsp)
movaps %xmm2, %xmm5
shufps $0x0, %xmm2, %xmm5 # xmm5 = xmm5[0,0],xmm2[0,0]
movaps 0xa0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xa0(%rsp)
movaps %xmm2, %xmm5
shufps $0x55, %xmm2, %xmm5 # xmm5 = xmm5[1,1],xmm2[1,1]
movaps 0xf0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0xf0(%rsp)
movaps %xmm2, %xmm5
shufps $0xaa, %xmm2, %xmm5 # xmm5 = xmm5[2,2],xmm2[2,2]
movaps 0x160(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x160(%rsp)
movaps %xmm2, %xmm5
shufps $0xff, %xmm2, %xmm5 # xmm5 = xmm5[3,3],xmm2[3,3]
movaps 0x140(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x140(%rsp)
movdqa (%rsp), %xmm5
pxor %xmm0, %xmm5
movdqa %xmm5, (%rsp)
movl 0x58(%rdx,%rdi), %edi
cmpl $0x9134, %edi # imm = 0x9134
je 0x1cedb0
cmpl $0x9234, %edi # imm = 0x9234
je 0x1ced4d
cmpl $0xb001, %edi # imm = 0xB001
je 0x1cee1b
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1cef21
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movaps (%rdi,%rsi), %xmm14
movaps 0x10(%rdi,%rsi), %xmm3
movaps 0x20(%rdi,%rsi), %xmm1
movaps 0x30(%rdi,%rsi), %xmm4
jmp 0x1cef21
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movsd 0x4(%rdi,%rsi), %xmm1
movss (%rdi,%rsi), %xmm14
shufps $0x4c, %xmm1, %xmm14 # xmm14 = xmm14[0,3],xmm1[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%rdi,%rsi), %xmm1
movss 0xc(%rdi,%rsi), %xmm3
shufps $0x4c, %xmm1, %xmm3 # xmm3 = xmm3[0,3],xmm1[0,1]
shufps $0x78, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
movsd 0x1c(%rdi,%rsi), %xmm4
movss 0x18(%rdi,%rsi), %xmm1
shufps $0x4c, %xmm4, %xmm1 # xmm1 = xmm1[0,3],xmm4[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x28(%rdi,%rsi), %xmm5
movss 0x24(%rdi,%rsi), %xmm4
shufps $0x4c, %xmm5, %xmm4 # xmm4 = xmm4[0,3],xmm5[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
jmp 0x1cef21
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movss (%rdi,%rsi), %xmm14
movss 0x4(%rdi,%rsi), %xmm3
movss 0x8(%rdi,%rsi), %xmm1
movss 0xc(%rdi,%rsi), %xmm4
insertps $0x1c, 0x10(%rdi,%rsi), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdi,%rsi), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdi,%rsi), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdi,%rsi), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdi,%rsi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdi,%rsi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdi,%rsi), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdi,%rsi), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
jmp 0x1cef21
movq 0x38(%rsi), %rdi
movq 0x48(%rsi), %rsi
imulq %rcx, %rsi
movsd 0x10(%rdi,%rsi), %xmm1
insertps $0x20, 0x8(%rdi,%rsi), %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
movsd 0x34(%rdi,%rsi), %xmm4
movss (%rdi,%rsi), %xmm14
movss 0xc(%rdi,%rsi), %xmm3
movlhps %xmm4, %xmm14 # xmm14 = xmm14[0],xmm4[0]
shufps $0xd8, %xmm4, %xmm14 # xmm14 = xmm14[0,2],xmm4[1,3]
movss 0x18(%rdi,%rsi), %xmm4
movsd 0x1c(%rdi,%rsi), %xmm5
movlhps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm4 # xmm4 = xmm4[0,2],xmm5[1,3]
movss 0x24(%rdi,%rsi), %xmm10
movss 0x28(%rdi,%rsi), %xmm9
movss 0x2c(%rdi,%rsi), %xmm5
movss 0x30(%rdi,%rsi), %xmm7
movaps %xmm9, %xmm13
mulss %xmm9, %xmm13
movaps %xmm10, %xmm15
mulss %xmm10, %xmm15
addss %xmm13, %xmm15
movaps %xmm5, %xmm13
mulss %xmm5, %xmm13
addss %xmm15, %xmm13
movaps %xmm7, %xmm15
mulss %xmm7, %xmm15
addss %xmm13, %xmm15
movaps %xmm15, %xmm13
rsqrtss %xmm15, %xmm13
movaps %xmm13, %xmm6
mulss 0x1d1d853(%rip), %xmm6 # 0x1eec718
mulss 0x1d1d84e(%rip), %xmm15 # 0x1eec71c
mulss %xmm13, %xmm15
mulss %xmm13, %xmm13
mulss %xmm15, %xmm13
addss %xmm6, %xmm13
mulss %xmm13, %xmm10
insertps $0x30, %xmm10, %xmm4 # xmm4 = xmm4[0,1,2],xmm10[0]
mulss %xmm13, %xmm9
insertps $0x30, %xmm9, %xmm14 # xmm14 = xmm14[0,1,2],xmm9[0]
mulss %xmm13, %xmm5
mulss %xmm7, %xmm13
insertps $0x10, 0x4(%rdi,%rsi), %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
insertps $0x30, %xmm13, %xmm1 # xmm1 = xmm1[0,1,2],xmm13[0]
insertps $0x20, 0x3c(%rdi,%rsi), %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
insertps $0x30, %xmm5, %xmm3 # xmm3 = xmm3[0,1,2],xmm5[0]
movaps %xmm14, %xmm5
shufps $0x0, %xmm14, %xmm5 # xmm5 = xmm5[0,0],xmm14[0,0]
movaps 0x1e0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1e0(%rsp)
movaps %xmm14, %xmm5
shufps $0x55, %xmm14, %xmm5 # xmm5 = xmm5[1,1],xmm14[1,1]
movaps 0x1f0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1f0(%rsp)
movaps %xmm14, %xmm5
shufps $0xaa, %xmm14, %xmm5 # xmm5 = xmm5[2,2],xmm14[2,2]
movaps 0x200(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x200(%rsp)
movaps %xmm14, %xmm5
shufps $0xff, %xmm14, %xmm5 # xmm5 = xmm5[3,3],xmm14[3,3]
movaps 0x100(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x100(%rsp)
movaps %xmm3, %xmm5
shufps $0x0, %xmm3, %xmm5 # xmm5 = xmm5[0,0],xmm3[0,0]
movaps 0x210(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x210(%rsp)
movaps %xmm3, %xmm5
shufps $0x55, %xmm3, %xmm5 # xmm5 = xmm5[1,1],xmm3[1,1]
movaps 0x220(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x220(%rsp)
movaps %xmm3, %xmm5
shufps $0xaa, %xmm3, %xmm5 # xmm5 = xmm5[2,2],xmm3[2,2]
movaps 0x1a0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1a0(%rsp)
movaps %xmm3, %xmm5
shufps $0xff, %xmm3, %xmm5 # xmm5 = xmm5[3,3],xmm3[3,3]
movaps 0x110(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x110(%rsp)
movaps %xmm1, %xmm5
shufps $0x0, %xmm1, %xmm5 # xmm5 = xmm5[0,0],xmm1[0,0]
movaps 0x120(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x120(%rsp)
movaps %xmm1, %xmm5
shufps $0x55, %xmm1, %xmm5 # xmm5 = xmm5[1,1],xmm1[1,1]
movaps 0x130(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x130(%rsp)
movaps %xmm1, %xmm5
shufps $0xaa, %xmm1, %xmm5 # xmm5 = xmm5[2,2],xmm1[2,2]
movaps 0x1b0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1b0(%rsp)
movaps %xmm1, %xmm5
shufps $0xff, %xmm1, %xmm5 # xmm5 = xmm5[3,3],xmm1[3,3]
movaps 0x30(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x30(%rsp)
movaps %xmm4, %xmm5
shufps $0x0, %xmm4, %xmm5 # xmm5 = xmm5[0,0],xmm4[0,0]
movaps 0x1c0(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x1c0(%rsp)
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps 0x150(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x150(%rsp)
movaps %xmm4, %xmm5
shufps $0xaa, %xmm4, %xmm5 # xmm5 = xmm5[2,2],xmm4[2,2]
movaps 0x170(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x170(%rsp)
movaps %xmm4, %xmm5
shufps $0xff, %xmm4, %xmm5 # xmm5 = xmm5[3,3],xmm4[3,3]
movaps 0x190(%rsp), %xmm6
blendvps %xmm0, %xmm5, %xmm6
movaps %xmm6, 0x190(%rsp)
movaps (%rsp), %xmm0
movmskps %xmm0, %esi
testl %esi, %esi
jne 0x1ce88d
jmp 0x1cf0f4
movaps 0x190(%rsp), %xmm15
movaps %xmm15, %xmm0
mulps 0x140(%rsp), %xmm0
movaps 0x100(%rsp), %xmm7
movaps %xmm7, %xmm1
movaps %xmm7, %xmm6
mulps 0x90(%rsp), %xmm1
addps %xmm0, %xmm1
movaps 0x110(%rsp), %xmm14
movaps %xmm14, %xmm0
mulps 0x70(%rsp), %xmm0
addps %xmm1, %xmm0
movaps 0x30(%rsp), %xmm1
movaps %xmm1, %xmm3
movaps %xmm1, %xmm4
mulps 0x80(%rsp), %xmm3
addps %xmm0, %xmm3
movaps 0x1d1d57c(%rip), %xmm1 # 0x1eec6d0
movaps %xmm3, %xmm9
xorps %xmm1, %xmm9
movaps %xmm3, %xmm0
cmpltps %xmm9, %xmm0
movaps %xmm15, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm15
movaps %xmm7, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm6
movaps %xmm6, 0x100(%rsp)
movaps %xmm14, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm14
movaps %xmm14, 0x110(%rsp)
movaps %xmm4, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm4
movaps %xmm4, 0x30(%rsp)
maxps %xmm3, %xmm9
andps 0x1d1d50b(%rip), %xmm3 # 0x1eec6c0
movaps 0x1d22a14(%rip), %xmm0 # 0x1ef1bd0
mulps %xmm3, %xmm0
addps 0x1d22a1a(%rip), %xmm0 # 0x1ef1be0
mulps %xmm3, %xmm0
addps 0x1d22a20(%rip), %xmm0 # 0x1ef1bf0
mulps %xmm3, %xmm0
addps 0x1d22a26(%rip), %xmm0 # 0x1ef1c00
mulps %xmm3, %xmm0
addps 0x1d22a2c(%rip), %xmm0 # 0x1ef1c10
mulps %xmm3, %xmm0
addps 0x1d22a32(%rip), %xmm0 # 0x1ef1c20
movaps 0x1d1d81a(%rip), %xmm11 # 0x1eeca10
movaps %xmm11, %xmm2
subps %xmm3, %xmm2
sqrtps %xmm2, %xmm2
mulps %xmm0, %xmm2
movaps 0x1d22a26(%rip), %xmm4 # 0x1ef1c30
movaps %xmm4, %xmm0
subps %xmm2, %xmm0
xorps %xmm5, %xmm5
maxps %xmm0, %xmm5
xorps %xmm2, %xmm2
movaps %xmm9, %xmm0
cmpltps %xmm2, %xmm0
xorps %xmm6, %xmm6
movaps %xmm5, %xmm2
xorps %xmm1, %xmm2
blendvps %xmm0, %xmm2, %xmm5
movaps %xmm4, %xmm2
subps %xmm5, %xmm2
movaps %xmm3, %xmm0
cmpnleps %xmm11, %xmm0
blendvps %xmm0, 0x1d229fa(%rip), %xmm2 # 0x1ef1c40
movaps 0x1d0(%rsp), %xmm10
mulps %xmm10, %xmm2
movaps 0x1d229f6(%rip), %xmm0 # 0x1ef1c50
mulps %xmm2, %xmm0
roundps $0x1, %xmm0, %xmm3
cvtps2dq %xmm3, %xmm0
mulps %xmm4, %xmm3
subps %xmm3, %xmm2
movapd 0x1d229eb(%rip), %xmm3 # 0x1ef1c60
andpd %xmm0, %xmm3
movaps %xmm2, %xmm5
mulps %xmm2, %xmm5
movaps 0x1d22a09(%rip), %xmm12 # 0x1ef1c90
mulps %xmm5, %xmm12
addps 0x1d22a0d(%rip), %xmm12 # 0x1ef1ca0
movaps 0x1d22a16(%rip), %xmm4 # 0x1ef1cb0
mulps %xmm5, %xmm4
addps 0x1d22a1c(%rip), %xmm4 # 0x1ef1cc0
mulps %xmm5, %xmm12
addps 0x1d22a20(%rip), %xmm12 # 0x1ef1cd0
movapd 0x1d1d867(%rip), %xmm8 # 0x1eecb20
andpd %xmm8, %xmm0
mulps %xmm5, %xmm4
addps 0x1d22a18(%rip), %xmm4 # 0x1ef1ce0
mulps %xmm5, %xmm12
addps 0x1d22a1c(%rip), %xmm12 # 0x1ef1cf0
movapd %xmm0, %xmm7
pcmpeqd %xmm6, %xmm7
mulps %xmm5, %xmm4
addps 0x1d22a1a(%rip), %xmm4 # 0x1ef1d00
psubd %xmm0, %xmm6
mulps %xmm5, %xmm12
addps 0x1d22a1a(%rip), %xmm12 # 0x1ef1d10
mulps %xmm5, %xmm4
addps 0x1d1d7e0(%rip), %xmm4 # 0x1eecae0
mulps %xmm5, %xmm12
mulps %xmm5, %xmm4
addps %xmm11, %xmm12
mulps %xmm2, %xmm12
addps %xmm11, %xmm4
movaps %xmm12, %xmm13
movdqa %xmm6, %xmm0
blendvps %xmm0, %xmm4, %xmm13
movdqa %xmm7, %xmm0
blendvps %xmm0, %xmm4, %xmm12
movapd %xmm3, %xmm0
pcmpgtd %xmm8, %xmm0
movaps %xmm13, %xmm4
xorps %xmm1, %xmm4
blendvps %xmm0, %xmm4, %xmm13
pcmpeqd %xmm0, %xmm0
paddd %xmm3, %xmm0
pminud %xmm0, %xmm8
pcmpeqd %xmm0, %xmm8
xorps %xmm12, %xmm1
movdqa %xmm8, %xmm0
blendvps %xmm0, %xmm1, %xmm12
movaps 0x140(%rsp), %xmm2
movaps %xmm2, %xmm3
mulps %xmm9, %xmm3
subps %xmm15, %xmm3
movaps 0x90(%rsp), %xmm4
mulps %xmm9, %xmm4
movaps %xmm9, 0x190(%rsp)
movaps 0x100(%rsp), %xmm7
subps %xmm7, %xmm4
movaps %xmm3, %xmm0
mulps %xmm3, %xmm0
movaps %xmm4, %xmm1
mulps %xmm4, %xmm1
addps %xmm0, %xmm1
movaps 0x70(%rsp), %xmm5
mulps %xmm9, %xmm5
subps %xmm14, %xmm5
movaps %xmm5, %xmm0
mulps %xmm5, %xmm0
addps %xmm1, %xmm0
movaps 0x80(%rsp), %xmm1
mulps %xmm9, %xmm1
subps 0x30(%rsp), %xmm1
movaps %xmm1, (%rsp)
mulps %xmm1, %xmm1
addps %xmm0, %xmm1
rsqrtps %xmm1, %xmm6
mulps 0x1d1d300(%rip), %xmm1 # 0x1eec6e0
mulps %xmm6, %xmm1
movaps %xmm6, %xmm8
mulps %xmm6, %xmm6
mulps %xmm1, %xmm6
movaps 0x1d2292b(%rip), %xmm9 # 0x1ef1d20
mulps %xmm9, %xmm8
subps %xmm6, %xmm8
mulps %xmm8, %xmm3
mulps %xmm13, %xmm3
movaps %xmm2, %xmm14
mulps %xmm12, %xmm14
subps %xmm3, %xmm14
subps %xmm2, %xmm15
mulps %xmm10, %xmm15
addps %xmm2, %xmm15
movaps %xmm7, %xmm2
movaps 0x90(%rsp), %xmm0
subps %xmm0, %xmm2
mulps %xmm10, %xmm2
addps %xmm0, %xmm2
movaps %xmm15, %xmm3
mulps %xmm15, %xmm3
movaps %xmm2, %xmm6
mulps %xmm2, %xmm6
addps %xmm3, %xmm6
movaps 0x110(%rsp), %xmm7
movaps 0x70(%rsp), %xmm1
subps %xmm1, %xmm7
mulps %xmm10, %xmm7
addps %xmm1, %xmm7
movaps %xmm7, %xmm3
mulps %xmm7, %xmm3
addps %xmm6, %xmm3
movaps 0x30(%rsp), %xmm1
movaps 0x80(%rsp), %xmm0
subps %xmm0, %xmm1
mulps %xmm10, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, 0x30(%rsp)
mulps %xmm1, %xmm1
addps %xmm3, %xmm1
rsqrtps %xmm1, %xmm3
mulps 0x1d1d251(%rip), %xmm1 # 0x1eec6e0
mulps %xmm3, %xmm9
mulps %xmm3, %xmm1
mulps %xmm3, %xmm3
mulps %xmm1, %xmm3
subps %xmm3, %xmm9
mulps %xmm9, %xmm15
movaps 0x190(%rsp), %xmm1
cmpnleps 0x1d2287c(%rip), %xmm1 # 0x1ef1d30
movaps %xmm1, %xmm0
blendvps %xmm0, %xmm15, %xmm14
mulps %xmm8, %xmm4
mulps %xmm13, %xmm4
movaps 0x90(%rsp), %xmm3
mulps %xmm12, %xmm3
subps %xmm4, %xmm3
mulps %xmm9, %xmm2
blendvps %xmm0, %xmm2, %xmm3
mulps %xmm8, %xmm5
mulps %xmm13, %xmm5
movaps 0x70(%rsp), %xmm6
mulps %xmm12, %xmm6
subps %xmm5, %xmm6
mulps %xmm9, %xmm7
blendvps %xmm0, %xmm7, %xmm6
mulps (%rsp), %xmm8
mulps %xmm13, %xmm8
mulps 0x80(%rsp), %xmm12
subps %xmm8, %xmm12
mulps 0x30(%rsp), %xmm9
blendvps %xmm0, %xmm9, %xmm12
subps %xmm10, %xmm11
movaps 0x1e0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xc0(%rsp), %xmm7
mulps %xmm11, %xmm7
addps %xmm0, %xmm7
movaps 0x1f0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x40(%rsp), %xmm1
mulps %xmm11, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, 0x40(%rsp)
movaps 0x200(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x60(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x60(%rsp)
movaps 0x210(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x10(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x10(%rsp)
movaps 0x220(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xb0(%rsp), %xmm1
mulps %xmm11, %xmm1
addps %xmm0, %xmm1
movaps %xmm1, 0xb0(%rsp)
movaps 0x1a0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x50(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x50(%rsp)
movaps 0x120(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0x20(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0x20(%rsp)
movaps 0x130(%rsp), %xmm2
mulps %xmm10, %xmm2
movaps 0x1b0(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xe0(%rsp), %xmm4
mulps %xmm11, %xmm4
addps %xmm2, %xmm4
movaps %xmm4, 0xe0(%rsp)
movaps 0xd0(%rsp), %xmm2
mulps %xmm11, %xmm2
addps %xmm0, %xmm2
movaps %xmm2, 0xd0(%rsp)
movaps 0x1c0(%rsp), %xmm4
mulps %xmm10, %xmm4
movaps 0x150(%rsp), %xmm2
mulps %xmm10, %xmm2
movaps 0x170(%rsp), %xmm0
mulps %xmm10, %xmm0
movaps 0xa0(%rsp), %xmm9
mulps %xmm11, %xmm9
addps %xmm4, %xmm9
movaps %xmm9, 0xa0(%rsp)
movaps 0xf0(%rsp), %xmm8
mulps %xmm11, %xmm8
addps %xmm2, %xmm8
movaps %xmm8, 0xf0(%rsp)
mulps 0x160(%rsp), %xmm11
addps %xmm0, %xmm11
movaps %xmm7, %xmm0
movaps %xmm11, 0x90(%rsp)
movaps %xmm3, %xmm7
movaps %xmm3, %xmm15
movaps %xmm14, %xmm9
movaps %xmm3, %xmm8
movaps %xmm14, %xmm10
mulps %xmm14, %xmm3
movaps %xmm14, %xmm2
mulps %xmm14, %xmm2
mulps %xmm7, %xmm7
movaps %xmm2, %xmm14
addps %xmm7, %xmm14
mulps %xmm6, %xmm15
mulps %xmm12, %xmm9
movaps %xmm15, %xmm1
addps %xmm9, %xmm1
subps %xmm9, %xmm15
movaps %xmm12, %xmm9
mulps %xmm12, %xmm8
mulps %xmm6, %xmm10
mulps %xmm6, %xmm12
movaps %xmm6, %xmm11
mulps %xmm6, %xmm11
subps %xmm11, %xmm14
subps %xmm7, %xmm2
movaps %xmm8, %xmm6
subps %xmm10, %xmm6
addps %xmm8, %xmm10
movaps %xmm10, %xmm8
movaps %xmm12, %xmm10
addps %xmm3, %xmm10
subps %xmm3, %xmm12
movaps %xmm11, %xmm13
addps %xmm2, %xmm13
subps %xmm11, %xmm2
mulps %xmm9, %xmm9
subps %xmm9, %xmm14
subps %xmm9, %xmm13
addps %xmm9, %xmm2
jmp 0x1ce4aa
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersector1::intersect(embree::sse42::InstanceArrayIntersector1::Precalculations const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
void InstanceArrayIntersector1::intersect(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
if ((ray.mask & instance->mask) == 0)
return;
#endif
RTCRayQueryContext* user_context = context->user;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_);
const Vec3ff ray_org = ray.org;
const Vec3ff ray_dir = ray.dir;
ray.org = Vec3ff(xfmPoint(world2local, ray_org), ray.tnear());
ray.dir = Vec3ff(xfmVector(world2local, ray_dir), ray.time());
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.intersect((RTCRayHit&)ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
instance_id_stack::pop(user_context);
}
}
|
pushq %r14
pushq %rbx
subq $0x38, %rsp
movq %rsi, %rbx
movq (%rdx), %rax
movl (%rcx), %edi
movl 0x4(%rcx), %r8d
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %rsi
movq 0x58(%rsi), %rax
testq %rax, %rax
jne 0x1cf794
movq 0x90(%rsi), %rax
movq 0xa0(%rsi), %r9
imulq %rdi, %r9
movl (%rax,%r9), %eax
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
cmpq %r9, %rax
je 0x1cf792
movq 0x60(%rsi), %r9
movq (%r9,%rax,8), %rax
jmp 0x1cf794
xorl %eax, %eax
testq %rax, %rax
je 0x1cfb28
movl 0x34(%rsi), %r9d
testl %r9d, 0x24(%rbx)
je 0x1cfb28
movq 0x8(%rdx), %r14
cmpl $-0x1, (%r14)
jne 0x1cfb28
movl %r8d, (%r14)
movl %edi, 0x4(%r14)
movl (%rcx), %ecx
movzbl 0x3d(%rsi), %r8d
shll $0x8, %r8d
movq 0x88(%rsi), %rsi
movl 0x20(%rsi), %edi
cmpl $0x100, %r8d # imm = 0x100
je 0x1cfb30
cmpl $0x9134, %edi # imm = 0x9134
je 0x1cf989
cmpl $0x9234, %edi # imm = 0x9234
je 0x1cf82e
cmpl $0xb001, %edi # imm = 0xB001
je 0x1cf88a
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1cf9e8
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movaps (%rdi,%rcx), %xmm0
movaps 0x10(%rdi,%rcx), %xmm7
movaps 0x20(%rdi,%rcx), %xmm4
movaps 0x30(%rdi,%rcx), %xmm5
jmp 0x1cf9e8
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movsd 0x4(%rdi,%rcx), %xmm1
movss (%rdi,%rcx), %xmm0
shufps $0x4c, %xmm1, %xmm0 # xmm0 = xmm0[0,3],xmm1[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x10(%rdi,%rcx), %xmm1
movss 0xc(%rdi,%rcx), %xmm7
shufps $0x4c, %xmm1, %xmm7 # xmm7 = xmm7[0,3],xmm1[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
movsd 0x1c(%rdi,%rcx), %xmm1
movss 0x18(%rdi,%rcx), %xmm4
shufps $0x4c, %xmm1, %xmm4 # xmm4 = xmm4[0,3],xmm1[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x28(%rdi,%rcx), %xmm1
movss 0x24(%rdi,%rcx), %xmm5
shufps $0x4c, %xmm1, %xmm5 # xmm5 = xmm5[0,3],xmm1[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x1cf9e8
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movsd 0x10(%rdi,%rcx), %xmm4
insertps $0x20, 0x8(%rdi,%rcx), %xmm4 # xmm4 = xmm4[0,1],mem[0],xmm4[3]
movsd 0x34(%rdi,%rcx), %xmm1
movss (%rdi,%rcx), %xmm0
movss 0xc(%rdi,%rcx), %xmm7
movlhps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm0 # xmm0 = xmm0[0,2],xmm1[1,3]
movss 0x18(%rdi,%rcx), %xmm5
movsd 0x1c(%rdi,%rcx), %xmm1
movlhps %xmm1, %xmm5 # xmm5 = xmm5[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm5 # xmm5 = xmm5[0,2],xmm1[1,3]
movss 0x24(%rdi,%rcx), %xmm6
movss 0x28(%rdi,%rcx), %xmm3
movss 0x2c(%rdi,%rcx), %xmm1
movss 0x30(%rdi,%rcx), %xmm2
movaps %xmm3, %xmm8
mulss %xmm3, %xmm8
movaps %xmm6, %xmm9
mulss %xmm6, %xmm9
addss %xmm8, %xmm9
movaps %xmm1, %xmm8
mulss %xmm1, %xmm8
addss %xmm9, %xmm8
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm8, %xmm9
movaps %xmm9, %xmm8
rsqrtss %xmm9, %xmm8
movss 0x1d1cdf0(%rip), %xmm10 # 0x1eec718
mulss %xmm8, %xmm10
mulss 0x1d1cde6(%rip), %xmm9 # 0x1eec71c
mulss %xmm8, %xmm9
mulss %xmm8, %xmm8
mulss %xmm9, %xmm8
addss %xmm10, %xmm8
mulss %xmm8, %xmm6
insertps $0x30, %xmm6, %xmm5 # xmm5 = xmm5[0,1,2],xmm6[0]
mulss %xmm8, %xmm3
insertps $0x30, %xmm3, %xmm0 # xmm0 = xmm0[0,1,2],xmm3[0]
mulss %xmm8, %xmm1
mulss %xmm2, %xmm8
insertps $0x10, 0x4(%rdi,%rcx), %xmm7 # xmm7 = xmm7[0],mem[0],xmm7[2,3]
insertps $0x30, %xmm8, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[0]
insertps $0x20, 0x3c(%rdi,%rcx), %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3]
insertps $0x30, %xmm1, %xmm7 # xmm7 = xmm7[0,1,2],xmm1[0]
jmp 0x1cf9e8
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movss (%rdi,%rcx), %xmm0
movss 0x4(%rdi,%rcx), %xmm7
movss 0x8(%rdi,%rcx), %xmm4
movss 0xc(%rdi,%rcx), %xmm5
insertps $0x1c, 0x10(%rdi,%rcx), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdi,%rcx), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdi,%rcx), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdi,%rcx), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdi,%rcx), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdi,%rcx), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdi,%rcx), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdi,%rcx), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
movaps %xmm4, %xmm3
shufps $0xc9, %xmm4, %xmm3 # xmm3 = xmm3[1,2],xmm4[0,3]
movaps %xmm7, %xmm2
shufps $0xc9, %xmm7, %xmm2 # xmm2 = xmm2[1,2],xmm7[0,3]
movaps %xmm0, %xmm6
mulps %xmm2, %xmm6
mulps %xmm4, %xmm2
movaps %xmm7, %xmm1
mulps %xmm3, %xmm1
subps %xmm2, %xmm1
movaps %xmm1, %xmm2
shufps $0xc9, %xmm1, %xmm2 # xmm2 = xmm2[1,2],xmm1[0,3]
movaps %xmm0, %xmm8
shufps $0xc9, %xmm0, %xmm8 # xmm8 = xmm8[1,2],xmm0[0,3]
mulps %xmm0, %xmm3
mulps %xmm8, %xmm4
subps %xmm3, %xmm4
mulps %xmm7, %xmm8
subps %xmm8, %xmm6
unpcklps %xmm6, %xmm1 # xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
shufps $0xc9, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
dpps $0x7f, %xmm2, %xmm0
unpcklps %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
xorps %xmm3, %xmm3
movss %xmm4, %xmm3 # xmm3 = xmm4[0],xmm3[1,2,3]
insertps $0x4a, %xmm4, %xmm4 # xmm4 = xmm4[1],zero,xmm4[2],zero
unpcklps %xmm3, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
movaps %xmm2, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
unpckhps %xmm4, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
divps %xmm0, %xmm3
divps %xmm0, %xmm2
divps %xmm0, %xmm1
movaps %xmm5, %xmm0
shufps $0x0, %xmm5, %xmm0 # xmm0 = xmm0[0,0],xmm5[0,0]
movaps %xmm5, %xmm4
shufps $0x55, %xmm5, %xmm4 # xmm4 = xmm4[1,1],xmm5[1,1]
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
mulps %xmm1, %xmm5
mulps %xmm2, %xmm4
addps %xmm5, %xmm4
mulps %xmm3, %xmm0
addps %xmm4, %xmm0
movaps (%rbx), %xmm6
movaps %xmm6, (%rsp)
movaps 0x10(%rbx), %xmm8
movaps %xmm6, %xmm4
shufps $0x0, %xmm6, %xmm4 # xmm4 = xmm4[0,0],xmm6[0,0]
movaps %xmm6, %xmm5
shufps $0x55, %xmm6, %xmm5 # xmm5 = xmm5[1,1],xmm6[1,1]
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
mulps %xmm1, %xmm6
subps %xmm0, %xmm6
mulps %xmm2, %xmm5
addps %xmm6, %xmm5
mulps %xmm3, %xmm4
addps %xmm5, %xmm4
insertps $0x30, 0xc(%rbx), %xmm4 # xmm4 = xmm4[0,1,2],mem[0]
movaps %xmm4, (%rbx)
movaps %xmm8, %xmm5
movaps %xmm8, 0x10(%rsp)
movaps %xmm8, %xmm0
shufps $0x0, %xmm8, %xmm0 # xmm0 = xmm0[0,0],xmm8[0,0]
movaps %xmm8, %xmm4
shufps $0x55, %xmm8, %xmm4 # xmm4 = xmm4[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm5 # xmm5 = xmm5[2,2],xmm8[2,2]
mulps %xmm1, %xmm5
mulps %xmm2, %xmm4
addps %xmm5, %xmm4
mulps %xmm3, %xmm0
addps %xmm4, %xmm0
insertps $0x30, 0x1c(%rbx), %xmm0 # xmm0 = xmm0[0,1,2],mem[0]
movaps %xmm0, 0x10(%rbx)
movq 0x10(%rdx), %rcx
leaq 0x20(%rsp), %rdx
movq %rax, (%rdx)
movq %r14, 0x8(%rdx)
movq %rcx, 0x10(%rdx)
leaq 0x58(%rax), %rdi
movq %rbx, %rsi
callq *0x78(%rax)
movaps (%rsp), %xmm0
movaps %xmm0, (%rbx)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, 0x10(%rbx)
orq $-0x1, (%r14)
addq $0x38, %rsp
popq %rbx
popq %r14
retq
cmpl $0x9134, %edi # imm = 0x9134
je 0x1cfce5
cmpl $0x9234, %edi # imm = 0x9234
je 0x1cfb7e
cmpl $0xb001, %edi # imm = 0xB001
je 0x1cfbe0
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1cfd4a
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movaps (%rdi,%rcx), %xmm0
movaps 0x10(%rdi,%rcx), %xmm12
movaps 0x20(%rdi,%rcx), %xmm11
movaps 0x30(%rdi,%rcx), %xmm1
jmp 0x1cfd4a
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movsd 0x4(%rdi,%rcx), %xmm1
movss (%rdi,%rcx), %xmm0
shufps $0x4c, %xmm1, %xmm0 # xmm0 = xmm0[0,3],xmm1[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x10(%rdi,%rcx), %xmm2
movss 0xc(%rdi,%rcx), %xmm12
shufps $0x4c, %xmm2, %xmm12 # xmm12 = xmm12[0,3],xmm2[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x1c(%rdi,%rcx), %xmm3
movss 0x18(%rdi,%rcx), %xmm11
shufps $0x4c, %xmm3, %xmm11 # xmm11 = xmm11[0,3],xmm3[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
movsd 0x28(%rdi,%rcx), %xmm4
movss 0x24(%rdi,%rcx), %xmm1
shufps $0x4c, %xmm4, %xmm1 # xmm1 = xmm1[0,3],xmm4[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
jmp 0x1cfd4a
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movsd 0x10(%rdi,%rcx), %xmm11
insertps $0x20, 0x8(%rdi,%rcx), %xmm11 # xmm11 = xmm11[0,1],mem[0],xmm11[3]
movsd 0x34(%rdi,%rcx), %xmm3
movss (%rdi,%rcx), %xmm0
movss 0xc(%rdi,%rcx), %xmm12
movlhps %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0]
shufps $0xd8, %xmm3, %xmm0 # xmm0 = xmm0[0,2],xmm3[1,3]
movss 0x18(%rdi,%rcx), %xmm1
movsd 0x1c(%rdi,%rcx), %xmm4
movlhps %xmm4, %xmm1 # xmm1 = xmm1[0],xmm4[0]
shufps $0xd8, %xmm4, %xmm1 # xmm1 = xmm1[0,2],xmm4[1,3]
movss 0x24(%rdi,%rcx), %xmm7
movss 0x28(%rdi,%rcx), %xmm6
movss 0x2c(%rdi,%rcx), %xmm4
movss 0x30(%rdi,%rcx), %xmm5
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
movaps %xmm7, %xmm9
mulss %xmm7, %xmm9
addss %xmm8, %xmm9
movaps %xmm4, %xmm8
mulss %xmm4, %xmm8
addss %xmm9, %xmm8
movaps %xmm5, %xmm9
mulss %xmm5, %xmm9
addss %xmm8, %xmm9
movaps %xmm9, %xmm8
rsqrtss %xmm9, %xmm8
movss 0x1d1ca97(%rip), %xmm10 # 0x1eec718
mulss %xmm8, %xmm10
mulss 0x1d1ca8d(%rip), %xmm9 # 0x1eec71c
mulss %xmm8, %xmm9
mulss %xmm8, %xmm8
mulss %xmm9, %xmm8
addss %xmm10, %xmm8
mulss %xmm8, %xmm7
insertps $0x30, %xmm7, %xmm1 # xmm1 = xmm1[0,1,2],xmm7[0]
mulss %xmm8, %xmm6
insertps $0x30, %xmm6, %xmm0 # xmm0 = xmm0[0,1,2],xmm6[0]
mulss %xmm8, %xmm4
mulss %xmm5, %xmm8
insertps $0x10, 0x4(%rdi,%rcx), %xmm12 # xmm12 = xmm12[0],mem[0],xmm12[2,3]
insertps $0x30, %xmm8, %xmm11 # xmm11 = xmm11[0,1,2],xmm8[0]
insertps $0x20, 0x3c(%rdi,%rcx), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
insertps $0x30, %xmm4, %xmm12 # xmm12 = xmm12[0,1,2],xmm4[0]
jmp 0x1cfd4a
movq (%rsi), %rdi
imulq 0x10(%rsi), %rcx
movss (%rdi,%rcx), %xmm0
movss 0x4(%rdi,%rcx), %xmm12
movss 0x8(%rdi,%rcx), %xmm11
movss 0xc(%rdi,%rcx), %xmm1
insertps $0x1c, 0x10(%rdi,%rcx), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdi,%rcx), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdi,%rcx), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdi,%rcx), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdi,%rcx), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdi,%rcx), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdi,%rcx), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdi,%rcx), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
movaps %xmm1, (%rsp)
movaps %xmm1, %xmm8
shufps $0xff, %xmm1, %xmm8 # xmm8 = xmm8[3,3],xmm1[3,3]
movaps %xmm0, %xmm9
shufps $0xff, %xmm0, %xmm9 # xmm9 = xmm9[3,3],xmm0[3,3]
movaps %xmm12, %xmm4
shufps $0xff, %xmm12, %xmm4 # xmm4 = xmm4[3,3],xmm12[3,3]
movaps %xmm11, %xmm5
shufps $0xff, %xmm11, %xmm5 # xmm5 = xmm5[3,3],xmm11[3,3]
movaps %xmm9, %xmm10
mulss %xmm9, %xmm10
movaps %xmm5, %xmm14
mulss %xmm8, %xmm14
movaps %xmm9, %xmm6
movaps %xmm12, %xmm1
movaps %xmm9, %xmm12
movaps %xmm4, %xmm7
mulss %xmm8, %xmm7
mulss %xmm8, %xmm9
movaps %xmm8, %xmm13
mulss %xmm8, %xmm13
movaps %xmm13, %xmm8
addss %xmm10, %xmm8
movaps 0x1d1c91a(%rip), %xmm2 # 0x1eec6d0
mulss %xmm4, %xmm6
movaps %xmm11, %xmm3
movaps %xmm6, %xmm11
addss %xmm14, %xmm11
subss %xmm14, %xmm6
movaps %xmm4, %xmm15
xorps %xmm2, %xmm15
mulss %xmm4, %xmm15
addss %xmm15, %xmm8
xorps %xmm5, %xmm2
mulss %xmm5, %xmm2
addss %xmm2, %xmm8
mulss %xmm5, %xmm12
subss %xmm10, %xmm13
movaps %xmm4, %xmm10
mulss %xmm4, %xmm10
addss %xmm13, %xmm10
addss %xmm2, %xmm10
movaps %xmm12, %xmm14
subss %xmm7, %xmm14
mulss %xmm5, %xmm4
addss %xmm12, %xmm7
movaps %xmm4, %xmm12
addss %xmm9, %xmm12
subss %xmm9, %xmm4
addss %xmm11, %xmm11
addss %xmm14, %xmm14
addss %xmm15, %xmm13
mulss %xmm5, %xmm5
addss %xmm13, %xmm5
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps 0x1d1c8aa(%rip), %xmm9 # 0x1eec700
mulps %xmm9, %xmm14
movsd 0x1d1c88d(%rip), %xmm13 # 0x1eec6f0
mulps %xmm13, %xmm11
addps %xmm14, %xmm11
movss 0x1d1c8a0(%rip), %xmm14 # 0x1eec714
mulps %xmm14, %xmm8
addps %xmm11, %xmm8
addss %xmm12, %xmm12
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm9, %xmm12
mulps %xmm13, %xmm10
addps %xmm12, %xmm10
addss %xmm6, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm14, %xmm6
addps %xmm10, %xmm6
xorps %xmm10, %xmm10
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm9, %xmm5
movaps %xmm0, %xmm9
shufps $0xe9, %xmm10, %xmm9 # xmm9 = xmm9[1,2],xmm10[2,3]
blendps $0x4, %xmm1, %xmm9 # xmm9 = xmm9[0,1],xmm1[2],xmm9[3]
addss %xmm7, %xmm7
addss %xmm4, %xmm4
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm13, %xmm4
addps %xmm5, %xmm4
mulps %xmm14, %xmm7
addps %xmm4, %xmm7
addps %xmm10, %xmm9
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm3, %xmm4
movaps %xmm3, %xmm11
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
mulps %xmm7, %xmm3
movaps (%rsp), %xmm2
movaps %xmm2, %xmm5
movaps %xmm2, %xmm12
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
mulps %xmm7, %xmm2
movaps %xmm2, %xmm13
movaps %xmm7, %xmm2
mulps %xmm10, %xmm2
mulps %xmm6, %xmm10
addps %xmm2, %xmm10
mulps %xmm8, %xmm0
addps %xmm10, %xmm0
movaps %xmm1, %xmm7
shufps $0x55, %xmm1, %xmm1 # xmm1 = xmm1[1,1,1,1]
mulps %xmm6, %xmm1
addps %xmm2, %xmm1
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm8, %xmm7
addps %xmm1, %xmm7
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
mulps %xmm6, %xmm11
addps %xmm3, %xmm11
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm8, %xmm4
addps %xmm11, %xmm4
shufps $0x55, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
mulps %xmm6, %xmm12
addps %xmm13, %xmm12
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm8, %xmm5
addps %xmm12, %xmm5
addps %xmm9, %xmm5
jmp 0x1cf9e8
nop
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersector1::occluded(embree::sse42::InstanceArrayIntersector1::Precalculations const&, embree::RayK<1>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
bool InstanceArrayIntersector1::occluded(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return false;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
if ((ray.mask & instance->mask) == 0)
return false;
#endif
RTCRayQueryContext* user_context = context->user;
bool occluded = false;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_);
Accel* object = instance->getObject(prim.primID_);
const Vec3ff ray_org = ray.org;
const Vec3ff ray_dir = ray.dir;
ray.org = Vec3ff(xfmPoint(world2local, ray_org), ray.tnear());
ray.dir = Vec3ff(xfmVector(world2local, ray_dir), ray.time());
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.occluded((RTCRay&)ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
occluded = ray.tfar < 0.0f;
instance_id_stack::pop(user_context);
}
return occluded;
}
|
pushq %r14
pushq %rbx
subq $0x38, %rsp
movq %rsi, %rbx
movq (%rdx), %rax
movl (%rcx), %esi
movl 0x4(%rcx), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
cmpq $0x0, 0x58(%rax)
je 0x1d0034
movl 0x34(%rax), %r8d
testl %r8d, 0x24(%rbx)
je 0x1d0064
movq 0x8(%rdx), %r14
cmpl $-0x1, (%r14)
jne 0x1d0064
movl %edi, (%r14)
movl %esi, 0x4(%r14)
movl (%rcx), %ecx
movzbl 0x3d(%rax), %r8d
shll $0x8, %r8d
movq 0x88(%rax), %rdi
movl 0x20(%rdi), %esi
cmpl $0x100, %r8d # imm = 0x100
je 0x1d03a7
cmpl $0x9134, %esi # imm = 0x9134
je 0x1d01cc
cmpl $0x9234, %esi # imm = 0x9234
je 0x1d006b
cmpl $0xb001, %esi # imm = 0xB001
je 0x1d00ca
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1d022e
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movaps (%rsi,%rdi), %xmm0
movaps 0x10(%rsi,%rdi), %xmm7
movaps 0x20(%rsi,%rdi), %xmm4
movaps 0x30(%rsi,%rdi), %xmm5
jmp 0x1d022e
movq 0x90(%rax), %r8
movq 0xa0(%rax), %r9
imulq %rsi, %r9
movl (%r8,%r9), %r8d
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
cmpq %r9, %r8
je 0x1d0064
movq 0x60(%rax), %r9
cmpq $0x0, (%r9,%r8,8)
jne 0x1cffa0
xorl %eax, %eax
jmp 0x1d039f
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movsd 0x4(%rsi,%rdi), %xmm1
movss (%rsi,%rdi), %xmm0
shufps $0x4c, %xmm1, %xmm0 # xmm0 = xmm0[0,3],xmm1[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x10(%rsi,%rdi), %xmm1
movss 0xc(%rsi,%rdi), %xmm7
shufps $0x4c, %xmm1, %xmm7 # xmm7 = xmm7[0,3],xmm1[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
movsd 0x1c(%rsi,%rdi), %xmm1
movss 0x18(%rsi,%rdi), %xmm4
shufps $0x4c, %xmm1, %xmm4 # xmm4 = xmm4[0,3],xmm1[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x28(%rsi,%rdi), %xmm1
movss 0x24(%rsi,%rdi), %xmm5
shufps $0x4c, %xmm1, %xmm5 # xmm5 = xmm5[0,3],xmm1[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x1d022e
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movsd 0x10(%rsi,%rdi), %xmm4
insertps $0x20, 0x8(%rsi,%rdi), %xmm4 # xmm4 = xmm4[0,1],mem[0],xmm4[3]
movsd 0x34(%rsi,%rdi), %xmm1
movss (%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm7
movlhps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm0 # xmm0 = xmm0[0,2],xmm1[1,3]
movss 0x18(%rsi,%rdi), %xmm5
movsd 0x1c(%rsi,%rdi), %xmm1
movlhps %xmm1, %xmm5 # xmm5 = xmm5[0],xmm1[0]
shufps $0xd8, %xmm1, %xmm5 # xmm5 = xmm5[0,2],xmm1[1,3]
movss 0x24(%rsi,%rdi), %xmm6
movss 0x28(%rsi,%rdi), %xmm3
movss 0x2c(%rsi,%rdi), %xmm1
movss 0x30(%rsi,%rdi), %xmm2
movaps %xmm3, %xmm8
mulss %xmm3, %xmm8
movaps %xmm6, %xmm9
mulss %xmm6, %xmm9
addss %xmm8, %xmm9
movaps %xmm1, %xmm8
mulss %xmm1, %xmm8
addss %xmm9, %xmm8
movaps %xmm2, %xmm9
mulss %xmm2, %xmm9
addss %xmm8, %xmm9
movaps %xmm9, %xmm8
rsqrtss %xmm9, %xmm8
movss 0x1d1c5ad(%rip), %xmm10 # 0x1eec718
mulss %xmm8, %xmm10
mulss 0x1d1c5a3(%rip), %xmm9 # 0x1eec71c
mulss %xmm8, %xmm9
mulss %xmm8, %xmm8
mulss %xmm9, %xmm8
addss %xmm10, %xmm8
mulss %xmm8, %xmm6
insertps $0x30, %xmm6, %xmm5 # xmm5 = xmm5[0,1,2],xmm6[0]
mulss %xmm8, %xmm3
insertps $0x30, %xmm3, %xmm0 # xmm0 = xmm0[0,1,2],xmm3[0]
mulss %xmm8, %xmm1
mulss %xmm2, %xmm8
insertps $0x10, 0x4(%rsi,%rdi), %xmm7 # xmm7 = xmm7[0],mem[0],xmm7[2,3]
insertps $0x30, %xmm8, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[0]
insertps $0x20, 0x3c(%rsi,%rdi), %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3]
insertps $0x30, %xmm1, %xmm7 # xmm7 = xmm7[0,1,2],xmm1[0]
jmp 0x1d022e
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movss (%rsi,%rdi), %xmm0
movss 0x4(%rsi,%rdi), %xmm7
movss 0x8(%rsi,%rdi), %xmm4
movss 0xc(%rsi,%rdi), %xmm5
insertps $0x1c, 0x10(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rdi), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rdi), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rdi), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rdi), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rdi), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rdi), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
movaps %xmm4, %xmm3
shufps $0xc9, %xmm4, %xmm3 # xmm3 = xmm3[1,2],xmm4[0,3]
movaps %xmm7, %xmm2
shufps $0xc9, %xmm7, %xmm2 # xmm2 = xmm2[1,2],xmm7[0,3]
movaps %xmm0, %xmm6
mulps %xmm2, %xmm6
mulps %xmm4, %xmm2
movaps %xmm7, %xmm1
mulps %xmm3, %xmm1
subps %xmm2, %xmm1
movaps %xmm1, %xmm2
shufps $0xc9, %xmm1, %xmm2 # xmm2 = xmm2[1,2],xmm1[0,3]
movaps %xmm0, %xmm8
shufps $0xc9, %xmm0, %xmm8 # xmm8 = xmm8[1,2],xmm0[0,3]
mulps %xmm0, %xmm3
mulps %xmm8, %xmm4
subps %xmm3, %xmm4
mulps %xmm7, %xmm8
subps %xmm8, %xmm6
unpcklps %xmm6, %xmm1 # xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
shufps $0xc9, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
dpps $0x7f, %xmm2, %xmm0
unpcklps %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
xorps %xmm3, %xmm3
movss %xmm4, %xmm3 # xmm3 = xmm4[0],xmm3[1,2,3]
insertps $0x4a, %xmm4, %xmm4 # xmm4 = xmm4[1],zero,xmm4[2],zero
unpcklps %xmm3, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
movaps %xmm2, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
unpckhps %xmm4, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
divps %xmm0, %xmm3
divps %xmm0, %xmm2
divps %xmm0, %xmm1
movaps %xmm5, %xmm0
shufps $0x0, %xmm5, %xmm0 # xmm0 = xmm0[0,0],xmm5[0,0]
movaps %xmm5, %xmm4
shufps $0x55, %xmm5, %xmm4 # xmm4 = xmm4[1,1],xmm5[1,1]
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
mulps %xmm1, %xmm5
mulps %xmm2, %xmm4
addps %xmm5, %xmm4
mulps %xmm3, %xmm0
addps %xmm4, %xmm0
movq 0x58(%rax), %r8
testq %r8, %r8
jne 0x1d02ea
movq 0x60(%rax), %rsi
movq 0x90(%rax), %rdi
imulq 0xa0(%rax), %rcx
movl (%rdi,%rcx), %eax
movq (%rsi,%rax,8), %r8
movaps (%rbx), %xmm6
movaps %xmm6, (%rsp)
movaps 0x10(%rbx), %xmm8
movaps %xmm6, %xmm4
shufps $0x0, %xmm6, %xmm4 # xmm4 = xmm4[0,0],xmm6[0,0]
movaps %xmm6, %xmm5
shufps $0x55, %xmm6, %xmm5 # xmm5 = xmm5[1,1],xmm6[1,1]
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
mulps %xmm1, %xmm6
subps %xmm0, %xmm6
mulps %xmm2, %xmm5
addps %xmm6, %xmm5
mulps %xmm3, %xmm4
addps %xmm5, %xmm4
insertps $0x30, 0xc(%rbx), %xmm4 # xmm4 = xmm4[0,1,2],mem[0]
movaps %xmm4, (%rbx)
movaps %xmm8, %xmm5
movaps %xmm8, 0x10(%rsp)
movaps %xmm8, %xmm0
shufps $0x0, %xmm8, %xmm0 # xmm0 = xmm0[0,0],xmm8[0,0]
movaps %xmm8, %xmm4
shufps $0x55, %xmm8, %xmm4 # xmm4 = xmm4[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm5 # xmm5 = xmm5[2,2],xmm8[2,2]
mulps %xmm1, %xmm5
mulps %xmm2, %xmm4
addps %xmm5, %xmm4
mulps %xmm3, %xmm0
addps %xmm4, %xmm0
insertps $0x30, 0x1c(%rbx), %xmm0 # xmm0 = xmm0[0,1,2],mem[0]
movaps %xmm0, 0x10(%rbx)
movq 0x10(%rdx), %rax
leaq 0x20(%rsp), %rdx
movq %r8, (%rdx)
movq %r14, 0x8(%rdx)
movq %rax, 0x10(%rdx)
leaq 0x58(%r8), %rdi
movq %rbx, %rsi
callq *0x80(%r8)
movaps (%rsp), %xmm0
movaps %xmm0, (%rbx)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, 0x10(%rbx)
xorps %xmm0, %xmm0
ucomiss 0x20(%rbx), %xmm0
seta %al
orq $-0x1, (%r14)
addq $0x38, %rsp
popq %rbx
popq %r14
retq
cmpl $0x9134, %esi # imm = 0x9134
je 0x1d0565
cmpl $0x9234, %esi # imm = 0x9234
je 0x1d03f8
cmpl $0xb001, %esi # imm = 0xB001
je 0x1d045d
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1d05cd
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movaps (%rsi,%rdi), %xmm0
movaps 0x10(%rsi,%rdi), %xmm12
movaps 0x20(%rsi,%rdi), %xmm11
movaps 0x30(%rsi,%rdi), %xmm1
jmp 0x1d05cd
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movsd 0x4(%rsi,%rdi), %xmm1
movss (%rsi,%rdi), %xmm0
shufps $0x4c, %xmm1, %xmm0 # xmm0 = xmm0[0,3],xmm1[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x10(%rsi,%rdi), %xmm2
movss 0xc(%rsi,%rdi), %xmm12
shufps $0x4c, %xmm2, %xmm12 # xmm12 = xmm12[0,3],xmm2[0,1]
shufps $0x78, %xmm12, %xmm12 # xmm12 = xmm12[0,2,3,1]
movsd 0x1c(%rsi,%rdi), %xmm3
movss 0x18(%rsi,%rdi), %xmm11
shufps $0x4c, %xmm3, %xmm11 # xmm11 = xmm11[0,3],xmm3[0,1]
shufps $0x78, %xmm11, %xmm11 # xmm11 = xmm11[0,2,3,1]
movsd 0x28(%rsi,%rdi), %xmm4
movss 0x24(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm4, %xmm1 # xmm1 = xmm1[0,3],xmm4[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
jmp 0x1d05cd
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movsd 0x10(%rsi,%rdi), %xmm11
insertps $0x20, 0x8(%rsi,%rdi), %xmm11 # xmm11 = xmm11[0,1],mem[0],xmm11[3]
movsd 0x34(%rsi,%rdi), %xmm3
movss (%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm12
movlhps %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0]
shufps $0xd8, %xmm3, %xmm0 # xmm0 = xmm0[0,2],xmm3[1,3]
movss 0x18(%rsi,%rdi), %xmm1
movsd 0x1c(%rsi,%rdi), %xmm4
movlhps %xmm4, %xmm1 # xmm1 = xmm1[0],xmm4[0]
shufps $0xd8, %xmm4, %xmm1 # xmm1 = xmm1[0,2],xmm4[1,3]
movss 0x24(%rsi,%rdi), %xmm7
movss 0x28(%rsi,%rdi), %xmm6
movss 0x2c(%rsi,%rdi), %xmm4
movss 0x30(%rsi,%rdi), %xmm5
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
movaps %xmm7, %xmm9
mulss %xmm7, %xmm9
addss %xmm8, %xmm9
movaps %xmm4, %xmm8
mulss %xmm4, %xmm8
addss %xmm9, %xmm8
movaps %xmm5, %xmm9
mulss %xmm5, %xmm9
addss %xmm8, %xmm9
movaps %xmm9, %xmm8
rsqrtss %xmm9, %xmm8
movss 0x1d1c217(%rip), %xmm10 # 0x1eec718
mulss %xmm8, %xmm10
mulss 0x1d1c20d(%rip), %xmm9 # 0x1eec71c
mulss %xmm8, %xmm9
mulss %xmm8, %xmm8
mulss %xmm9, %xmm8
addss %xmm10, %xmm8
mulss %xmm8, %xmm7
insertps $0x30, %xmm7, %xmm1 # xmm1 = xmm1[0,1,2],xmm7[0]
mulss %xmm8, %xmm6
insertps $0x30, %xmm6, %xmm0 # xmm0 = xmm0[0,1,2],xmm6[0]
mulss %xmm8, %xmm4
mulss %xmm5, %xmm8
insertps $0x10, 0x4(%rsi,%rdi), %xmm12 # xmm12 = xmm12[0],mem[0],xmm12[2,3]
insertps $0x30, %xmm8, %xmm11 # xmm11 = xmm11[0,1,2],xmm8[0]
insertps $0x20, 0x3c(%rsi,%rdi), %xmm12 # xmm12 = xmm12[0,1],mem[0],xmm12[3]
insertps $0x30, %xmm4, %xmm12 # xmm12 = xmm12[0,1,2],xmm4[0]
jmp 0x1d05cd
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rcx, %rdi
movss (%rsi,%rdi), %xmm0
movss 0x4(%rsi,%rdi), %xmm12
movss 0x8(%rsi,%rdi), %xmm11
movss 0xc(%rsi,%rdi), %xmm1
insertps $0x1c, 0x10(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rdi), %xmm12 # xmm12 = xmm12[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rdi), %xmm12 # xmm12 = xmm12[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rdi), %xmm11 # xmm11 = xmm11[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rdi), %xmm11 # xmm11 = xmm11[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
movaps %xmm1, (%rsp)
movaps %xmm1, %xmm8
shufps $0xff, %xmm1, %xmm8 # xmm8 = xmm8[3,3],xmm1[3,3]
movaps %xmm0, %xmm9
shufps $0xff, %xmm0, %xmm9 # xmm9 = xmm9[3,3],xmm0[3,3]
movaps %xmm12, %xmm4
shufps $0xff, %xmm12, %xmm4 # xmm4 = xmm4[3,3],xmm12[3,3]
movaps %xmm11, %xmm5
shufps $0xff, %xmm11, %xmm5 # xmm5 = xmm5[3,3],xmm11[3,3]
movaps %xmm9, %xmm10
mulss %xmm9, %xmm10
movaps %xmm5, %xmm14
mulss %xmm8, %xmm14
movaps %xmm9, %xmm6
movaps %xmm12, %xmm1
movaps %xmm9, %xmm12
movaps %xmm4, %xmm7
mulss %xmm8, %xmm7
mulss %xmm8, %xmm9
movaps %xmm8, %xmm13
mulss %xmm8, %xmm13
movaps %xmm13, %xmm8
addss %xmm10, %xmm8
movaps 0x1d1c097(%rip), %xmm2 # 0x1eec6d0
mulss %xmm4, %xmm6
movaps %xmm11, %xmm3
movaps %xmm6, %xmm11
addss %xmm14, %xmm11
subss %xmm14, %xmm6
movaps %xmm4, %xmm15
xorps %xmm2, %xmm15
mulss %xmm4, %xmm15
addss %xmm15, %xmm8
xorps %xmm5, %xmm2
mulss %xmm5, %xmm2
addss %xmm2, %xmm8
mulss %xmm5, %xmm12
subss %xmm10, %xmm13
movaps %xmm4, %xmm10
mulss %xmm4, %xmm10
addss %xmm13, %xmm10
addss %xmm2, %xmm10
movaps %xmm12, %xmm14
subss %xmm7, %xmm14
mulss %xmm5, %xmm4
addss %xmm12, %xmm7
movaps %xmm4, %xmm12
addss %xmm9, %xmm12
subss %xmm9, %xmm4
addss %xmm11, %xmm11
addss %xmm14, %xmm14
addss %xmm15, %xmm13
mulss %xmm5, %xmm5
addss %xmm13, %xmm5
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps 0x1d1c027(%rip), %xmm9 # 0x1eec700
mulps %xmm9, %xmm14
movsd 0x1d1c00a(%rip), %xmm13 # 0x1eec6f0
mulps %xmm13, %xmm11
addps %xmm14, %xmm11
movss 0x1d1c01d(%rip), %xmm14 # 0x1eec714
mulps %xmm14, %xmm8
addps %xmm11, %xmm8
addss %xmm12, %xmm12
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm9, %xmm12
mulps %xmm13, %xmm10
addps %xmm12, %xmm10
addss %xmm6, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm14, %xmm6
addps %xmm10, %xmm6
xorps %xmm10, %xmm10
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm9, %xmm5
movaps %xmm0, %xmm9
shufps $0xe9, %xmm10, %xmm9 # xmm9 = xmm9[1,2],xmm10[2,3]
blendps $0x4, %xmm1, %xmm9 # xmm9 = xmm9[0,1],xmm1[2],xmm9[3]
addss %xmm7, %xmm7
addss %xmm4, %xmm4
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm13, %xmm4
addps %xmm5, %xmm4
mulps %xmm14, %xmm7
addps %xmm4, %xmm7
addps %xmm10, %xmm9
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps %xmm3, %xmm4
movaps %xmm3, %xmm11
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
mulps %xmm7, %xmm3
movaps (%rsp), %xmm2
movaps %xmm2, %xmm5
movaps %xmm2, %xmm12
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
mulps %xmm7, %xmm2
movaps %xmm2, %xmm13
movaps %xmm7, %xmm2
mulps %xmm10, %xmm2
mulps %xmm6, %xmm10
addps %xmm2, %xmm10
mulps %xmm8, %xmm0
addps %xmm10, %xmm0
movaps %xmm1, %xmm7
shufps $0x55, %xmm1, %xmm1 # xmm1 = xmm1[1,1,1,1]
mulps %xmm6, %xmm1
addps %xmm2, %xmm1
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm8, %xmm7
addps %xmm1, %xmm7
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
mulps %xmm6, %xmm11
addps %xmm3, %xmm11
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm8, %xmm4
addps %xmm11, %xmm4
shufps $0x55, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
mulps %xmm6, %xmm12
addps %xmm13, %xmm12
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm8, %xmm5
addps %xmm12, %xmm5
addps %xmm9, %xmm5
jmp 0x1d022e
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersector1MB::intersect(embree::sse42::InstanceArrayIntersector1MB::Precalculations const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
void InstanceArrayIntersector1MB::intersect(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
if ((ray.mask & instance->mask) == 0)
return;
#endif
RTCRayQueryContext* user_context = context->user;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_, ray.time());
const Vec3ff ray_org = ray.org;
const Vec3ff ray_dir = ray.dir;
ray.org = Vec3ff(xfmPoint(world2local, ray_org), ray.tnear());
ray.dir = Vec3ff(xfmVector(world2local, ray_dir), ray.time());
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.intersect((RTCRayHit&)ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
instance_id_stack::pop(user_context);
}
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
movq %rdx, %r14
movq %rsi, %rbx
movq (%rdx), %rdx
movl (%rcx), %eax
movl 0x4(%rcx), %esi
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rsi,8), %rdx
movq 0x58(%rdx), %r15
testq %r15, %r15
jne 0x1d0858
movq 0x90(%rdx), %rdi
movq 0xa0(%rdx), %r8
imulq %rax, %r8
movl (%rdi,%r8), %edi
movl $0xffffffff, %r8d # imm = 0xFFFFFFFF
cmpq %r8, %rdi
je 0x1d0855
movq 0x60(%rdx), %r8
movq (%r8,%rdi,8), %r15
jmp 0x1d0858
xorl %r15d, %r15d
testq %r15, %r15
je 0x1d10ee
movl 0x34(%rdx), %edi
testl %edi, 0x24(%rbx)
je 0x1d10ee
movq 0x8(%r14), %r12
cmpl $-0x1, (%r12)
jne 0x1d10ee
movl %esi, (%r12)
movl %eax, 0x4(%r12)
movl (%rcx), %eax
cmpl $0x1, 0x24(%rdx)
jne 0x1d08fb
movzbl 0x3d(%rdx), %esi
shll $0x8, %esi
movq 0x88(%rdx), %rcx
movl 0x20(%rcx), %edx
cmpl $0x100, %esi # imm = 0x100
je 0x1d10fd
cmpl $0x9134, %edx # imm = 0x9134
je 0x1d0c7f
cmpl $0x9234, %edx # imm = 0x9234
je 0x1d09c3
cmpl $0xb001, %edx # imm = 0xB001
je 0x1d0a84
cmpl $0x9244, %edx # imm = 0x9244
jne 0x1d0fa5
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movaps (%rdx,%rax), %xmm14
movaps 0x10(%rdx,%rax), %xmm1
movaps 0x20(%rdx,%rax), %xmm0
movaps 0x30(%rdx,%rax), %xmm5
jmp 0x1d0fa5
movss 0x1c(%rbx), %xmm15
movss 0x28(%rdx), %xmm0
movss 0x2c(%rdx), %xmm1
movss 0x30(%rdx), %xmm2
subss %xmm1, %xmm15
subss %xmm1, %xmm2
divss %xmm2, %xmm15
mulss %xmm0, %xmm15
roundss $0x9, %xmm15, %xmm1
addss 0x1d2009a(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm0, %xmm0
maxss %xmm1, %xmm0
cvttss2si %xmm0, %ecx
subss %xmm0, %xmm15
movzbl 0x3d(%rdx), %r8d
shll $0x8, %r8d
movq 0x88(%rdx), %rdx
imulq $0x38, %rcx, %rsi
leaq (%rdx,%rsi), %rdi
movl 0x20(%rdx,%rsi), %esi
cmpl $0x100, %r8d # imm = 0x100
je 0x1d114e
cmpl $0x9134, %esi # imm = 0x9134
je 0x1d0ce6
cmpl $0x9234, %esi # imm = 0x9234
je 0x1d0a22
cmpl $0xb001, %esi # imm = 0xB001
je 0x1d0b80
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1d0d4b
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movaps (%rsi,%rdi), %xmm14
movaps 0x10(%rsi,%rdi), %xmm1
movaps 0x20(%rsi,%rdi), %xmm0
movaps 0x30(%rsi,%rdi), %xmm2
jmp 0x1d0d4b
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x4(%rdx,%rax), %xmm0
movss (%rdx,%rax), %xmm14
shufps $0x4c, %xmm0, %xmm14 # xmm14 = xmm14[0,3],xmm0[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%rdx,%rax), %xmm0
movss 0xc(%rdx,%rax), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x1c(%rdx,%rax), %xmm2
movss 0x18(%rdx,%rax), %xmm0
shufps $0x4c, %xmm2, %xmm0 # xmm0 = xmm0[0,3],xmm2[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x28(%rdx,%rax), %xmm2
movss 0x24(%rdx,%rax), %xmm5
shufps $0x4c, %xmm2, %xmm5 # xmm5 = xmm5[0,3],xmm2[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x1d0fa5
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x4(%rsi,%rdi), %xmm0
movss (%rsi,%rdi), %xmm14
shufps $0x4c, %xmm0, %xmm14 # xmm14 = xmm14[0,3],xmm0[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x1c(%rsi,%rdi), %xmm2
movss 0x18(%rsi,%rdi), %xmm0
shufps $0x4c, %xmm2, %xmm0 # xmm0 = xmm0[0,3],xmm2[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x28(%rsi,%rdi), %xmm3
movss 0x24(%rsi,%rdi), %xmm2
shufps $0x4c, %xmm3, %xmm2 # xmm2 = xmm2[0,3],xmm3[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x1d0d4b
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x10(%rdx,%rax), %xmm0
insertps $0x20, 0x8(%rdx,%rax), %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
movsd 0x34(%rdx,%rax), %xmm2
movss (%rdx,%rax), %xmm14
movss 0xc(%rdx,%rax), %xmm1
movlhps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm14 # xmm14 = xmm14[0,2],xmm2[1,3]
movss 0x18(%rdx,%rax), %xmm5
movsd 0x1c(%rdx,%rax), %xmm2
movlhps %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm5 # xmm5 = xmm5[0,2],xmm2[1,3]
movss 0x24(%rdx,%rax), %xmm6
movss 0x28(%rdx,%rax), %xmm4
movss 0x2c(%rdx,%rax), %xmm2
movss 0x30(%rdx,%rax), %xmm3
movaps %xmm4, %xmm7
mulss %xmm4, %xmm7
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movaps %xmm2, %xmm7
mulss %xmm2, %xmm7
addss %xmm8, %xmm7
movaps %xmm3, %xmm8
mulss %xmm3, %xmm8
addss %xmm7, %xmm8
movaps %xmm8, %xmm7
rsqrtss %xmm8, %xmm7
movss 0x1d1bbf7(%rip), %xmm9 # 0x1eec718
mulss %xmm7, %xmm9
mulss 0x1d1bbed(%rip), %xmm8 # 0x1eec71c
mulss %xmm7, %xmm8
mulss %xmm7, %xmm7
mulss %xmm8, %xmm7
addss %xmm9, %xmm7
mulss %xmm7, %xmm6
insertps $0x30, %xmm6, %xmm5 # xmm5 = xmm5[0,1,2],xmm6[0]
mulss %xmm7, %xmm4
insertps $0x30, %xmm4, %xmm14 # xmm14 = xmm14[0,1,2],xmm4[0]
mulss %xmm7, %xmm2
mulss %xmm3, %xmm7
insertps $0x10, 0x4(%rdx,%rax), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
insertps $0x30, %xmm7, %xmm0 # xmm0 = xmm0[0,1,2],xmm7[0]
insertps $0x20, 0x3c(%rdx,%rax), %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
insertps $0x30, %xmm2, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0]
jmp 0x1d0fa5
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x10(%rsi,%rdi), %xmm0
insertps $0x20, 0x8(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
movsd 0x34(%rsi,%rdi), %xmm2
movss (%rsi,%rdi), %xmm14
movss 0xc(%rsi,%rdi), %xmm1
movlhps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm14 # xmm14 = xmm14[0,2],xmm2[1,3]
movss 0x18(%rsi,%rdi), %xmm2
movsd 0x1c(%rsi,%rdi), %xmm3
movlhps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0]
shufps $0xd8, %xmm3, %xmm2 # xmm2 = xmm2[0,2],xmm3[1,3]
movss 0x24(%rsi,%rdi), %xmm6
movss 0x28(%rsi,%rdi), %xmm5
movss 0x2c(%rsi,%rdi), %xmm3
movss 0x30(%rsi,%rdi), %xmm4
movaps %xmm5, %xmm7
mulss %xmm5, %xmm7
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movaps %xmm3, %xmm7
mulss %xmm3, %xmm7
addss %xmm8, %xmm7
movaps %xmm4, %xmm8
mulss %xmm4, %xmm8
addss %xmm7, %xmm8
movaps %xmm8, %xmm7
rsqrtss %xmm8, %xmm7
movss 0x1d1baf8(%rip), %xmm9 # 0x1eec718
mulss %xmm7, %xmm9
mulss 0x1d1baee(%rip), %xmm8 # 0x1eec71c
mulss %xmm7, %xmm8
mulss %xmm7, %xmm7
mulss %xmm8, %xmm7
addss %xmm9, %xmm7
mulss %xmm7, %xmm6
insertps $0x30, %xmm6, %xmm2 # xmm2 = xmm2[0,1,2],xmm6[0]
mulss %xmm7, %xmm5
insertps $0x30, %xmm5, %xmm14 # xmm14 = xmm14[0,1,2],xmm5[0]
mulss %xmm7, %xmm3
mulss %xmm4, %xmm7
insertps $0x10, 0x4(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
insertps $0x30, %xmm7, %xmm0 # xmm0 = xmm0[0,1,2],xmm7[0]
insertps $0x20, 0x3c(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
insertps $0x30, %xmm3, %xmm1 # xmm1 = xmm1[0,1,2],xmm3[0]
jmp 0x1d0d4b
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movss (%rdx,%rax), %xmm14
movss 0x4(%rdx,%rax), %xmm1
movss 0x8(%rdx,%rax), %xmm0
movss 0xc(%rdx,%rax), %xmm5
insertps $0x1c, 0x10(%rdx,%rax), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdx,%rax), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdx,%rax), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdx,%rax), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdx,%rax), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdx,%rax), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdx,%rax), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdx,%rax), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
jmp 0x1d0fa5
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movss (%rsi,%rdi), %xmm14
movss 0x4(%rsi,%rdi), %xmm1
movss 0x8(%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm2
insertps $0x1c, 0x10(%rsi,%rdi), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rdi), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
incl %ecx
imulq $0x38, %rcx, %rcx
leaq (%rdx,%rcx), %rsi
movl 0x20(%rdx,%rcx), %ecx
cmpl $0x9134, %ecx # imm = 0x9134
je 0x1d0f05
cmpl $0x9234, %ecx # imm = 0x9234
je 0x1d0da5
cmpl $0xb001, %ecx # imm = 0xB001
je 0x1d0e01
cmpl $0x9244, %ecx # imm = 0x9244
jne 0x1d0f64
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movaps (%rcx,%rax), %xmm4
movaps 0x10(%rcx,%rax), %xmm6
movaps 0x20(%rcx,%rax), %xmm3
movaps 0x30(%rcx,%rax), %xmm7
jmp 0x1d0f64
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x4(%rcx,%rax), %xmm3
movss (%rcx,%rax), %xmm4
shufps $0x4c, %xmm3, %xmm4 # xmm4 = xmm4[0,3],xmm3[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x10(%rcx,%rax), %xmm3
movss 0xc(%rcx,%rax), %xmm6
shufps $0x4c, %xmm3, %xmm6 # xmm6 = xmm6[0,3],xmm3[0,1]
shufps $0x78, %xmm6, %xmm6 # xmm6 = xmm6[0,2,3,1]
movsd 0x1c(%rcx,%rax), %xmm5
movss 0x18(%rcx,%rax), %xmm3
shufps $0x4c, %xmm5, %xmm3 # xmm3 = xmm3[0,3],xmm5[0,1]
shufps $0x78, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
movsd 0x28(%rcx,%rax), %xmm5
movss 0x24(%rcx,%rax), %xmm7
shufps $0x4c, %xmm5, %xmm7 # xmm7 = xmm7[0,3],xmm5[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
jmp 0x1d0f64
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x10(%rcx,%rax), %xmm3
insertps $0x20, 0x8(%rcx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
movsd 0x34(%rcx,%rax), %xmm5
movss (%rcx,%rax), %xmm4
movss 0xc(%rcx,%rax), %xmm6
movlhps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm4 # xmm4 = xmm4[0,2],xmm5[1,3]
movss 0x18(%rcx,%rax), %xmm7
movsd 0x1c(%rcx,%rax), %xmm5
movlhps %xmm5, %xmm7 # xmm7 = xmm7[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm7 # xmm7 = xmm7[0,2],xmm5[1,3]
movss 0x24(%rcx,%rax), %xmm10
movss 0x28(%rcx,%rax), %xmm9
movss 0x2c(%rcx,%rax), %xmm5
movss 0x30(%rcx,%rax), %xmm8
movaps %xmm9, %xmm11
mulss %xmm9, %xmm11
movaps %xmm10, %xmm12
mulss %xmm10, %xmm12
addss %xmm11, %xmm12
movaps %xmm5, %xmm11
mulss %xmm5, %xmm11
addss %xmm12, %xmm11
movaps %xmm8, %xmm12
mulss %xmm8, %xmm12
addss %xmm11, %xmm12
movaps %xmm12, %xmm11
rsqrtss %xmm12, %xmm11
movss 0x1d1b876(%rip), %xmm13 # 0x1eec718
mulss %xmm11, %xmm13
mulss 0x1d1b86c(%rip), %xmm12 # 0x1eec71c
mulss %xmm11, %xmm12
mulss %xmm11, %xmm11
mulss %xmm12, %xmm11
addss %xmm13, %xmm11
mulss %xmm11, %xmm10
insertps $0x30, %xmm10, %xmm7 # xmm7 = xmm7[0,1,2],xmm10[0]
mulss %xmm11, %xmm9
insertps $0x30, %xmm9, %xmm4 # xmm4 = xmm4[0,1,2],xmm9[0]
mulss %xmm11, %xmm5
mulss %xmm8, %xmm11
insertps $0x10, 0x4(%rcx,%rax), %xmm6 # xmm6 = xmm6[0],mem[0],xmm6[2,3]
insertps $0x30, %xmm11, %xmm3 # xmm3 = xmm3[0,1,2],xmm11[0]
insertps $0x20, 0x3c(%rcx,%rax), %xmm6 # xmm6 = xmm6[0,1],mem[0],xmm6[3]
insertps $0x30, %xmm5, %xmm6 # xmm6 = xmm6[0,1,2],xmm5[0]
jmp 0x1d0f64
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movss (%rcx,%rax), %xmm4
movss 0x4(%rcx,%rax), %xmm6
movss 0x8(%rcx,%rax), %xmm3
movss 0xc(%rcx,%rax), %xmm7
insertps $0x1c, 0x10(%rcx,%rax), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x20(%rcx,%rax), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x14(%rcx,%rax), %xmm6 # xmm6 = xmm6[0],mem[0],zero,zero
insertps $0x28, 0x24(%rcx,%rax), %xmm6 # xmm6 = xmm6[0,1],mem[0],zero
insertps $0x1c, 0x18(%rcx,%rax), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x28(%rcx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rcx,%rax), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rcx,%rax), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
movss 0x1d1b7a8(%rip), %xmm5 # 0x1eec714
subss %xmm15, %xmm5
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulps %xmm15, %xmm4
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm5, %xmm14
addps %xmm4, %xmm14
mulps %xmm15, %xmm6
mulps %xmm5, %xmm1
addps %xmm6, %xmm1
mulps %xmm15, %xmm3
mulps %xmm5, %xmm0
addps %xmm3, %xmm0
mulps %xmm7, %xmm15
mulps %xmm2, %xmm5
addps %xmm15, %xmm5
movaps %xmm0, %xmm4
shufps $0xc9, %xmm0, %xmm4 # xmm4 = xmm4[1,2],xmm0[0,3]
movaps %xmm1, %xmm6
shufps $0xc9, %xmm1, %xmm6 # xmm6 = xmm6[1,2],xmm1[0,3]
movaps %xmm0, %xmm3
mulps %xmm6, %xmm3
movaps %xmm1, %xmm2
mulps %xmm4, %xmm2
subps %xmm3, %xmm2
movaps %xmm2, %xmm3
shufps $0xc9, %xmm2, %xmm3 # xmm3 = xmm3[1,2],xmm2[0,3]
movaps %xmm14, %xmm7
shufps $0xc9, %xmm14, %xmm7 # xmm7 = xmm7[1,2],xmm14[0,3]
mulps %xmm14, %xmm4
mulps %xmm7, %xmm0
subps %xmm4, %xmm0
mulps %xmm1, %xmm7
mulps %xmm14, %xmm6
subps %xmm7, %xmm6
unpcklps %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
shufps $0xc9, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
dpps $0x7f, %xmm3, %xmm14
unpcklps %xmm6, %xmm3 # xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
xorps %xmm1, %xmm1
movss %xmm0, %xmm1 # xmm1 = xmm0[0],xmm1[1,2,3]
insertps $0x4a, %xmm0, %xmm0 # xmm0 = xmm0[1],zero,xmm0[2],zero
unpcklps %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
movaps %xmm3, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
unpckhps %xmm0, %xmm3 # xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
divps %xmm14, %xmm1
divps %xmm14, %xmm3
divps %xmm14, %xmm2
movaps %xmm5, %xmm0
shufps $0x0, %xmm5, %xmm0 # xmm0 = xmm0[0,0],xmm5[0,0]
movaps %xmm5, %xmm4
shufps $0x55, %xmm5, %xmm4 # xmm4 = xmm4[1,1],xmm5[1,1]
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
mulps %xmm2, %xmm5
mulps %xmm3, %xmm4
addps %xmm5, %xmm4
mulps %xmm1, %xmm0
addps %xmm4, %xmm0
movaps (%rbx), %xmm6
movaps %xmm6, 0x10(%rsp)
movaps 0x10(%rbx), %xmm8
movaps %xmm6, %xmm4
shufps $0x0, %xmm6, %xmm4 # xmm4 = xmm4[0,0],xmm6[0,0]
movaps %xmm6, %xmm5
shufps $0x55, %xmm6, %xmm5 # xmm5 = xmm5[1,1],xmm6[1,1]
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
mulps %xmm2, %xmm6
subps %xmm0, %xmm6
mulps %xmm3, %xmm5
addps %xmm6, %xmm5
mulps %xmm1, %xmm4
addps %xmm5, %xmm4
insertps $0x30, 0xc(%rbx), %xmm4 # xmm4 = xmm4[0,1,2],mem[0]
movaps %xmm4, (%rbx)
movaps %xmm8, %xmm5
movaps %xmm8, (%rsp)
movaps %xmm8, %xmm0
shufps $0x0, %xmm8, %xmm0 # xmm0 = xmm0[0,0],xmm8[0,0]
movaps %xmm8, %xmm4
shufps $0x55, %xmm8, %xmm4 # xmm4 = xmm4[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm5 # xmm5 = xmm5[2,2],xmm8[2,2]
mulps %xmm2, %xmm5
mulps %xmm3, %xmm4
addps %xmm5, %xmm4
mulps %xmm1, %xmm0
addps %xmm4, %xmm0
insertps $0x30, 0x1c(%rbx), %xmm0 # xmm0 = xmm0[0,1,2],mem[0]
movaps %xmm0, 0x10(%rbx)
movq 0x10(%r14), %rax
leaq 0x160(%rsp), %rdx
movq %r15, (%rdx)
movq %r12, 0x8(%rdx)
movq %rax, 0x10(%rdx)
leaq 0x58(%r15), %rdi
movq %rbx, %rsi
callq *0x78(%r15)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, (%rbx)
movaps (%rsp), %xmm0
movaps %xmm0, 0x10(%rbx)
orq $-0x1, (%r12)
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r14
popq %r15
retq
cmpl $0x9134, %edx # imm = 0x9134
je 0x1d1497
cmpl $0x9234, %edx # imm = 0x9234
je 0x1d11bd
cmpl $0xb001, %edx # imm = 0xB001
je 0x1d128e
cmpl $0x9244, %edx # imm = 0x9244
jne 0x1d14f9
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movaps (%rdx,%rax), %xmm10
movaps 0x10(%rdx,%rax), %xmm4
movaps 0x20(%rdx,%rax), %xmm3
movaps 0x30(%rdx,%rax), %xmm2
jmp 0x1d14f9
movaps %xmm15, 0xa0(%rsp)
cmpl $0x9134, %esi # imm = 0x9134
je 0x1d1719
cmpl $0x9234, %esi # imm = 0x9234
je 0x1d121c
cmpl $0xb001, %esi # imm = 0xB001
je 0x1d138a
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1d178e
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movaps (%rsi,%rdi), %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0x10(%rsi,%rdi), %xmm0
movaps %xmm0, 0x40(%rsp)
movaps 0x20(%rsi,%rdi), %xmm0
movaps %xmm0, (%rsp)
movaps 0x30(%rsi,%rdi), %xmm0
movaps %xmm0, 0x30(%rsp)
jmp 0x1d178e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x4(%rdx,%rax), %xmm0
movss (%rdx,%rax), %xmm10
shufps $0x4c, %xmm0, %xmm10 # xmm10 = xmm10[0,3],xmm0[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x10(%rdx,%rax), %xmm0
movss 0xc(%rdx,%rax), %xmm4
shufps $0x4c, %xmm0, %xmm4 # xmm4 = xmm4[0,3],xmm0[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x1c(%rdx,%rax), %xmm0
movss 0x18(%rdx,%rax), %xmm3
shufps $0x4c, %xmm0, %xmm3 # xmm3 = xmm3[0,3],xmm0[0,1]
shufps $0x78, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
movsd 0x28(%rdx,%rax), %xmm0
movss 0x24(%rdx,%rax), %xmm2
shufps $0x4c, %xmm0, %xmm2 # xmm2 = xmm2[0,3],xmm0[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x1d14f9
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x4(%rsi,%rdi), %xmm0
movss (%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, 0x10(%rsp)
movsd 0x10(%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, 0x40(%rsp)
movsd 0x1c(%rsi,%rdi), %xmm0
movss 0x18(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, (%rsp)
movsd 0x28(%rsi,%rdi), %xmm0
movss 0x24(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, 0x30(%rsp)
jmp 0x1d178e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x10(%rdx,%rax), %xmm3
insertps $0x20, 0x8(%rdx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
movsd 0x34(%rdx,%rax), %xmm0
movss (%rdx,%rax), %xmm10
movss 0xc(%rdx,%rax), %xmm4
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x18(%rdx,%rax), %xmm2
movsd 0x1c(%rdx,%rax), %xmm0
movlhps %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm2 # xmm2 = xmm2[0,2],xmm0[1,3]
movss 0x24(%rdx,%rax), %xmm6
movss 0x28(%rdx,%rax), %xmm5
movss 0x2c(%rdx,%rax), %xmm0
movss 0x30(%rdx,%rax), %xmm1
movaps %xmm5, %xmm7
mulss %xmm5, %xmm7
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movaps %xmm0, %xmm7
mulss %xmm0, %xmm7
addss %xmm8, %xmm7
movaps %xmm1, %xmm8
mulss %xmm1, %xmm8
addss %xmm7, %xmm8
movaps %xmm8, %xmm7
rsqrtss %xmm8, %xmm7
movss 0x1d1b3ed(%rip), %xmm9 # 0x1eec718
mulss %xmm7, %xmm9
mulss 0x1d1b3e3(%rip), %xmm8 # 0x1eec71c
mulss %xmm7, %xmm8
mulss %xmm7, %xmm7
mulss %xmm8, %xmm7
addss %xmm9, %xmm7
mulss %xmm7, %xmm6
insertps $0x30, %xmm6, %xmm2 # xmm2 = xmm2[0,1,2],xmm6[0]
mulss %xmm7, %xmm5
insertps $0x30, %xmm5, %xmm10 # xmm10 = xmm10[0,1,2],xmm5[0]
mulss %xmm7, %xmm0
mulss %xmm1, %xmm7
insertps $0x10, 0x4(%rdx,%rax), %xmm4 # xmm4 = xmm4[0],mem[0],xmm4[2,3]
insertps $0x30, %xmm7, %xmm3 # xmm3 = xmm3[0,1,2],xmm7[0]
insertps $0x20, 0x3c(%rdx,%rax), %xmm4 # xmm4 = xmm4[0,1],mem[0],xmm4[3]
insertps $0x30, %xmm0, %xmm4 # xmm4 = xmm4[0,1,2],xmm0[0]
jmp 0x1d14f9
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x10(%rsi,%rdi), %xmm9
insertps $0x20, 0x8(%rsi,%rdi), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
movsd 0x34(%rsi,%rdi), %xmm0
movss (%rsi,%rdi), %xmm7
movss 0xc(%rsi,%rdi), %xmm8
movlhps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm7 # xmm7 = xmm7[0,2],xmm0[1,3]
movss 0x18(%rsi,%rdi), %xmm10
movsd 0x1c(%rsi,%rdi), %xmm0
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x24(%rsi,%rdi), %xmm3
movss 0x28(%rsi,%rdi), %xmm2
movss 0x2c(%rsi,%rdi), %xmm0
movss 0x30(%rsi,%rdi), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d1b2f5(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d1b2ed(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm10 # xmm10 = xmm10[0,1,2],xmm3[0]
movaps %xmm10, 0x30(%rsp)
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
movaps %xmm7, 0x10(%rsp)
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%rsi,%rdi), %xmm8 # xmm8 = xmm8[0],mem[0],xmm8[2,3]
insertps $0x30, %xmm4, %xmm9 # xmm9 = xmm9[0,1,2],xmm4[0]
movaps %xmm9, (%rsp)
insertps $0x20, 0x3c(%rsi,%rdi), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
insertps $0x30, %xmm0, %xmm8 # xmm8 = xmm8[0,1,2],xmm0[0]
movaps %xmm8, 0x40(%rsp)
jmp 0x1d178e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movss (%rdx,%rax), %xmm10
movss 0x4(%rdx,%rax), %xmm4
movss 0x8(%rdx,%rax), %xmm3
movss 0xc(%rdx,%rax), %xmm2
insertps $0x1c, 0x10(%rdx,%rax), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdx,%rax), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdx,%rax), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdx,%rax), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdx,%rax), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdx,%rax), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdx,%rax), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
movaps %xmm10, 0x10(%rsp)
movaps %xmm2, %xmm7
shufps $0xff, %xmm2, %xmm7 # xmm7 = xmm7[3,3],xmm2[3,3]
movaps %xmm10, %xmm8
shufps $0xff, %xmm10, %xmm8 # xmm8 = xmm8[3,3],xmm10[3,3]
movaps %xmm4, %xmm0
shufps $0xff, %xmm4, %xmm0 # xmm0 = xmm0[3,3],xmm4[3,3]
movaps %xmm3, %xmm5
shufps $0xff, %xmm3, %xmm5 # xmm5 = xmm5[3,3],xmm3[3,3]
movaps %xmm8, %xmm9
mulss %xmm8, %xmm9
movaps %xmm5, %xmm13
mulss %xmm7, %xmm13
movaps %xmm8, %xmm6
movaps %xmm8, %xmm11
movaps %xmm0, %xmm1
mulss %xmm7, %xmm1
mulss %xmm7, %xmm8
movaps %xmm7, %xmm12
mulss %xmm7, %xmm12
movaps %xmm12, %xmm7
addss %xmm9, %xmm7
movaps 0x1d1b173(%rip), %xmm15 # 0x1eec6d0
mulss %xmm0, %xmm6
movaps %xmm6, %xmm10
addss %xmm13, %xmm10
subss %xmm13, %xmm6
movaps %xmm0, %xmm14
xorps %xmm15, %xmm14
mulss %xmm0, %xmm14
addss %xmm14, %xmm7
xorps %xmm5, %xmm15
mulss %xmm5, %xmm15
addss %xmm15, %xmm7
mulss %xmm5, %xmm11
subss %xmm9, %xmm12
movaps %xmm0, %xmm9
mulss %xmm0, %xmm9
addss %xmm12, %xmm9
addss %xmm15, %xmm9
movaps %xmm11, %xmm13
subss %xmm1, %xmm13
mulss %xmm5, %xmm0
addss %xmm11, %xmm1
movaps %xmm0, %xmm11
addss %xmm8, %xmm11
subss %xmm8, %xmm0
addss %xmm10, %xmm10
addss %xmm13, %xmm13
addss %xmm14, %xmm12
movaps 0x10(%rsp), %xmm14
mulss %xmm5, %xmm5
addss %xmm12, %xmm5
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movaps 0x1d1b100(%rip), %xmm8 # 0x1eec700
mulps %xmm8, %xmm13
movsd 0x1d1b0e3(%rip), %xmm12 # 0x1eec6f0
mulps %xmm12, %xmm10
addps %xmm13, %xmm10
movss 0x1d1b0f6(%rip), %xmm13 # 0x1eec714
mulps %xmm13, %xmm7
addps %xmm10, %xmm7
addss %xmm11, %xmm11
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm8, %xmm11
mulps %xmm12, %xmm9
addps %xmm11, %xmm9
addss %xmm6, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm13, %xmm6
addps %xmm9, %xmm6
xorps %xmm9, %xmm9
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm8, %xmm5
movaps %xmm14, %xmm8
shufps $0xe9, %xmm9, %xmm8 # xmm8 = xmm8[1,2],xmm9[2,3]
blendps $0x4, %xmm4, %xmm8 # xmm8 = xmm8[0,1],xmm4[2],xmm8[3]
addss %xmm1, %xmm1
addss %xmm0, %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm12, %xmm0
addps %xmm5, %xmm0
mulps %xmm13, %xmm1
addps %xmm0, %xmm1
addps %xmm9, %xmm8
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm3, %xmm0
movaps %xmm3, %xmm10
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
mulps %xmm1, %xmm3
movaps %xmm2, %xmm5
movaps %xmm2, %xmm11
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
mulps %xmm1, %xmm2
movaps %xmm1, %xmm12
mulps %xmm9, %xmm12
mulps %xmm6, %xmm9
addps %xmm12, %xmm9
mulps %xmm7, %xmm14
addps %xmm9, %xmm14
movaps %xmm4, %xmm1
shufps $0x55, %xmm4, %xmm4 # xmm4 = xmm4[1,1,1,1]
mulps %xmm6, %xmm4
addps %xmm12, %xmm4
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm7, %xmm1
addps %xmm4, %xmm1
shufps $0x55, %xmm10, %xmm10 # xmm10 = xmm10[1,1,1,1]
mulps %xmm6, %xmm10
addps %xmm3, %xmm10
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm7, %xmm0
addps %xmm10, %xmm0
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
mulps %xmm6, %xmm11
addps %xmm2, %xmm11
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm7, %xmm5
addps %xmm11, %xmm5
addps %xmm8, %xmm5
jmp 0x1d0fa5
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movss (%rsi,%rdi), %xmm0
movss 0x4(%rsi,%rdi), %xmm1
movss 0x8(%rsi,%rdi), %xmm2
movss 0xc(%rsi,%rdi), %xmm3
insertps $0x1c, 0x10(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
movaps %xmm0, 0x10(%rsp)
insertps $0x1c, 0x14(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
movaps %xmm1, 0x40(%rsp)
insertps $0x1c, 0x18(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
movaps %xmm2, (%rsp)
insertps $0x1c, 0x1c(%rsi,%rdi), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rdi), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
movaps %xmm3, 0x30(%rsp)
incl %ecx
imulq $0x38, %rcx, %rcx
leaq (%rdx,%rcx), %rsi
movl 0x20(%rdx,%rcx), %ecx
cmpl $0x9134, %ecx # imm = 0x9134
je 0x1d1941
cmpl $0x9234, %ecx # imm = 0x9234
je 0x1d17eb
cmpl $0xb001, %ecx # imm = 0xB001
je 0x1d1850
cmpl $0x9244, %ecx # imm = 0x9244
jne 0x1d19a9
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movaps (%rcx,%rax), %xmm7
movaps 0x10(%rcx,%rax), %xmm9
movaps 0x20(%rcx,%rax), %xmm8
movaps 0x30(%rcx,%rax), %xmm10
jmp 0x1d19a9
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x4(%rcx,%rax), %xmm0
movss (%rcx,%rax), %xmm7
shufps $0x4c, %xmm0, %xmm7 # xmm7 = xmm7[0,3],xmm0[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
movsd 0x10(%rcx,%rax), %xmm0
movss 0xc(%rcx,%rax), %xmm9
shufps $0x4c, %xmm0, %xmm9 # xmm9 = xmm9[0,3],xmm0[0,1]
shufps $0x78, %xmm9, %xmm9 # xmm9 = xmm9[0,2,3,1]
movsd 0x1c(%rcx,%rax), %xmm0
movss 0x18(%rcx,%rax), %xmm8
shufps $0x4c, %xmm0, %xmm8 # xmm8 = xmm8[0,3],xmm0[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x28(%rcx,%rax), %xmm0
movss 0x24(%rcx,%rax), %xmm10
shufps $0x4c, %xmm0, %xmm10 # xmm10 = xmm10[0,3],xmm0[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
jmp 0x1d19a9
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x10(%rcx,%rax), %xmm8
insertps $0x20, 0x8(%rcx,%rax), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
movsd 0x34(%rcx,%rax), %xmm0
movss (%rcx,%rax), %xmm7
movss 0xc(%rcx,%rax), %xmm9
movlhps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm7 # xmm7 = xmm7[0,2],xmm0[1,3]
movss 0x18(%rcx,%rax), %xmm10
movsd 0x1c(%rcx,%rax), %xmm0
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x24(%rcx,%rax), %xmm3
movss 0x28(%rcx,%rax), %xmm2
movss 0x2c(%rcx,%rax), %xmm0
movss 0x30(%rcx,%rax), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d1ae32(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d1ae2a(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm10 # xmm10 = xmm10[0,1,2],xmm3[0]
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%rcx,%rax), %xmm9 # xmm9 = xmm9[0],mem[0],xmm9[2,3]
insertps $0x30, %xmm4, %xmm8 # xmm8 = xmm8[0,1,2],xmm4[0]
insertps $0x20, 0x3c(%rcx,%rax), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
insertps $0x30, %xmm0, %xmm9 # xmm9 = xmm9[0,1,2],xmm0[0]
jmp 0x1d19a9
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movss (%rcx,%rax), %xmm7
movss 0x4(%rcx,%rax), %xmm9
movss 0x8(%rcx,%rax), %xmm8
movss 0xc(%rcx,%rax), %xmm10
insertps $0x1c, 0x10(%rcx,%rax), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x20(%rcx,%rax), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
insertps $0x1c, 0x14(%rcx,%rax), %xmm9 # xmm9 = xmm9[0],mem[0],zero,zero
insertps $0x28, 0x24(%rcx,%rax), %xmm9 # xmm9 = xmm9[0,1],mem[0],zero
insertps $0x1c, 0x18(%rcx,%rax), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x28(%rcx,%rax), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rcx,%rax), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rcx,%rax), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
movaps 0x30(%rsp), %xmm2
shufps $0xff, %xmm2, %xmm2 # xmm2 = xmm2[3,3,3,3]
movaps 0x10(%rsp), %xmm1
shufps $0xff, %xmm1, %xmm1 # xmm1 = xmm1[3,3,3,3]
movaps 0x40(%rsp), %xmm3
shufps $0xff, %xmm3, %xmm3 # xmm3 = xmm3[3,3,3,3]
movaps (%rsp), %xmm4
shufps $0xff, %xmm4, %xmm4 # xmm4 = xmm4[3,3,3,3]
movaps %xmm10, 0x120(%rsp)
shufps $0xff, %xmm10, %xmm10 # xmm10 = xmm10[3,3,3,3]
movaps %xmm7, 0x150(%rsp)
shufps $0xff, %xmm7, %xmm7 # xmm7 = xmm7[3,3,3,3]
movaps %xmm9, 0x130(%rsp)
shufps $0xff, %xmm9, %xmm9 # xmm9 = xmm9[3,3,3,3]
movaps %xmm8, 0x140(%rsp)
movaps %xmm8, %xmm0
shufps $0xff, %xmm8, %xmm0 # xmm0 = xmm0[3,3],xmm8[3,3]
movaps %xmm1, 0xe0(%rsp)
mulss %xmm7, %xmm1
movaps %xmm2, 0x110(%rsp)
mulss %xmm10, %xmm2
addss %xmm1, %xmm2
movaps %xmm3, 0x80(%rsp)
movaps %xmm3, %xmm1
movaps %xmm9, 0x60(%rsp)
mulss %xmm9, %xmm1
addss %xmm2, %xmm1
movaps %xmm4, 0x70(%rsp)
mulss %xmm0, %xmm4
addss %xmm1, %xmm4
movaps 0x1d1ac7f(%rip), %xmm1 # 0x1eec6d0
movaps %xmm4, %xmm3
xorps %xmm1, %xmm3
movaps %xmm3, %xmm2
movaps %xmm4, %xmm5
ucomiss %xmm4, %xmm3
movaps %xmm0, %xmm3
xorps %xmm1, %xmm3
movaps %xmm3, 0xd0(%rsp)
ja 0x1d1a7b
movaps %xmm10, 0xb0(%rsp)
jmp 0x1d1a92
movaps 0x1d1ac4e(%rip), %xmm1 # 0x1eec6d0
xorps %xmm1, %xmm10
movaps %xmm10, 0xb0(%rsp)
xorps %xmm1, %xmm7
movaps %xmm7, 0xc0(%rsp)
ja 0x1d1aa4
movaps %xmm0, 0xd0(%rsp)
movaps %xmm5, %xmm1
movaps %xmm5, %xmm0
cmpltss %xmm2, %xmm0
movaps %xmm0, 0x100(%rsp)
maxss %xmm5, %xmm2
movaps %xmm2, 0x90(%rsp)
andps 0x1d1abf6(%rip), %xmm1 # 0x1eec6c0
movss 0x1d1eeae(%rip), %xmm2 # 0x1ef0980
mulss %xmm1, %xmm2
addss 0x1d1eea6(%rip), %xmm2 # 0x1ef0984
movaps 0x60(%rsp), %xmm0
mulss %xmm1, %xmm2
addss 0x1d1ee99(%rip), %xmm2 # 0x1ef0988
xorps 0x1d1abda(%rip), %xmm0 # 0x1eec6d0
movaps %xmm0, 0x50(%rsp)
mulss %xmm1, %xmm2
addss 0x1d1ee85(%rip), %xmm2 # 0x1ef098c
mulss %xmm1, %xmm2
addss 0x1d1ee7d(%rip), %xmm2 # 0x1ef0990
mulss %xmm1, %xmm2
addss 0x1d1ee75(%rip), %xmm2 # 0x1ef0994
movss %xmm2, 0x2c(%rsp)
movss 0x1d1abe7(%rip), %xmm0 # 0x1eec714
movaps %xmm1, 0xf0(%rsp)
subss %xmm1, %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jb 0x1d1b47
sqrtss %xmm0, %xmm0
jmp 0x1d1b4c
callq 0x6aa20
mulss 0x2c(%rsp), %xmm0
movss 0x1d1ee3e(%rip), %xmm1 # 0x1ef0998
movaps %xmm1, %xmm2
subss %xmm0, %xmm2
movaps 0x90(%rsp), %xmm0
xorps %xmm3, %xmm3
cmpltss %xmm3, %xmm0
maxss %xmm2, %xmm3
movaps %xmm0, %xmm2
andnps %xmm3, %xmm2
xorps 0x1d1ab4e(%rip), %xmm3 # 0x1eec6d0
andps %xmm0, %xmm3
orps %xmm2, %xmm3
movaps %xmm1, %xmm2
subss %xmm3, %xmm2
movss 0x1d1ab7d(%rip), %xmm5 # 0x1eec714
movaps %xmm5, %xmm3
cmpltss 0xf0(%rsp), %xmm3
movss 0x1d1edf0(%rip), %xmm0 # 0x1ef099c
andps %xmm3, %xmm0
andnps %xmm2, %xmm3
orps %xmm3, %xmm0
mulss 0xa0(%rsp), %xmm0
movss 0x1d1edda(%rip), %xmm2 # 0x1ef09a0
mulss %xmm0, %xmm2
roundss $0x9, %xmm2, %xmm2
cvttss2si %xmm2, %eax
mulss %xmm1, %xmm2
subss %xmm2, %xmm0
movaps %xmm0, %xmm3
mulss %xmm0, %xmm3
movss 0x1d1edb9(%rip), %xmm1 # 0x1ef09a4
mulss %xmm3, %xmm1
addss 0x1d1edb1(%rip), %xmm1 # 0x1ef09a8
movss 0x1d1edad(%rip), %xmm2 # 0x1ef09ac
mulss %xmm3, %xmm2
addss 0x1d1eda5(%rip), %xmm2 # 0x1ef09b0
movaps 0x100(%rsp), %xmm6
movaps 0x50(%rsp), %xmm4
andps %xmm6, %xmm4
movaps %xmm4, 0x50(%rsp)
mulss %xmm3, %xmm1
addss 0x1d1ed88(%rip), %xmm1 # 0x1ef09b4
andnps 0x60(%rsp), %xmm6
mulss %xmm3, %xmm2
addss 0x1d1ed7b(%rip), %xmm2 # 0x1ef09b8
movl %eax, %ecx
mulss %xmm3, %xmm1
addss 0x1d1ed71(%rip), %xmm1 # 0x1ef09bc
andl $0x3, %ecx
mulss %xmm3, %xmm2
addss 0x1d1ed66(%rip), %xmm2 # 0x1ef09c0
mulss %xmm3, %xmm1
addss 0x1d1ed5e(%rip), %xmm1 # 0x1ef09c4
mulss %xmm3, %xmm2
movss 0x1d1aaaa(%rip), %xmm7 # 0x1eec71c
movaps %xmm7, %xmm4
addss %xmm7, %xmm2
mulss %xmm3, %xmm1
addss %xmm5, %xmm1
mulss %xmm3, %xmm2
addss %xmm5, %xmm2
mulss %xmm0, %xmm1
testb $0x1, %al
je 0x1d1c97
movaps %xmm2, %xmm11
jmp 0x1d1c9e
movaps %xmm1, %xmm11
movaps %xmm2, %xmm1
orps 0x50(%rsp), %xmm6
leal -0x1(%rcx), %eax
cmpl $0x2, %ecx
jb 0x1d1cb3
xorps 0x1d1aa1d(%rip), %xmm11 # 0x1eec6d0
cmpl $0x2, %eax
jae 0x1d1cbf
xorps 0x1d1aa11(%rip), %xmm1 # 0x1eec6d0
movaps 0x90(%rsp), %xmm7
movaps %xmm7, %xmm0
movaps 0x110(%rsp), %xmm15
mulss %xmm15, %xmm0
movaps 0xb0(%rsp), %xmm14
subss %xmm14, %xmm0
movaps %xmm7, %xmm9
mulss 0xe0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm13
subss %xmm13, %xmm9
movaps %xmm7, %xmm8
mulss 0x80(%rsp), %xmm8
subss %xmm6, %xmm8
mulss 0x70(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm3
subss %xmm3, %xmm7
movaps %xmm9, %xmm2
mulss %xmm9, %xmm2
movaps %xmm0, %xmm5
mulss %xmm0, %xmm5
addss %xmm2, %xmm5
movaps %xmm8, %xmm2
mulss %xmm8, %xmm2
addss %xmm5, %xmm2
movaps %xmm7, %xmm5
mulss %xmm7, %xmm5
addss %xmm2, %xmm5
movaps %xmm5, %xmm10
rsqrtss %xmm5, %xmm10
movaps %xmm10, %xmm2
mulss %xmm4, %xmm5
mulss %xmm10, %xmm5
mulss %xmm10, %xmm10
mulss %xmm5, %xmm10
movss 0x1d1a99d(%rip), %xmm4 # 0x1eec718
mulss %xmm4, %xmm2
addss %xmm2, %xmm10
mulss %xmm10, %xmm0
movaps %xmm11, 0x60(%rsp)
mulss %xmm11, %xmm0
movaps %xmm1, %xmm11
mulss %xmm15, %xmm11
subss %xmm0, %xmm11
movaps 0xa0(%rsp), %xmm2
movss 0x1d1a961(%rip), %xmm12 # 0x1eec714
subss %xmm2, %xmm12
mulss %xmm2, %xmm14
mulss %xmm12, %xmm15
addss %xmm14, %xmm15
movaps 0xe0(%rsp), %xmm14
mulss %xmm2, %xmm13
movaps %xmm12, %xmm5
mulss %xmm14, %xmm5
addss %xmm13, %xmm5
mulss %xmm2, %xmm6
movaps %xmm12, %xmm0
mulss 0x80(%rsp), %xmm0
addss %xmm6, %xmm0
mulss %xmm2, %xmm3
movaps %xmm12, 0x50(%rsp)
movaps %xmm12, %xmm2
mulss 0x70(%rsp), %xmm2
addss %xmm3, %xmm2
movaps %xmm5, %xmm12
mulss %xmm5, %xmm12
movaps %xmm15, %xmm13
mulss %xmm15, %xmm13
addss %xmm12, %xmm13
movaps %xmm0, %xmm12
mulss %xmm0, %xmm12
addss %xmm13, %xmm12
movaps %xmm2, %xmm13
mulss %xmm2, %xmm13
addss %xmm12, %xmm13
movaps %xmm13, %xmm12
mulss 0x1d1a8cc(%rip), %xmm13 # 0x1eec71c
rsqrtss %xmm12, %xmm12
mulss %xmm12, %xmm4
mulss %xmm12, %xmm13
mulss %xmm12, %xmm12
mulss %xmm13, %xmm12
addss %xmm4, %xmm12
movss 0x1d1eb52(%rip), %xmm6 # 0x1ef09c8
movaps 0x90(%rsp), %xmm3
ucomiss %xmm6, %xmm3
cmpltss %xmm3, %xmm6
movaps %xmm6, %xmm4
andnps %xmm11, %xmm4
mulss %xmm12, %xmm15
andps %xmm15, %xmm6
orps %xmm4, %xmm6
ja 0x1d1eeb
mulss %xmm10, %xmm9
mulss %xmm10, %xmm8
mulss %xmm10, %xmm7
movaps 0x60(%rsp), %xmm2
mulss %xmm2, %xmm9
mulss %xmm1, %xmm14
subss %xmm9, %xmm14
mulss %xmm2, %xmm8
movaps 0x80(%rsp), %xmm0
mulss %xmm1, %xmm0
subss %xmm8, %xmm0
mulss %xmm2, %xmm7
movaps 0x70(%rsp), %xmm2
mulss %xmm1, %xmm2
subss %xmm7, %xmm2
movaps %xmm14, %xmm5
jmp 0x1d1efa
mulss %xmm12, %xmm5
mulss %xmm12, %xmm0
mulss %xmm12, %xmm2
movaps 0xa0(%rsp), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps 0x150(%rsp), %xmm3
mulps %xmm1, %xmm3
movaps 0x50(%rsp), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movaps 0x10(%rsp), %xmm14
mulps %xmm13, %xmm14
addps %xmm3, %xmm14
movaps 0x130(%rsp), %xmm3
mulps %xmm1, %xmm3
movaps 0x40(%rsp), %xmm15
mulps %xmm13, %xmm15
addps %xmm3, %xmm15
movaps 0x140(%rsp), %xmm3
mulps %xmm1, %xmm3
movaps (%rsp), %xmm4
mulps %xmm13, %xmm4
addps %xmm3, %xmm4
movaps %xmm4, (%rsp)
mulps 0x120(%rsp), %xmm1
mulps 0x30(%rsp), %xmm13
addps %xmm1, %xmm13
movaps %xmm5, %xmm1
mulss %xmm5, %xmm1
movaps %xmm6, %xmm7
mulss %xmm6, %xmm7
movaps %xmm7, %xmm3
addss %xmm1, %xmm3
movaps %xmm0, %xmm10
movaps 0x1d1a740(%rip), %xmm11 # 0x1eec6d0
xorps %xmm11, %xmm10
mulss %xmm0, %xmm10
addss %xmm10, %xmm3
xorps %xmm2, %xmm11
mulss %xmm2, %xmm11
addss %xmm11, %xmm3
movaps %xmm6, %xmm9
mulss %xmm2, %xmm9
movaps %xmm5, %xmm4
mulss %xmm0, %xmm4
movaps %xmm4, %xmm8
addss %xmm9, %xmm8
subss %xmm9, %xmm4
movaps %xmm5, %xmm12
mulss %xmm2, %xmm12
subss %xmm1, %xmm7
movaps %xmm0, %xmm9
mulss %xmm0, %xmm9
addss %xmm7, %xmm9
addss %xmm11, %xmm9
movaps %xmm6, %xmm1
mulss %xmm0, %xmm1
mulss %xmm5, %xmm6
movaps %xmm12, %xmm11
subss %xmm1, %xmm11
mulss %xmm2, %xmm0
addss %xmm12, %xmm1
movaps %xmm0, %xmm5
addss %xmm6, %xmm5
subss %xmm6, %xmm0
addss %xmm8, %xmm8
addss %xmm11, %xmm11
addss %xmm10, %xmm7
mulss %xmm2, %xmm2
addss %xmm7, %xmm2
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps 0x1d1a6c2(%rip), %xmm6 # 0x1eec700
mulps %xmm6, %xmm11
movsd 0x1d1a6a5(%rip), %xmm10 # 0x1eec6f0
mulps %xmm10, %xmm8
addps %xmm11, %xmm8
movss 0x1d1a6b8(%rip), %xmm11 # 0x1eec714
mulps %xmm11, %xmm3
addps %xmm8, %xmm3
addss %xmm5, %xmm5
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm6, %xmm5
mulps %xmm10, %xmm9
addps %xmm5, %xmm9
addss %xmm4, %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm11, %xmm4
addps %xmm9, %xmm4
xorps %xmm7, %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm6, %xmm2
movaps %xmm14, %xmm6
shufps $0xe9, %xmm7, %xmm6 # xmm6 = xmm6[1,2],xmm7[2,3]
blendps $0x4, %xmm15, %xmm6 # xmm6 = xmm6[0,1],xmm15[2],xmm6[3]
addss %xmm1, %xmm1
addss %xmm0, %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm10, %xmm0
addps %xmm2, %xmm0
mulps %xmm11, %xmm1
addps %xmm0, %xmm1
addps %xmm7, %xmm6
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps (%rsp), %xmm5
movaps %xmm5, %xmm0
movaps %xmm5, %xmm2
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
mulps %xmm1, %xmm5
movaps %xmm5, %xmm10
movaps %xmm13, %xmm5
movaps %xmm13, %xmm8
shufps $0xaa, %xmm13, %xmm13 # xmm13 = xmm13[2,2,2,2]
mulps %xmm1, %xmm13
movaps %xmm1, %xmm9
mulps %xmm7, %xmm9
mulps %xmm4, %xmm7
addps %xmm9, %xmm7
mulps %xmm3, %xmm14
addps %xmm7, %xmm14
movaps %xmm15, %xmm1
shufps $0x55, %xmm15, %xmm15 # xmm15 = xmm15[1,1,1,1]
mulps %xmm4, %xmm15
addps %xmm9, %xmm15
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm3, %xmm1
addps %xmm15, %xmm1
shufps $0x55, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
mulps %xmm4, %xmm2
addps %xmm10, %xmm2
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm3, %xmm0
addps %xmm2, %xmm0
shufps $0x55, %xmm8, %xmm8 # xmm8 = xmm8[1,1,1,1]
mulps %xmm4, %xmm8
addps %xmm13, %xmm8
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm3, %xmm5
addps %xmm8, %xmm5
addps %xmm6, %xmm5
jmp 0x1d0fa5
nop
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersector1MB::occluded(embree::sse42::InstanceArrayIntersector1MB::Precalculations const&, embree::RayK<1>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
bool InstanceArrayIntersector1MB::occluded(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return false;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
if ((ray.mask & instance->mask) == 0)
return false;
#endif
RTCRayQueryContext* user_context = context->user;
bool occluded = false;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_, ray.time());
const Vec3ff ray_org = ray.org;
const Vec3ff ray_dir = ray.dir;
ray.org = Vec3ff(xfmPoint(world2local, ray_org), ray.tnear());
ray.dir = Vec3ff(xfmVector(world2local, ray_dir), ray.time());
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.occluded((RTCRay&)ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
occluded = ray.tfar < 0.0f;
instance_id_stack::pop(user_context);
}
return occluded;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
movq %rdx, %r14
movq %rsi, %rbx
movq (%rdx), %rdx
movl (%rcx), %eax
movl 0x4(%rcx), %esi
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rsi,8), %rdx
movq 0x58(%rdx), %r15
testq %r15, %r15
jne 0x1d21b8
movq 0x90(%rdx), %rdi
movq 0xa0(%rdx), %r8
imulq %rax, %r8
movl (%rdi,%r8), %edi
movl $0xffffffff, %r8d # imm = 0xFFFFFFFF
cmpq %r8, %rdi
je 0x1d21b5
movq 0x60(%rdx), %r8
movq (%r8,%rdi,8), %r15
jmp 0x1d21b8
xorl %r15d, %r15d
testq %r15, %r15
je 0x1d2257
movl 0x34(%rdx), %edi
testl %edi, 0x24(%rbx)
je 0x1d2257
movq 0x8(%r14), %r12
cmpl $-0x1, (%r12)
jne 0x1d2257
movl %esi, (%r12)
movl %eax, 0x4(%r12)
movl (%rcx), %eax
cmpl $0x1, 0x24(%rdx)
jne 0x1d225e
movzbl 0x3d(%rdx), %esi
shll $0x8, %esi
movq 0x88(%rdx), %rcx
movl 0x20(%rcx), %edx
cmpl $0x100, %esi # imm = 0x100
je 0x1d2a6d
cmpl $0x9134, %edx # imm = 0x9134
je 0x1d25e2
cmpl $0x9234, %edx # imm = 0x9234
je 0x1d2326
cmpl $0xb001, %edx # imm = 0xB001
je 0x1d23e7
cmpl $0x9244, %edx # imm = 0x9244
jne 0x1d2908
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movaps (%rdx,%rax), %xmm14
movaps 0x10(%rdx,%rax), %xmm1
movaps 0x20(%rdx,%rax), %xmm0
movaps 0x30(%rdx,%rax), %xmm5
jmp 0x1d2908
xorl %eax, %eax
jmp 0x1d2a5e
movss 0x1c(%rbx), %xmm15
movss 0x28(%rdx), %xmm0
movss 0x2c(%rdx), %xmm1
movss 0x30(%rdx), %xmm2
subss %xmm1, %xmm15
subss %xmm1, %xmm2
divss %xmm2, %xmm15
mulss %xmm0, %xmm15
roundss $0x9, %xmm15, %xmm1
addss 0x1d1e737(%rip), %xmm0 # 0x1ef09cc
minss %xmm0, %xmm1
xorps %xmm0, %xmm0
maxss %xmm1, %xmm0
cvttss2si %xmm0, %ecx
subss %xmm0, %xmm15
movzbl 0x3d(%rdx), %r8d
shll $0x8, %r8d
movq 0x88(%rdx), %rdx
imulq $0x38, %rcx, %rsi
leaq (%rdx,%rsi), %rdi
movl 0x20(%rdx,%rsi), %esi
cmpl $0x100, %r8d # imm = 0x100
je 0x1d2abe
cmpl $0x9134, %esi # imm = 0x9134
je 0x1d2649
cmpl $0x9234, %esi # imm = 0x9234
je 0x1d2385
cmpl $0xb001, %esi # imm = 0xB001
je 0x1d24e3
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1d26ae
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movaps (%rsi,%rdi), %xmm14
movaps 0x10(%rsi,%rdi), %xmm1
movaps 0x20(%rsi,%rdi), %xmm0
movaps 0x30(%rsi,%rdi), %xmm2
jmp 0x1d26ae
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x4(%rdx,%rax), %xmm0
movss (%rdx,%rax), %xmm14
shufps $0x4c, %xmm0, %xmm14 # xmm14 = xmm14[0,3],xmm0[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%rdx,%rax), %xmm0
movss 0xc(%rdx,%rax), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x1c(%rdx,%rax), %xmm2
movss 0x18(%rdx,%rax), %xmm0
shufps $0x4c, %xmm2, %xmm0 # xmm0 = xmm0[0,3],xmm2[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x28(%rdx,%rax), %xmm2
movss 0x24(%rdx,%rax), %xmm5
shufps $0x4c, %xmm2, %xmm5 # xmm5 = xmm5[0,3],xmm2[0,1]
shufps $0x78, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x1d2908
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x4(%rsi,%rdi), %xmm0
movss (%rsi,%rdi), %xmm14
shufps $0x4c, %xmm0, %xmm14 # xmm14 = xmm14[0,3],xmm0[0,1]
shufps $0x78, %xmm14, %xmm14 # xmm14 = xmm14[0,2,3,1]
movsd 0x10(%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movsd 0x1c(%rsi,%rdi), %xmm2
movss 0x18(%rsi,%rdi), %xmm0
shufps $0x4c, %xmm2, %xmm0 # xmm0 = xmm0[0,3],xmm2[0,1]
shufps $0x78, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
movsd 0x28(%rsi,%rdi), %xmm3
movss 0x24(%rsi,%rdi), %xmm2
shufps $0x4c, %xmm3, %xmm2 # xmm2 = xmm2[0,3],xmm3[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x1d26ae
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x10(%rdx,%rax), %xmm0
insertps $0x20, 0x8(%rdx,%rax), %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
movsd 0x34(%rdx,%rax), %xmm2
movss (%rdx,%rax), %xmm14
movss 0xc(%rdx,%rax), %xmm1
movlhps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm14 # xmm14 = xmm14[0,2],xmm2[1,3]
movss 0x18(%rdx,%rax), %xmm5
movsd 0x1c(%rdx,%rax), %xmm2
movlhps %xmm2, %xmm5 # xmm5 = xmm5[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm5 # xmm5 = xmm5[0,2],xmm2[1,3]
movss 0x24(%rdx,%rax), %xmm6
movss 0x28(%rdx,%rax), %xmm4
movss 0x2c(%rdx,%rax), %xmm2
movss 0x30(%rdx,%rax), %xmm3
movaps %xmm4, %xmm7
mulss %xmm4, %xmm7
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movaps %xmm2, %xmm7
mulss %xmm2, %xmm7
addss %xmm8, %xmm7
movaps %xmm3, %xmm8
mulss %xmm3, %xmm8
addss %xmm7, %xmm8
movaps %xmm8, %xmm7
rsqrtss %xmm8, %xmm7
movss 0x1d1a294(%rip), %xmm9 # 0x1eec718
mulss %xmm7, %xmm9
mulss 0x1d1a28a(%rip), %xmm8 # 0x1eec71c
mulss %xmm7, %xmm8
mulss %xmm7, %xmm7
mulss %xmm8, %xmm7
addss %xmm9, %xmm7
mulss %xmm7, %xmm6
insertps $0x30, %xmm6, %xmm5 # xmm5 = xmm5[0,1,2],xmm6[0]
mulss %xmm7, %xmm4
insertps $0x30, %xmm4, %xmm14 # xmm14 = xmm14[0,1,2],xmm4[0]
mulss %xmm7, %xmm2
mulss %xmm3, %xmm7
insertps $0x10, 0x4(%rdx,%rax), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
insertps $0x30, %xmm7, %xmm0 # xmm0 = xmm0[0,1,2],xmm7[0]
insertps $0x20, 0x3c(%rdx,%rax), %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
insertps $0x30, %xmm2, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0]
jmp 0x1d2908
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x10(%rsi,%rdi), %xmm0
insertps $0x20, 0x8(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
movsd 0x34(%rsi,%rdi), %xmm2
movss (%rsi,%rdi), %xmm14
movss 0xc(%rsi,%rdi), %xmm1
movlhps %xmm2, %xmm14 # xmm14 = xmm14[0],xmm2[0]
shufps $0xd8, %xmm2, %xmm14 # xmm14 = xmm14[0,2],xmm2[1,3]
movss 0x18(%rsi,%rdi), %xmm2
movsd 0x1c(%rsi,%rdi), %xmm3
movlhps %xmm3, %xmm2 # xmm2 = xmm2[0],xmm3[0]
shufps $0xd8, %xmm3, %xmm2 # xmm2 = xmm2[0,2],xmm3[1,3]
movss 0x24(%rsi,%rdi), %xmm6
movss 0x28(%rsi,%rdi), %xmm5
movss 0x2c(%rsi,%rdi), %xmm3
movss 0x30(%rsi,%rdi), %xmm4
movaps %xmm5, %xmm7
mulss %xmm5, %xmm7
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movaps %xmm3, %xmm7
mulss %xmm3, %xmm7
addss %xmm8, %xmm7
movaps %xmm4, %xmm8
mulss %xmm4, %xmm8
addss %xmm7, %xmm8
movaps %xmm8, %xmm7
rsqrtss %xmm8, %xmm7
movss 0x1d1a195(%rip), %xmm9 # 0x1eec718
mulss %xmm7, %xmm9
mulss 0x1d1a18b(%rip), %xmm8 # 0x1eec71c
mulss %xmm7, %xmm8
mulss %xmm7, %xmm7
mulss %xmm8, %xmm7
addss %xmm9, %xmm7
mulss %xmm7, %xmm6
insertps $0x30, %xmm6, %xmm2 # xmm2 = xmm2[0,1,2],xmm6[0]
mulss %xmm7, %xmm5
insertps $0x30, %xmm5, %xmm14 # xmm14 = xmm14[0,1,2],xmm5[0]
mulss %xmm7, %xmm3
mulss %xmm4, %xmm7
insertps $0x10, 0x4(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
insertps $0x30, %xmm7, %xmm0 # xmm0 = xmm0[0,1,2],xmm7[0]
insertps $0x20, 0x3c(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
insertps $0x30, %xmm3, %xmm1 # xmm1 = xmm1[0,1,2],xmm3[0]
jmp 0x1d26ae
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movss (%rdx,%rax), %xmm14
movss 0x4(%rdx,%rax), %xmm1
movss 0x8(%rdx,%rax), %xmm0
movss 0xc(%rdx,%rax), %xmm5
insertps $0x1c, 0x10(%rdx,%rax), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdx,%rax), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdx,%rax), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdx,%rax), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdx,%rax), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdx,%rax), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdx,%rax), %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdx,%rax), %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
jmp 0x1d2908
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movss (%rsi,%rdi), %xmm14
movss 0x4(%rsi,%rdi), %xmm1
movss 0x8(%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm2
insertps $0x1c, 0x10(%rsi,%rdi), %xmm14 # xmm14 = xmm14[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rdi), %xmm14 # xmm14 = xmm14[0,1],mem[0],zero
insertps $0x1c, 0x14(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
insertps $0x1c, 0x18(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
incl %ecx
imulq $0x38, %rcx, %rcx
leaq (%rdx,%rcx), %rsi
movl 0x20(%rdx,%rcx), %ecx
cmpl $0x9134, %ecx # imm = 0x9134
je 0x1d2868
cmpl $0x9234, %ecx # imm = 0x9234
je 0x1d2708
cmpl $0xb001, %ecx # imm = 0xB001
je 0x1d2764
cmpl $0x9244, %ecx # imm = 0x9244
jne 0x1d28c7
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movaps (%rcx,%rax), %xmm4
movaps 0x10(%rcx,%rax), %xmm6
movaps 0x20(%rcx,%rax), %xmm3
movaps 0x30(%rcx,%rax), %xmm7
jmp 0x1d28c7
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x4(%rcx,%rax), %xmm3
movss (%rcx,%rax), %xmm4
shufps $0x4c, %xmm3, %xmm4 # xmm4 = xmm4[0,3],xmm3[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x10(%rcx,%rax), %xmm3
movss 0xc(%rcx,%rax), %xmm6
shufps $0x4c, %xmm3, %xmm6 # xmm6 = xmm6[0,3],xmm3[0,1]
shufps $0x78, %xmm6, %xmm6 # xmm6 = xmm6[0,2,3,1]
movsd 0x1c(%rcx,%rax), %xmm5
movss 0x18(%rcx,%rax), %xmm3
shufps $0x4c, %xmm5, %xmm3 # xmm3 = xmm3[0,3],xmm5[0,1]
shufps $0x78, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
movsd 0x28(%rcx,%rax), %xmm5
movss 0x24(%rcx,%rax), %xmm7
shufps $0x4c, %xmm5, %xmm7 # xmm7 = xmm7[0,3],xmm5[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
jmp 0x1d28c7
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x10(%rcx,%rax), %xmm3
insertps $0x20, 0x8(%rcx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
movsd 0x34(%rcx,%rax), %xmm5
movss (%rcx,%rax), %xmm4
movss 0xc(%rcx,%rax), %xmm6
movlhps %xmm5, %xmm4 # xmm4 = xmm4[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm4 # xmm4 = xmm4[0,2],xmm5[1,3]
movss 0x18(%rcx,%rax), %xmm7
movsd 0x1c(%rcx,%rax), %xmm5
movlhps %xmm5, %xmm7 # xmm7 = xmm7[0],xmm5[0]
shufps $0xd8, %xmm5, %xmm7 # xmm7 = xmm7[0,2],xmm5[1,3]
movss 0x24(%rcx,%rax), %xmm10
movss 0x28(%rcx,%rax), %xmm9
movss 0x2c(%rcx,%rax), %xmm5
movss 0x30(%rcx,%rax), %xmm8
movaps %xmm9, %xmm11
mulss %xmm9, %xmm11
movaps %xmm10, %xmm12
mulss %xmm10, %xmm12
addss %xmm11, %xmm12
movaps %xmm5, %xmm11
mulss %xmm5, %xmm11
addss %xmm12, %xmm11
movaps %xmm8, %xmm12
mulss %xmm8, %xmm12
addss %xmm11, %xmm12
movaps %xmm12, %xmm11
rsqrtss %xmm12, %xmm11
movss 0x1d19f13(%rip), %xmm13 # 0x1eec718
mulss %xmm11, %xmm13
mulss 0x1d19f09(%rip), %xmm12 # 0x1eec71c
mulss %xmm11, %xmm12
mulss %xmm11, %xmm11
mulss %xmm12, %xmm11
addss %xmm13, %xmm11
mulss %xmm11, %xmm10
insertps $0x30, %xmm10, %xmm7 # xmm7 = xmm7[0,1,2],xmm10[0]
mulss %xmm11, %xmm9
insertps $0x30, %xmm9, %xmm4 # xmm4 = xmm4[0,1,2],xmm9[0]
mulss %xmm11, %xmm5
mulss %xmm8, %xmm11
insertps $0x10, 0x4(%rcx,%rax), %xmm6 # xmm6 = xmm6[0],mem[0],xmm6[2,3]
insertps $0x30, %xmm11, %xmm3 # xmm3 = xmm3[0,1,2],xmm11[0]
insertps $0x20, 0x3c(%rcx,%rax), %xmm6 # xmm6 = xmm6[0,1],mem[0],xmm6[3]
insertps $0x30, %xmm5, %xmm6 # xmm6 = xmm6[0,1,2],xmm5[0]
jmp 0x1d28c7
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movss (%rcx,%rax), %xmm4
movss 0x4(%rcx,%rax), %xmm6
movss 0x8(%rcx,%rax), %xmm3
movss 0xc(%rcx,%rax), %xmm7
insertps $0x1c, 0x10(%rcx,%rax), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x20(%rcx,%rax), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x14(%rcx,%rax), %xmm6 # xmm6 = xmm6[0],mem[0],zero,zero
insertps $0x28, 0x24(%rcx,%rax), %xmm6 # xmm6 = xmm6[0,1],mem[0],zero
insertps $0x1c, 0x18(%rcx,%rax), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x28(%rcx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rcx,%rax), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rcx,%rax), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
movss 0x1d19e45(%rip), %xmm5 # 0x1eec714
subss %xmm15, %xmm5
shufps $0x0, %xmm15, %xmm15 # xmm15 = xmm15[0,0,0,0]
mulps %xmm15, %xmm4
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm5, %xmm14
addps %xmm4, %xmm14
mulps %xmm15, %xmm6
mulps %xmm5, %xmm1
addps %xmm6, %xmm1
mulps %xmm15, %xmm3
mulps %xmm5, %xmm0
addps %xmm3, %xmm0
mulps %xmm7, %xmm15
mulps %xmm2, %xmm5
addps %xmm15, %xmm5
movaps %xmm0, %xmm4
shufps $0xc9, %xmm0, %xmm4 # xmm4 = xmm4[1,2],xmm0[0,3]
movaps %xmm1, %xmm6
shufps $0xc9, %xmm1, %xmm6 # xmm6 = xmm6[1,2],xmm1[0,3]
movaps %xmm0, %xmm3
mulps %xmm6, %xmm3
movaps %xmm1, %xmm2
mulps %xmm4, %xmm2
subps %xmm3, %xmm2
movaps %xmm2, %xmm3
shufps $0xc9, %xmm2, %xmm3 # xmm3 = xmm3[1,2],xmm2[0,3]
movaps %xmm14, %xmm7
shufps $0xc9, %xmm14, %xmm7 # xmm7 = xmm7[1,2],xmm14[0,3]
mulps %xmm14, %xmm4
mulps %xmm7, %xmm0
subps %xmm4, %xmm0
mulps %xmm1, %xmm7
mulps %xmm14, %xmm6
subps %xmm7, %xmm6
unpcklps %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
shufps $0xc9, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
dpps $0x7f, %xmm3, %xmm14
unpcklps %xmm6, %xmm3 # xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
xorps %xmm1, %xmm1
movss %xmm0, %xmm1 # xmm1 = xmm0[0],xmm1[1,2,3]
insertps $0x4a, %xmm0, %xmm0 # xmm0 = xmm0[1],zero,xmm0[2],zero
unpcklps %xmm1, %xmm2 # xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
movaps %xmm3, %xmm1
unpcklps %xmm0, %xmm1 # xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
unpckhps %xmm0, %xmm3 # xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
divps %xmm14, %xmm1
divps %xmm14, %xmm3
divps %xmm14, %xmm2
movaps %xmm5, %xmm0
shufps $0x0, %xmm5, %xmm0 # xmm0 = xmm0[0,0],xmm5[0,0]
movaps %xmm5, %xmm4
shufps $0x55, %xmm5, %xmm4 # xmm4 = xmm4[1,1],xmm5[1,1]
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
mulps %xmm2, %xmm5
mulps %xmm3, %xmm4
addps %xmm5, %xmm4
mulps %xmm1, %xmm0
addps %xmm4, %xmm0
movaps (%rbx), %xmm6
movaps %xmm6, 0x10(%rsp)
movaps 0x10(%rbx), %xmm8
movaps %xmm6, %xmm4
shufps $0x0, %xmm6, %xmm4 # xmm4 = xmm4[0,0],xmm6[0,0]
movaps %xmm6, %xmm5
shufps $0x55, %xmm6, %xmm5 # xmm5 = xmm5[1,1],xmm6[1,1]
shufps $0xaa, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
mulps %xmm2, %xmm6
subps %xmm0, %xmm6
mulps %xmm3, %xmm5
addps %xmm6, %xmm5
mulps %xmm1, %xmm4
addps %xmm5, %xmm4
insertps $0x30, 0xc(%rbx), %xmm4 # xmm4 = xmm4[0,1,2],mem[0]
movaps %xmm4, (%rbx)
movaps %xmm8, %xmm5
movaps %xmm8, (%rsp)
movaps %xmm8, %xmm0
shufps $0x0, %xmm8, %xmm0 # xmm0 = xmm0[0,0],xmm8[0,0]
movaps %xmm8, %xmm4
shufps $0x55, %xmm8, %xmm4 # xmm4 = xmm4[1,1],xmm8[1,1]
shufps $0xaa, %xmm8, %xmm5 # xmm5 = xmm5[2,2],xmm8[2,2]
mulps %xmm2, %xmm5
mulps %xmm3, %xmm4
addps %xmm5, %xmm4
mulps %xmm1, %xmm0
addps %xmm4, %xmm0
insertps $0x30, 0x1c(%rbx), %xmm0 # xmm0 = xmm0[0,1,2],mem[0]
movaps %xmm0, 0x10(%rbx)
movq 0x10(%r14), %rax
leaq 0x160(%rsp), %rdx
movq %r15, (%rdx)
movq %r12, 0x8(%rdx)
movq %rax, 0x10(%rdx)
leaq 0x58(%r15), %rdi
movq %rbx, %rsi
callq *0x80(%r15)
movaps 0x10(%rsp), %xmm0
movaps %xmm0, (%rbx)
movaps (%rsp), %xmm0
movaps %xmm0, 0x10(%rbx)
xorps %xmm0, %xmm0
ucomiss 0x20(%rbx), %xmm0
seta %al
orq $-0x1, (%r12)
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r14
popq %r15
retq
cmpl $0x9134, %edx # imm = 0x9134
je 0x1d2e07
cmpl $0x9234, %edx # imm = 0x9234
je 0x1d2b2d
cmpl $0xb001, %edx # imm = 0xB001
je 0x1d2bfe
cmpl $0x9244, %edx # imm = 0x9244
jne 0x1d2e69
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movaps (%rdx,%rax), %xmm10
movaps 0x10(%rdx,%rax), %xmm4
movaps 0x20(%rdx,%rax), %xmm3
movaps 0x30(%rdx,%rax), %xmm2
jmp 0x1d2e69
movaps %xmm15, 0xa0(%rsp)
cmpl $0x9134, %esi # imm = 0x9134
je 0x1d3089
cmpl $0x9234, %esi # imm = 0x9234
je 0x1d2b8c
cmpl $0xb001, %esi # imm = 0xB001
je 0x1d2cfa
cmpl $0x9244, %esi # imm = 0x9244
jne 0x1d30fe
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movaps (%rsi,%rdi), %xmm0
movaps %xmm0, 0x10(%rsp)
movaps 0x10(%rsi,%rdi), %xmm0
movaps %xmm0, 0x40(%rsp)
movaps 0x20(%rsi,%rdi), %xmm0
movaps %xmm0, (%rsp)
movaps 0x30(%rsi,%rdi), %xmm0
movaps %xmm0, 0x30(%rsp)
jmp 0x1d30fe
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x4(%rdx,%rax), %xmm0
movss (%rdx,%rax), %xmm10
shufps $0x4c, %xmm0, %xmm10 # xmm10 = xmm10[0,3],xmm0[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
movsd 0x10(%rdx,%rax), %xmm0
movss 0xc(%rdx,%rax), %xmm4
shufps $0x4c, %xmm0, %xmm4 # xmm4 = xmm4[0,3],xmm0[0,1]
shufps $0x78, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
movsd 0x1c(%rdx,%rax), %xmm0
movss 0x18(%rdx,%rax), %xmm3
shufps $0x4c, %xmm0, %xmm3 # xmm3 = xmm3[0,3],xmm0[0,1]
shufps $0x78, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
movsd 0x28(%rdx,%rax), %xmm0
movss 0x24(%rdx,%rax), %xmm2
shufps $0x4c, %xmm0, %xmm2 # xmm2 = xmm2[0,3],xmm0[0,1]
shufps $0x78, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x1d2e69
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x4(%rsi,%rdi), %xmm0
movss (%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, 0x10(%rsp)
movsd 0x10(%rsi,%rdi), %xmm0
movss 0xc(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, 0x40(%rsp)
movsd 0x1c(%rsi,%rdi), %xmm0
movss 0x18(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, (%rsp)
movsd 0x28(%rsi,%rdi), %xmm0
movss 0x24(%rsi,%rdi), %xmm1
shufps $0x4c, %xmm0, %xmm1 # xmm1 = xmm1[0,3],xmm0[0,1]
shufps $0x78, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
movaps %xmm1, 0x30(%rsp)
jmp 0x1d30fe
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movsd 0x10(%rdx,%rax), %xmm3
insertps $0x20, 0x8(%rdx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
movsd 0x34(%rdx,%rax), %xmm0
movss (%rdx,%rax), %xmm10
movss 0xc(%rdx,%rax), %xmm4
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x18(%rdx,%rax), %xmm2
movsd 0x1c(%rdx,%rax), %xmm0
movlhps %xmm0, %xmm2 # xmm2 = xmm2[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm2 # xmm2 = xmm2[0,2],xmm0[1,3]
movss 0x24(%rdx,%rax), %xmm6
movss 0x28(%rdx,%rax), %xmm5
movss 0x2c(%rdx,%rax), %xmm0
movss 0x30(%rdx,%rax), %xmm1
movaps %xmm5, %xmm7
mulss %xmm5, %xmm7
movaps %xmm6, %xmm8
mulss %xmm6, %xmm8
addss %xmm7, %xmm8
movaps %xmm0, %xmm7
mulss %xmm0, %xmm7
addss %xmm8, %xmm7
movaps %xmm1, %xmm8
mulss %xmm1, %xmm8
addss %xmm7, %xmm8
movaps %xmm8, %xmm7
rsqrtss %xmm8, %xmm7
movss 0x1d19a7d(%rip), %xmm9 # 0x1eec718
mulss %xmm7, %xmm9
mulss 0x1d19a73(%rip), %xmm8 # 0x1eec71c
mulss %xmm7, %xmm8
mulss %xmm7, %xmm7
mulss %xmm8, %xmm7
addss %xmm9, %xmm7
mulss %xmm7, %xmm6
insertps $0x30, %xmm6, %xmm2 # xmm2 = xmm2[0,1,2],xmm6[0]
mulss %xmm7, %xmm5
insertps $0x30, %xmm5, %xmm10 # xmm10 = xmm10[0,1,2],xmm5[0]
mulss %xmm7, %xmm0
mulss %xmm1, %xmm7
insertps $0x10, 0x4(%rdx,%rax), %xmm4 # xmm4 = xmm4[0],mem[0],xmm4[2,3]
insertps $0x30, %xmm7, %xmm3 # xmm3 = xmm3[0,1,2],xmm7[0]
insertps $0x20, 0x3c(%rdx,%rax), %xmm4 # xmm4 = xmm4[0,1],mem[0],xmm4[3]
insertps $0x30, %xmm0, %xmm4 # xmm4 = xmm4[0,1,2],xmm0[0]
jmp 0x1d2e69
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movsd 0x10(%rsi,%rdi), %xmm9
insertps $0x20, 0x8(%rsi,%rdi), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
movsd 0x34(%rsi,%rdi), %xmm0
movss (%rsi,%rdi), %xmm7
movss 0xc(%rsi,%rdi), %xmm8
movlhps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm7 # xmm7 = xmm7[0,2],xmm0[1,3]
movss 0x18(%rsi,%rdi), %xmm10
movsd 0x1c(%rsi,%rdi), %xmm0
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x24(%rsi,%rdi), %xmm3
movss 0x28(%rsi,%rdi), %xmm2
movss 0x2c(%rsi,%rdi), %xmm0
movss 0x30(%rsi,%rdi), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d19985(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d1997d(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm10 # xmm10 = xmm10[0,1,2],xmm3[0]
movaps %xmm10, 0x30(%rsp)
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
movaps %xmm7, 0x10(%rsp)
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%rsi,%rdi), %xmm8 # xmm8 = xmm8[0],mem[0],xmm8[2,3]
insertps $0x30, %xmm4, %xmm9 # xmm9 = xmm9[0,1,2],xmm4[0]
movaps %xmm9, (%rsp)
insertps $0x20, 0x3c(%rsi,%rdi), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
insertps $0x30, %xmm0, %xmm8 # xmm8 = xmm8[0,1,2],xmm0[0]
movaps %xmm8, 0x40(%rsp)
jmp 0x1d30fe
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
movss (%rdx,%rax), %xmm10
movss 0x4(%rdx,%rax), %xmm4
movss 0x8(%rdx,%rax), %xmm3
movss 0xc(%rdx,%rax), %xmm2
insertps $0x1c, 0x10(%rdx,%rax), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x20(%rdx,%rax), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
insertps $0x1c, 0x14(%rdx,%rax), %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
insertps $0x28, 0x24(%rdx,%rax), %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
insertps $0x1c, 0x18(%rdx,%rax), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x28(%rdx,%rax), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rdx,%rax), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rdx,%rax), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
movaps %xmm10, 0x10(%rsp)
movaps %xmm2, %xmm7
shufps $0xff, %xmm2, %xmm7 # xmm7 = xmm7[3,3],xmm2[3,3]
movaps %xmm10, %xmm8
shufps $0xff, %xmm10, %xmm8 # xmm8 = xmm8[3,3],xmm10[3,3]
movaps %xmm4, %xmm0
shufps $0xff, %xmm4, %xmm0 # xmm0 = xmm0[3,3],xmm4[3,3]
movaps %xmm3, %xmm5
shufps $0xff, %xmm3, %xmm5 # xmm5 = xmm5[3,3],xmm3[3,3]
movaps %xmm8, %xmm9
mulss %xmm8, %xmm9
movaps %xmm5, %xmm13
mulss %xmm7, %xmm13
movaps %xmm8, %xmm6
movaps %xmm8, %xmm11
movaps %xmm0, %xmm1
mulss %xmm7, %xmm1
mulss %xmm7, %xmm8
movaps %xmm7, %xmm12
mulss %xmm7, %xmm12
movaps %xmm12, %xmm7
addss %xmm9, %xmm7
movaps 0x1d19803(%rip), %xmm15 # 0x1eec6d0
mulss %xmm0, %xmm6
movaps %xmm6, %xmm10
addss %xmm13, %xmm10
subss %xmm13, %xmm6
movaps %xmm0, %xmm14
xorps %xmm15, %xmm14
mulss %xmm0, %xmm14
addss %xmm14, %xmm7
xorps %xmm5, %xmm15
mulss %xmm5, %xmm15
addss %xmm15, %xmm7
mulss %xmm5, %xmm11
subss %xmm9, %xmm12
movaps %xmm0, %xmm9
mulss %xmm0, %xmm9
addss %xmm12, %xmm9
addss %xmm15, %xmm9
movaps %xmm11, %xmm13
subss %xmm1, %xmm13
mulss %xmm5, %xmm0
addss %xmm11, %xmm1
movaps %xmm0, %xmm11
addss %xmm8, %xmm11
subss %xmm8, %xmm0
addss %xmm10, %xmm10
addss %xmm13, %xmm13
addss %xmm14, %xmm12
movaps 0x10(%rsp), %xmm14
mulss %xmm5, %xmm5
addss %xmm12, %xmm5
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movaps 0x1d19790(%rip), %xmm8 # 0x1eec700
mulps %xmm8, %xmm13
movsd 0x1d19773(%rip), %xmm12 # 0x1eec6f0
mulps %xmm12, %xmm10
addps %xmm13, %xmm10
movss 0x1d19786(%rip), %xmm13 # 0x1eec714
mulps %xmm13, %xmm7
addps %xmm10, %xmm7
addss %xmm11, %xmm11
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm8, %xmm11
mulps %xmm12, %xmm9
addps %xmm11, %xmm9
addss %xmm6, %xmm6
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm13, %xmm6
addps %xmm9, %xmm6
xorps %xmm9, %xmm9
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm8, %xmm5
movaps %xmm14, %xmm8
shufps $0xe9, %xmm9, %xmm8 # xmm8 = xmm8[1,2],xmm9[2,3]
blendps $0x4, %xmm4, %xmm8 # xmm8 = xmm8[0,1],xmm4[2],xmm8[3]
addss %xmm1, %xmm1
addss %xmm0, %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm12, %xmm0
addps %xmm5, %xmm0
mulps %xmm13, %xmm1
addps %xmm0, %xmm1
addps %xmm9, %xmm8
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps %xmm3, %xmm0
movaps %xmm3, %xmm10
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
mulps %xmm1, %xmm3
movaps %xmm2, %xmm5
movaps %xmm2, %xmm11
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
mulps %xmm1, %xmm2
movaps %xmm1, %xmm12
mulps %xmm9, %xmm12
mulps %xmm6, %xmm9
addps %xmm12, %xmm9
mulps %xmm7, %xmm14
addps %xmm9, %xmm14
movaps %xmm4, %xmm1
shufps $0x55, %xmm4, %xmm4 # xmm4 = xmm4[1,1,1,1]
mulps %xmm6, %xmm4
addps %xmm12, %xmm4
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm7, %xmm1
addps %xmm4, %xmm1
shufps $0x55, %xmm10, %xmm10 # xmm10 = xmm10[1,1,1,1]
mulps %xmm6, %xmm10
addps %xmm3, %xmm10
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm7, %xmm0
addps %xmm10, %xmm0
shufps $0x55, %xmm11, %xmm11 # xmm11 = xmm11[1,1,1,1]
mulps %xmm6, %xmm11
addps %xmm2, %xmm11
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm7, %xmm5
addps %xmm11, %xmm5
addps %xmm8, %xmm5
jmp 0x1d2908
movq (%rdi), %rsi
movq 0x10(%rdi), %rdi
imulq %rax, %rdi
movss (%rsi,%rdi), %xmm0
movss 0x4(%rsi,%rdi), %xmm1
movss 0x8(%rsi,%rdi), %xmm2
movss 0xc(%rsi,%rdi), %xmm3
insertps $0x1c, 0x10(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
insertps $0x28, 0x20(%rsi,%rdi), %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
movaps %xmm0, 0x10(%rsp)
insertps $0x1c, 0x14(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
insertps $0x28, 0x24(%rsi,%rdi), %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
movaps %xmm1, 0x40(%rsp)
insertps $0x1c, 0x18(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
insertps $0x28, 0x28(%rsi,%rdi), %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
movaps %xmm2, (%rsp)
insertps $0x1c, 0x1c(%rsi,%rdi), %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rsi,%rdi), %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
movaps %xmm3, 0x30(%rsp)
incl %ecx
imulq $0x38, %rcx, %rcx
leaq (%rdx,%rcx), %rsi
movl 0x20(%rdx,%rcx), %ecx
cmpl $0x9134, %ecx # imm = 0x9134
je 0x1d32b1
cmpl $0x9234, %ecx # imm = 0x9234
je 0x1d315b
cmpl $0xb001, %ecx # imm = 0xB001
je 0x1d31c0
cmpl $0x9244, %ecx # imm = 0x9244
jne 0x1d3319
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movaps (%rcx,%rax), %xmm7
movaps 0x10(%rcx,%rax), %xmm9
movaps 0x20(%rcx,%rax), %xmm8
movaps 0x30(%rcx,%rax), %xmm10
jmp 0x1d3319
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x4(%rcx,%rax), %xmm0
movss (%rcx,%rax), %xmm7
shufps $0x4c, %xmm0, %xmm7 # xmm7 = xmm7[0,3],xmm0[0,1]
shufps $0x78, %xmm7, %xmm7 # xmm7 = xmm7[0,2,3,1]
movsd 0x10(%rcx,%rax), %xmm0
movss 0xc(%rcx,%rax), %xmm9
shufps $0x4c, %xmm0, %xmm9 # xmm9 = xmm9[0,3],xmm0[0,1]
shufps $0x78, %xmm9, %xmm9 # xmm9 = xmm9[0,2,3,1]
movsd 0x1c(%rcx,%rax), %xmm0
movss 0x18(%rcx,%rax), %xmm8
shufps $0x4c, %xmm0, %xmm8 # xmm8 = xmm8[0,3],xmm0[0,1]
shufps $0x78, %xmm8, %xmm8 # xmm8 = xmm8[0,2,3,1]
movsd 0x28(%rcx,%rax), %xmm0
movss 0x24(%rcx,%rax), %xmm10
shufps $0x4c, %xmm0, %xmm10 # xmm10 = xmm10[0,3],xmm0[0,1]
shufps $0x78, %xmm10, %xmm10 # xmm10 = xmm10[0,2,3,1]
jmp 0x1d3319
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movsd 0x10(%rcx,%rax), %xmm8
insertps $0x20, 0x8(%rcx,%rax), %xmm8 # xmm8 = xmm8[0,1],mem[0],xmm8[3]
movsd 0x34(%rcx,%rax), %xmm0
movss (%rcx,%rax), %xmm7
movss 0xc(%rcx,%rax), %xmm9
movlhps %xmm0, %xmm7 # xmm7 = xmm7[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm7 # xmm7 = xmm7[0,2],xmm0[1,3]
movss 0x18(%rcx,%rax), %xmm10
movsd 0x1c(%rcx,%rax), %xmm0
movlhps %xmm0, %xmm10 # xmm10 = xmm10[0],xmm0[0]
shufps $0xd8, %xmm0, %xmm10 # xmm10 = xmm10[0,2],xmm0[1,3]
movss 0x24(%rcx,%rax), %xmm3
movss 0x28(%rcx,%rax), %xmm2
movss 0x2c(%rcx,%rax), %xmm0
movss 0x30(%rcx,%rax), %xmm1
movaps %xmm2, %xmm4
mulss %xmm2, %xmm4
movaps %xmm3, %xmm5
mulss %xmm3, %xmm5
addss %xmm4, %xmm5
movaps %xmm0, %xmm4
mulss %xmm0, %xmm4
addss %xmm5, %xmm4
movaps %xmm1, %xmm5
mulss %xmm1, %xmm5
addss %xmm4, %xmm5
movaps %xmm5, %xmm4
rsqrtss %xmm5, %xmm4
movss 0x1d194c2(%rip), %xmm6 # 0x1eec718
mulss %xmm4, %xmm6
mulss 0x1d194ba(%rip), %xmm5 # 0x1eec71c
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm5, %xmm4
addss %xmm6, %xmm4
mulss %xmm4, %xmm3
insertps $0x30, %xmm3, %xmm10 # xmm10 = xmm10[0,1,2],xmm3[0]
mulss %xmm4, %xmm2
insertps $0x30, %xmm2, %xmm7 # xmm7 = xmm7[0,1,2],xmm2[0]
mulss %xmm4, %xmm0
mulss %xmm1, %xmm4
insertps $0x10, 0x4(%rcx,%rax), %xmm9 # xmm9 = xmm9[0],mem[0],xmm9[2,3]
insertps $0x30, %xmm4, %xmm8 # xmm8 = xmm8[0,1,2],xmm4[0]
insertps $0x20, 0x3c(%rcx,%rax), %xmm9 # xmm9 = xmm9[0,1],mem[0],xmm9[3]
insertps $0x30, %xmm0, %xmm9 # xmm9 = xmm9[0,1,2],xmm0[0]
jmp 0x1d3319
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
movss (%rcx,%rax), %xmm7
movss 0x4(%rcx,%rax), %xmm9
movss 0x8(%rcx,%rax), %xmm8
movss 0xc(%rcx,%rax), %xmm10
insertps $0x1c, 0x10(%rcx,%rax), %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
insertps $0x28, 0x20(%rcx,%rax), %xmm7 # xmm7 = xmm7[0,1],mem[0],zero
insertps $0x1c, 0x14(%rcx,%rax), %xmm9 # xmm9 = xmm9[0],mem[0],zero,zero
insertps $0x28, 0x24(%rcx,%rax), %xmm9 # xmm9 = xmm9[0,1],mem[0],zero
insertps $0x1c, 0x18(%rcx,%rax), %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
insertps $0x28, 0x28(%rcx,%rax), %xmm8 # xmm8 = xmm8[0,1],mem[0],zero
insertps $0x1c, 0x1c(%rcx,%rax), %xmm10 # xmm10 = xmm10[0],mem[0],zero,zero
insertps $0x28, 0x2c(%rcx,%rax), %xmm10 # xmm10 = xmm10[0,1],mem[0],zero
movaps 0x30(%rsp), %xmm2
shufps $0xff, %xmm2, %xmm2 # xmm2 = xmm2[3,3,3,3]
movaps 0x10(%rsp), %xmm1
shufps $0xff, %xmm1, %xmm1 # xmm1 = xmm1[3,3,3,3]
movaps 0x40(%rsp), %xmm3
shufps $0xff, %xmm3, %xmm3 # xmm3 = xmm3[3,3,3,3]
movaps (%rsp), %xmm4
shufps $0xff, %xmm4, %xmm4 # xmm4 = xmm4[3,3,3,3]
movaps %xmm10, 0x120(%rsp)
shufps $0xff, %xmm10, %xmm10 # xmm10 = xmm10[3,3,3,3]
movaps %xmm7, 0x150(%rsp)
shufps $0xff, %xmm7, %xmm7 # xmm7 = xmm7[3,3,3,3]
movaps %xmm9, 0x130(%rsp)
shufps $0xff, %xmm9, %xmm9 # xmm9 = xmm9[3,3,3,3]
movaps %xmm8, 0x140(%rsp)
movaps %xmm8, %xmm0
shufps $0xff, %xmm8, %xmm0 # xmm0 = xmm0[3,3],xmm8[3,3]
movaps %xmm1, 0xe0(%rsp)
mulss %xmm7, %xmm1
movaps %xmm2, 0x110(%rsp)
mulss %xmm10, %xmm2
addss %xmm1, %xmm2
movaps %xmm3, 0x80(%rsp)
movaps %xmm3, %xmm1
movaps %xmm9, 0x60(%rsp)
mulss %xmm9, %xmm1
addss %xmm2, %xmm1
movaps %xmm4, 0x70(%rsp)
mulss %xmm0, %xmm4
addss %xmm1, %xmm4
movaps 0x1d1930f(%rip), %xmm1 # 0x1eec6d0
movaps %xmm4, %xmm3
xorps %xmm1, %xmm3
movaps %xmm3, %xmm2
movaps %xmm4, %xmm5
ucomiss %xmm4, %xmm3
movaps %xmm0, %xmm3
xorps %xmm1, %xmm3
movaps %xmm3, 0xd0(%rsp)
ja 0x1d33eb
movaps %xmm10, 0xb0(%rsp)
jmp 0x1d3402
movaps 0x1d192de(%rip), %xmm1 # 0x1eec6d0
xorps %xmm1, %xmm10
movaps %xmm10, 0xb0(%rsp)
xorps %xmm1, %xmm7
movaps %xmm7, 0xc0(%rsp)
ja 0x1d3414
movaps %xmm0, 0xd0(%rsp)
movaps %xmm5, %xmm1
movaps %xmm5, %xmm0
cmpltss %xmm2, %xmm0
movaps %xmm0, 0x100(%rsp)
maxss %xmm5, %xmm2
movaps %xmm2, 0x90(%rsp)
andps 0x1d19286(%rip), %xmm1 # 0x1eec6c0
movss 0x1d1d53e(%rip), %xmm2 # 0x1ef0980
mulss %xmm1, %xmm2
addss 0x1d1d536(%rip), %xmm2 # 0x1ef0984
movaps 0x60(%rsp), %xmm0
mulss %xmm1, %xmm2
addss 0x1d1d529(%rip), %xmm2 # 0x1ef0988
xorps 0x1d1926a(%rip), %xmm0 # 0x1eec6d0
movaps %xmm0, 0x50(%rsp)
mulss %xmm1, %xmm2
addss 0x1d1d515(%rip), %xmm2 # 0x1ef098c
mulss %xmm1, %xmm2
addss 0x1d1d50d(%rip), %xmm2 # 0x1ef0990
mulss %xmm1, %xmm2
addss 0x1d1d505(%rip), %xmm2 # 0x1ef0994
movss %xmm2, 0x2c(%rsp)
movss 0x1d19277(%rip), %xmm0 # 0x1eec714
movaps %xmm1, 0xf0(%rsp)
subss %xmm1, %xmm0
xorps %xmm1, %xmm1
ucomiss %xmm1, %xmm0
jb 0x1d34b7
sqrtss %xmm0, %xmm0
jmp 0x1d34bc
callq 0x6aa20
mulss 0x2c(%rsp), %xmm0
movss 0x1d1d4ce(%rip), %xmm1 # 0x1ef0998
movaps %xmm1, %xmm2
subss %xmm0, %xmm2
movaps 0x90(%rsp), %xmm0
xorps %xmm3, %xmm3
cmpltss %xmm3, %xmm0
maxss %xmm2, %xmm3
movaps %xmm0, %xmm2
andnps %xmm3, %xmm2
xorps 0x1d191de(%rip), %xmm3 # 0x1eec6d0
andps %xmm0, %xmm3
orps %xmm2, %xmm3
movaps %xmm1, %xmm2
subss %xmm3, %xmm2
movss 0x1d1920d(%rip), %xmm5 # 0x1eec714
movaps %xmm5, %xmm3
cmpltss 0xf0(%rsp), %xmm3
movss 0x1d1d480(%rip), %xmm0 # 0x1ef099c
andps %xmm3, %xmm0
andnps %xmm2, %xmm3
orps %xmm3, %xmm0
mulss 0xa0(%rsp), %xmm0
movss 0x1d1d46a(%rip), %xmm2 # 0x1ef09a0
mulss %xmm0, %xmm2
roundss $0x9, %xmm2, %xmm2
cvttss2si %xmm2, %eax
mulss %xmm1, %xmm2
subss %xmm2, %xmm0
movaps %xmm0, %xmm3
mulss %xmm0, %xmm3
movss 0x1d1d449(%rip), %xmm1 # 0x1ef09a4
mulss %xmm3, %xmm1
addss 0x1d1d441(%rip), %xmm1 # 0x1ef09a8
movss 0x1d1d43d(%rip), %xmm2 # 0x1ef09ac
mulss %xmm3, %xmm2
addss 0x1d1d435(%rip), %xmm2 # 0x1ef09b0
movaps 0x100(%rsp), %xmm6
movaps 0x50(%rsp), %xmm4
andps %xmm6, %xmm4
movaps %xmm4, 0x50(%rsp)
mulss %xmm3, %xmm1
addss 0x1d1d418(%rip), %xmm1 # 0x1ef09b4
andnps 0x60(%rsp), %xmm6
mulss %xmm3, %xmm2
addss 0x1d1d40b(%rip), %xmm2 # 0x1ef09b8
movl %eax, %ecx
mulss %xmm3, %xmm1
addss 0x1d1d401(%rip), %xmm1 # 0x1ef09bc
andl $0x3, %ecx
mulss %xmm3, %xmm2
addss 0x1d1d3f6(%rip), %xmm2 # 0x1ef09c0
mulss %xmm3, %xmm1
addss 0x1d1d3ee(%rip), %xmm1 # 0x1ef09c4
mulss %xmm3, %xmm2
movss 0x1d1913a(%rip), %xmm7 # 0x1eec71c
movaps %xmm7, %xmm4
addss %xmm7, %xmm2
mulss %xmm3, %xmm1
addss %xmm5, %xmm1
mulss %xmm3, %xmm2
addss %xmm5, %xmm2
mulss %xmm0, %xmm1
testb $0x1, %al
je 0x1d3607
movaps %xmm2, %xmm11
jmp 0x1d360e
movaps %xmm1, %xmm11
movaps %xmm2, %xmm1
orps 0x50(%rsp), %xmm6
leal -0x1(%rcx), %eax
cmpl $0x2, %ecx
jb 0x1d3623
xorps 0x1d190ad(%rip), %xmm11 # 0x1eec6d0
cmpl $0x2, %eax
jae 0x1d362f
xorps 0x1d190a1(%rip), %xmm1 # 0x1eec6d0
movaps 0x90(%rsp), %xmm7
movaps %xmm7, %xmm0
movaps 0x110(%rsp), %xmm15
mulss %xmm15, %xmm0
movaps 0xb0(%rsp), %xmm14
subss %xmm14, %xmm0
movaps %xmm7, %xmm9
mulss 0xe0(%rsp), %xmm9
movaps 0xc0(%rsp), %xmm13
subss %xmm13, %xmm9
movaps %xmm7, %xmm8
mulss 0x80(%rsp), %xmm8
subss %xmm6, %xmm8
mulss 0x70(%rsp), %xmm7
movaps 0xd0(%rsp), %xmm3
subss %xmm3, %xmm7
movaps %xmm9, %xmm2
mulss %xmm9, %xmm2
movaps %xmm0, %xmm5
mulss %xmm0, %xmm5
addss %xmm2, %xmm5
movaps %xmm8, %xmm2
mulss %xmm8, %xmm2
addss %xmm5, %xmm2
movaps %xmm7, %xmm5
mulss %xmm7, %xmm5
addss %xmm2, %xmm5
movaps %xmm5, %xmm10
rsqrtss %xmm5, %xmm10
movaps %xmm10, %xmm2
mulss %xmm4, %xmm5
mulss %xmm10, %xmm5
mulss %xmm10, %xmm10
mulss %xmm5, %xmm10
movss 0x1d1902d(%rip), %xmm4 # 0x1eec718
mulss %xmm4, %xmm2
addss %xmm2, %xmm10
mulss %xmm10, %xmm0
movaps %xmm11, 0x60(%rsp)
mulss %xmm11, %xmm0
movaps %xmm1, %xmm11
mulss %xmm15, %xmm11
subss %xmm0, %xmm11
movaps 0xa0(%rsp), %xmm2
movss 0x1d18ff1(%rip), %xmm12 # 0x1eec714
subss %xmm2, %xmm12
mulss %xmm2, %xmm14
mulss %xmm12, %xmm15
addss %xmm14, %xmm15
movaps 0xe0(%rsp), %xmm14
mulss %xmm2, %xmm13
movaps %xmm12, %xmm5
mulss %xmm14, %xmm5
addss %xmm13, %xmm5
mulss %xmm2, %xmm6
movaps %xmm12, %xmm0
mulss 0x80(%rsp), %xmm0
addss %xmm6, %xmm0
mulss %xmm2, %xmm3
movaps %xmm12, 0x50(%rsp)
movaps %xmm12, %xmm2
mulss 0x70(%rsp), %xmm2
addss %xmm3, %xmm2
movaps %xmm5, %xmm12
mulss %xmm5, %xmm12
movaps %xmm15, %xmm13
mulss %xmm15, %xmm13
addss %xmm12, %xmm13
movaps %xmm0, %xmm12
mulss %xmm0, %xmm12
addss %xmm13, %xmm12
movaps %xmm2, %xmm13
mulss %xmm2, %xmm13
addss %xmm12, %xmm13
movaps %xmm13, %xmm12
mulss 0x1d18f5c(%rip), %xmm13 # 0x1eec71c
rsqrtss %xmm12, %xmm12
mulss %xmm12, %xmm4
mulss %xmm12, %xmm13
mulss %xmm12, %xmm12
mulss %xmm13, %xmm12
addss %xmm4, %xmm12
movss 0x1d1d1e2(%rip), %xmm6 # 0x1ef09c8
movaps 0x90(%rsp), %xmm3
ucomiss %xmm6, %xmm3
cmpltss %xmm3, %xmm6
movaps %xmm6, %xmm4
andnps %xmm11, %xmm4
mulss %xmm12, %xmm15
andps %xmm15, %xmm6
orps %xmm4, %xmm6
ja 0x1d385b
mulss %xmm10, %xmm9
mulss %xmm10, %xmm8
mulss %xmm10, %xmm7
movaps 0x60(%rsp), %xmm2
mulss %xmm2, %xmm9
mulss %xmm1, %xmm14
subss %xmm9, %xmm14
mulss %xmm2, %xmm8
movaps 0x80(%rsp), %xmm0
mulss %xmm1, %xmm0
subss %xmm8, %xmm0
mulss %xmm2, %xmm7
movaps 0x70(%rsp), %xmm2
mulss %xmm1, %xmm2
subss %xmm7, %xmm2
movaps %xmm14, %xmm5
jmp 0x1d386a
mulss %xmm12, %xmm5
mulss %xmm12, %xmm0
mulss %xmm12, %xmm2
movaps 0xa0(%rsp), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps 0x150(%rsp), %xmm3
mulps %xmm1, %xmm3
movaps 0x50(%rsp), %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movaps 0x10(%rsp), %xmm14
mulps %xmm13, %xmm14
addps %xmm3, %xmm14
movaps 0x130(%rsp), %xmm3
mulps %xmm1, %xmm3
movaps 0x40(%rsp), %xmm15
mulps %xmm13, %xmm15
addps %xmm3, %xmm15
movaps 0x140(%rsp), %xmm3
mulps %xmm1, %xmm3
movaps (%rsp), %xmm4
mulps %xmm13, %xmm4
addps %xmm3, %xmm4
movaps %xmm4, (%rsp)
mulps 0x120(%rsp), %xmm1
mulps 0x30(%rsp), %xmm13
addps %xmm1, %xmm13
movaps %xmm5, %xmm1
mulss %xmm5, %xmm1
movaps %xmm6, %xmm7
mulss %xmm6, %xmm7
movaps %xmm7, %xmm3
addss %xmm1, %xmm3
movaps %xmm0, %xmm10
movaps 0x1d18dd0(%rip), %xmm11 # 0x1eec6d0
xorps %xmm11, %xmm10
mulss %xmm0, %xmm10
addss %xmm10, %xmm3
xorps %xmm2, %xmm11
mulss %xmm2, %xmm11
addss %xmm11, %xmm3
movaps %xmm6, %xmm9
mulss %xmm2, %xmm9
movaps %xmm5, %xmm4
mulss %xmm0, %xmm4
movaps %xmm4, %xmm8
addss %xmm9, %xmm8
subss %xmm9, %xmm4
movaps %xmm5, %xmm12
mulss %xmm2, %xmm12
subss %xmm1, %xmm7
movaps %xmm0, %xmm9
mulss %xmm0, %xmm9
addss %xmm7, %xmm9
addss %xmm11, %xmm9
movaps %xmm6, %xmm1
mulss %xmm0, %xmm1
mulss %xmm5, %xmm6
movaps %xmm12, %xmm11
subss %xmm1, %xmm11
mulss %xmm2, %xmm0
addss %xmm12, %xmm1
movaps %xmm0, %xmm5
addss %xmm6, %xmm5
subss %xmm6, %xmm0
addss %xmm8, %xmm8
addss %xmm11, %xmm11
addss %xmm10, %xmm7
mulss %xmm2, %xmm2
addss %xmm7, %xmm2
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps 0x1d18d52(%rip), %xmm6 # 0x1eec700
mulps %xmm6, %xmm11
movsd 0x1d18d35(%rip), %xmm10 # 0x1eec6f0
mulps %xmm10, %xmm8
addps %xmm11, %xmm8
movss 0x1d18d48(%rip), %xmm11 # 0x1eec714
mulps %xmm11, %xmm3
addps %xmm8, %xmm3
addss %xmm5, %xmm5
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm6, %xmm5
mulps %xmm10, %xmm9
addps %xmm5, %xmm9
addss %xmm4, %xmm4
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm11, %xmm4
addps %xmm9, %xmm4
xorps %xmm7, %xmm7
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm6, %xmm2
movaps %xmm14, %xmm6
shufps $0xe9, %xmm7, %xmm6 # xmm6 = xmm6[1,2],xmm7[2,3]
blendps $0x4, %xmm15, %xmm6 # xmm6 = xmm6[0,1],xmm15[2],xmm6[3]
addss %xmm1, %xmm1
addss %xmm0, %xmm0
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm10, %xmm0
addps %xmm2, %xmm0
mulps %xmm11, %xmm1
addps %xmm0, %xmm1
addps %xmm7, %xmm6
shufps $0x0, %xmm14, %xmm14 # xmm14 = xmm14[0,0,0,0]
movaps (%rsp), %xmm5
movaps %xmm5, %xmm0
movaps %xmm5, %xmm2
shufps $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
mulps %xmm1, %xmm5
movaps %xmm5, %xmm10
movaps %xmm13, %xmm5
movaps %xmm13, %xmm8
shufps $0xaa, %xmm13, %xmm13 # xmm13 = xmm13[2,2,2,2]
mulps %xmm1, %xmm13
movaps %xmm1, %xmm9
mulps %xmm7, %xmm9
mulps %xmm4, %xmm7
addps %xmm9, %xmm7
mulps %xmm3, %xmm14
addps %xmm7, %xmm14
movaps %xmm15, %xmm1
shufps $0x55, %xmm15, %xmm15 # xmm15 = xmm15[1,1,1,1]
mulps %xmm4, %xmm15
addps %xmm9, %xmm15
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
mulps %xmm3, %xmm1
addps %xmm15, %xmm1
shufps $0x55, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
mulps %xmm4, %xmm2
addps %xmm10, %xmm2
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
mulps %xmm3, %xmm0
addps %xmm2, %xmm0
shufps $0x55, %xmm8, %xmm8 # xmm8 = xmm8[1,1,1,1]
mulps %xmm4, %xmm8
addps %xmm13, %xmm8
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm3, %xmm5
addps %xmm8, %xmm5
addps %xmm6, %xmm5
jmp 0x1d2908
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::sse42::InstanceArrayIntersector1::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstanceArrayPrimitive const&) (.cold.1)
|
__forceinline void updateAABB()
{
if (likely(query_ws->radius == (float)inf || userContext->instStackSize == 0)) {
query_radius = Vec3fa(query_ws->radius);
return;
}
const AffineSpace3fa m = AffineSpace3fa_load_unaligned((AffineSpace3fa*)userContext->world2inst[userContext->instStackSize-1]);
BBox3fa bbox(Vec3fa(-query_ws->radius), Vec3fa(query_ws->radius));
bbox = xfmBounds(m, bbox);
query_radius = 0.5f * (bbox.upper - bbox.lower);
}
|
decl %edi
shlq $0x6, %rdi
movups (%rsi,%rdi), %xmm5
movups 0x10(%rsi,%rdi), %xmm1
movups 0x20(%rsi,%rdi), %xmm9
movups 0x30(%rsi,%rdi), %xmm10
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x1d18be1(%rip), %xmm7 # 0x1eec6d0
xorps %xmm0, %xmm7
mulps %xmm0, %xmm9
movaps %xmm10, %xmm2
subps %xmm9, %xmm2
movaps %xmm7, %xmm6
mulps %xmm1, %xmm6
movaps %xmm6, %xmm8
addps %xmm2, %xmm8
mulps %xmm5, %xmm7
movaps %xmm7, %xmm11
addps %xmm8, %xmm11
movaps 0x1d17ed2(%rip), %xmm3 # 0x1eeb9f0
minps %xmm11, %xmm3
movaps 0x1d17ed7(%rip), %xmm4 # 0x1eeba00
maxps %xmm11, %xmm4
addps %xmm10, %xmm9
addps %xmm9, %xmm6
movaps %xmm7, %xmm10
addps %xmm6, %xmm10
minps %xmm10, %xmm3
maxps %xmm10, %xmm4
mulps %xmm0, %xmm1
addps %xmm1, %xmm2
movaps %xmm7, %xmm10
addps %xmm2, %xmm10
minps %xmm10, %xmm3
maxps %xmm10, %xmm4
addps %xmm9, %xmm1
addps %xmm1, %xmm7
minps %xmm7, %xmm3
maxps %xmm7, %xmm4
mulps %xmm5, %xmm0
addps %xmm0, %xmm8
minps %xmm8, %xmm3
maxps %xmm8, %xmm4
addps %xmm0, %xmm6
minps %xmm6, %xmm3
maxps %xmm6, %xmm4
addps %xmm0, %xmm2
minps %xmm2, %xmm3
maxps %xmm2, %xmm4
addps %xmm1, %xmm0
minps %xmm0, %xmm3
maxps %xmm0, %xmm4
subps %xmm3, %xmm4
mulps 0x1d18b44(%rip), %xmm4 # 0x1eec6e0
movaps %xmm4, (%rdx)
retq
|
/embree[P]embree/kernels/geometry/../common/context.h
|
embree::sse42::InstanceArrayIntersector1MB::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstanceArrayPrimitive const&) (.cold.1)
|
__forceinline void updateAABB()
{
if (likely(query_ws->radius == (float)inf || userContext->instStackSize == 0)) {
query_radius = Vec3fa(query_ws->radius);
return;
}
const AffineSpace3fa m = AffineSpace3fa_load_unaligned((AffineSpace3fa*)userContext->world2inst[userContext->instStackSize-1]);
BBox3fa bbox(Vec3fa(-query_ws->radius), Vec3fa(query_ws->radius));
bbox = xfmBounds(m, bbox);
query_radius = 0.5f * (bbox.upper - bbox.lower);
}
|
decl %edi
shlq $0x6, %rdi
movups (%rsi,%rdi), %xmm5
movups 0x10(%rsi,%rdi), %xmm1
movups 0x20(%rsi,%rdi), %xmm9
movups 0x30(%rsi,%rdi), %xmm10
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x1d18b0a(%rip), %xmm7 # 0x1eec6d0
xorps %xmm0, %xmm7
mulps %xmm0, %xmm9
movaps %xmm10, %xmm2
subps %xmm9, %xmm2
movaps %xmm7, %xmm6
mulps %xmm1, %xmm6
movaps %xmm6, %xmm8
addps %xmm2, %xmm8
mulps %xmm5, %xmm7
movaps %xmm7, %xmm11
addps %xmm8, %xmm11
movaps 0x1d17dfb(%rip), %xmm3 # 0x1eeb9f0
minps %xmm11, %xmm3
movaps 0x1d17e00(%rip), %xmm4 # 0x1eeba00
maxps %xmm11, %xmm4
addps %xmm10, %xmm9
addps %xmm9, %xmm6
movaps %xmm7, %xmm10
addps %xmm6, %xmm10
minps %xmm10, %xmm3
maxps %xmm10, %xmm4
mulps %xmm0, %xmm1
addps %xmm1, %xmm2
movaps %xmm7, %xmm10
addps %xmm2, %xmm10
minps %xmm10, %xmm3
maxps %xmm10, %xmm4
addps %xmm9, %xmm1
addps %xmm1, %xmm7
minps %xmm7, %xmm3
maxps %xmm7, %xmm4
mulps %xmm5, %xmm0
addps %xmm0, %xmm8
minps %xmm8, %xmm3
maxps %xmm8, %xmm4
addps %xmm0, %xmm6
minps %xmm6, %xmm3
maxps %xmm6, %xmm4
addps %xmm0, %xmm2
minps %xmm2, %xmm3
maxps %xmm2, %xmm4
addps %xmm1, %xmm0
minps %xmm0, %xmm3
maxps %xmm0, %xmm4
subps %xmm3, %xmm4
mulps 0x1d18a6d(%rip), %xmm4 # 0x1eec6e0
movaps %xmm4, (%rdx)
retq
|
/embree[P]embree/kernels/geometry/../common/context.h
|
embree::avx::BVHNIntersector1<4, 257, false, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x8c8, %rsp # imm = 0x8C8
movq %rdx, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1d4434
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x1d4434
vmovaps 0x10(%rsi), %xmm2
vdpps $0x7f, %xmm2, %xmm2, %xmm5
vrsqrtss %xmm5, %xmm5, %xmm6
vmovss 0x1d1896f(%rip), %xmm3 # 0x1eec718
vmulss %xmm3, %xmm6, %xmm7
vmovss 0x1d18dcb(%rip), %xmm4 # 0x1eecb80
vmulss %xmm4, %xmm5, %xmm5
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vsubss %xmm5, %xmm7, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,0]
vmovshdup %xmm6, %xmm7 # xmm7 = xmm6[1,1,3,3]
vbroadcastss 0x1d4d0db(%rip), %xmm12 # 0x1f20ec0
vxorps %xmm7, %xmm12, %xmm10
vxorps %xmm7, %xmm7, %xmm7
vunpckhps %xmm7, %xmm6, %xmm11 # xmm11 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovss %xmm10, %xmm7, %xmm10 # xmm10 = xmm10[0],xmm7[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm12, %xmm9, %xmm9
vinsertps $0x2a, %xmm6, %xmm9, %xmm9 # xmm9 = xmm9[0],zero,xmm6[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vdpps $0x7f, %xmm9, %xmm9, %xmm12
vcmpltps %xmm11, %xmm12, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vblendvps %xmm11, %xmm10, %xmm9, %xmm9
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vbroadcastss 0x10(%rsi), %xmm11
vmovaps %xmm11, 0xc0(%rsp)
vrsqrtss %xmm10, %xmm10, %xmm11
vmulss %xmm4, %xmm10, %xmm10
vmulss %xmm11, %xmm10, %xmm10
vmulss %xmm11, %xmm11, %xmm12
vmulss %xmm12, %xmm10, %xmm10
vbroadcastss 0x14(%rsi), %xmm12
vmovaps %xmm12, 0xb0(%rsp)
leaq 0xe0(%rsp), %rcx
vmulss %xmm3, %xmm11, %xmm11
vsubss %xmm10, %xmm11, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm11 # xmm11 = xmm6[1,2,0,3]
vmulps %xmm9, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vsubps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vmovss %xmm8, (%rcx)
vbroadcastss 0x18(%rsi), %xmm8
vmovaps %xmm8, 0xa0(%rsp)
vmulss %xmm4, %xmm11, %xmm4
vrsqrtss %xmm11, %xmm11, %xmm8
vmulss %xmm3, %xmm8, %xmm3
vmulss %xmm4, %xmm8, %xmm4
vmulss %xmm8, %xmm8, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vsubss %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm10, %xmm3
vmulps %xmm6, %xmm5, %xmm4
vunpcklps %xmm4, %xmm9, %xmm5 # xmm5 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpckhps %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
vunpcklps %xmm7, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
vunpckhps %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
vunpcklps %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vunpcklps %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vunpckhps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovaps %xmm4, 0x10(%rcx)
vmovaps %xmm5, 0x20(%rcx)
vmovaps %xmm3, 0x30(%rcx)
leaq 0x128(%rsp), %r9
movq 0x70(%rax), %rax
vmaxss 0xc(%rsi), %xmm1, %xmm3
vbroadcastss 0x1d4cf9f(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm4
vbroadcastss 0x1d1d0b6(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm2, %xmm2
movq %rax, -0x8(%r9)
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d187c2(%rip), %xmm5 # 0x1eec714
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss (%rsi), %xmm15
vbroadcastss 0x4(%rsi), %xmm12
xorl %r10d, %r10d
vucomiss %xmm1, %xmm2
vbroadcastss 0x8(%rsi), %xmm13
setb %r10b
vmovshdup %xmm2, %xmm4 # xmm4 = xmm2[1,1,3,3]
shll $0x4, %r10d
xorl %r14d, %r14d
vucomiss %xmm1, %xmm4
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
setb %r14b
shll $0x4, %r14d
orq $0x20, %r14
xorl %r13d, %r13d
vucomiss %xmm1, %xmm4
vshufps $0x0, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[2,2,2,2]
setb %r13b
shll $0x4, %r13d
orq $0x40, %r13
movq %r10, %rbp
xorq $0x10, %rbp
movq %r14, %r12
xorq $0x10, %r12
movq %r13, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[0,0,0,0]
leaq 0x120(%rsp), %r11
vmovaps %xmm12, 0x90(%rsp)
vmovaps %xmm13, 0x80(%rsp)
vmovaps %xmm4, 0x70(%rsp)
vmovaps %xmm5, 0x60(%rsp)
vmovaps %xmm6, 0x50(%rsp)
vmovaps %xmm7, (%rsp)
vmovaps %xmm8, 0x40(%rsp)
movq %rsi, 0x20(%rsp)
vmovaps %xmm15, 0xd0(%rsp)
movq %r10, 0x18(%rsp)
cmpq %r11, %r9
je 0x1d4434
movq -0x8(%r9), %rcx
addq $-0x8, %r9
movq %rcx, %rax
andq $0xf, %rax
jne 0x1d4114
vmovaps 0x20(%rcx,%r10), %xmm0
vsubps %xmm15, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm0
vmovaps 0x20(%rcx,%r14), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm5, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rcx,%r13), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm1
vmovaps 0x20(%rcx,%rbp), %xmm2
vsubps %xmm15, %xmm2, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0x20(%rcx,%r12), %xmm3
vsubps %xmm12, %xmm3, %xmm3
vmulps %xmm3, %xmm5, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vmovaps 0x20(%rcx,%rbx), %xmm3
vsubps %xmm13, %xmm3, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vpmaxsd %xmm7, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vpminsd %xmm8, %xmm3, %xmm1
vpminsd %xmm1, %xmm2, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r15d
movb $0x1, %al
testb %al, %al
je 0x1d411d
testq %r15, %r15
je 0x1d4121
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x1d4126
movq %rax, %rcx
testl %edx, %edx
je 0x1d4036
jmp 0x1d4399
cmpl $0x2, %eax
je 0x1d416d
xorl %eax, %eax
jmp 0x1d40cf
pushq $0x6
jmp 0x1d4123
pushq $0x4
popq %rdx
jmp 0x1d4107
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x1d4168
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x1d4135
movq %r8, %rcx
jmp 0x1d4107
movq %rcx, %rax
andq $-0x10, %rax
vmovaps 0x80(%rax), %xmm5
vmovaps 0xa0(%rsp), %xmm0
vmulps %xmm5, %xmm0, %xmm6
vmovaps 0x90(%rax), %xmm4
vmulps %xmm4, %xmm0, %xmm9
vmovaps 0xa0(%rax), %xmm3
vmulps %xmm3, %xmm0, %xmm10
vmovaps 0x20(%rax), %xmm2
vmovaps 0x30(%rax), %xmm1
vmovaps 0x40(%rax), %xmm0
vmovaps 0x50(%rax), %xmm8
vmovaps 0xb0(%rsp), %xmm12
vmulps %xmm8, %xmm12, %xmm7
vaddps %xmm7, %xmm6, %xmm11
vmovaps 0x60(%rax), %xmm7
vmulps %xmm7, %xmm12, %xmm6
vaddps %xmm6, %xmm9, %xmm9
vmovaps 0x70(%rax), %xmm6
vmulps %xmm6, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vmovaps 0xc0(%rsp), %xmm13
vmulps %xmm2, %xmm13, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmulps %xmm1, %xmm13, %xmm12
vaddps %xmm12, %xmm9, %xmm9
vmulps %xmm0, %xmm13, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vbroadcastss 0x1d4ccb5(%rip), %xmm13 # 0x1f20ec4
vandps %xmm13, %xmm11, %xmm12
vbroadcastss 0x1d1cdcb(%rip), %xmm14 # 0x1ef0fe8
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm11, %xmm11
vandps %xmm13, %xmm9, %xmm12
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm9, %xmm12
vandps %xmm13, %xmm10, %xmm9
vcmpltps %xmm14, %xmm9, %xmm9
vblendvps %xmm9, %xmm14, %xmm10, %xmm13
vrcpps %xmm11, %xmm9
vmulps %xmm11, %xmm9, %xmm10
vbroadcastss 0x1d184b6(%rip), %xmm14 # 0x1eec714
vsubps %xmm10, %xmm14, %xmm10
vmulps %xmm10, %xmm9, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vrcpps %xmm12, %xmm10
vmulps %xmm12, %xmm10, %xmm11
vsubps %xmm11, %xmm14, %xmm11
vmulps %xmm11, %xmm10, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vrcpps %xmm13, %xmm11
vmulps %xmm13, %xmm11, %xmm12
vmovaps 0x80(%rsp), %xmm13
vsubps %xmm12, %xmm14, %xmm12
vmulps %xmm12, %xmm11, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmovaps 0x90(%rsp), %xmm12
vmulps %xmm5, %xmm13, %xmm5
vaddps 0xb0(%rax), %xmm5, %xmm5
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm5, %xmm8, %xmm5
vmovaps 0x40(%rsp), %xmm8
vmulps %xmm4, %xmm13, %xmm4
vaddps 0xc0(%rax), %xmm4, %xmm4
vmulps %xmm7, %xmm12, %xmm7
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm3, %xmm13, %xmm3
vaddps 0xd0(%rax), %xmm3, %xmm3
vmulps %xmm6, %xmm12, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps %xmm2, %xmm15, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vbroadcastss 0x1d4cbbb(%rip), %xmm6 # 0x1f20ec0
vxorps %xmm6, %xmm9, %xmm5
vmulps %xmm5, %xmm2, %xmm2
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm4, %xmm1, %xmm1
vxorps %xmm6, %xmm10, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vxorps %xmm6, %xmm11, %xmm4
vmulps %xmm0, %xmm15, %xmm0
vaddps %xmm3, %xmm0, %xmm0
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm2, %xmm9, %xmm3
vaddps %xmm1, %xmm10, %xmm4
vaddps %xmm0, %xmm11, %xmm5
vpminsd %xmm4, %xmm1, %xmm6
vpminsd %xmm5, %xmm0, %xmm7
vmaxps %xmm7, %xmm6, %xmm6
vpminsd %xmm3, %xmm2, %xmm7
vpmaxsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm4, %xmm1, %xmm1
vmovaps 0x70(%rsp), %xmm4
vpmaxsd %xmm5, %xmm0, %xmm0
vmovaps 0x60(%rsp), %xmm5
vminps %xmm0, %xmm1, %xmm0
vmovaps (%rsp), %xmm1
vmaxps %xmm7, %xmm1, %xmm1
vmovaps (%rsp), %xmm7
vmaxps %xmm6, %xmm1, %xmm1
vmovaps 0x50(%rsp), %xmm6
vminps %xmm2, %xmm8, %xmm2
vminps %xmm0, %xmm2, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
vmovmskps %xmm0, %r15d
jmp 0x1d40cd
cmpl $0x6, %edx
jne 0x1d442b
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0xe0(%rsp), %rdi
movq 0x30(%rsp), %rdx
movq %r9, 0x38(%rsp)
callq *0x8(%r8,%rax)
leaq 0x120(%rsp), %r11
vmovaps 0x40(%rsp), %xmm8
vmovaps (%rsp), %xmm7
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x70(%rsp), %xmm4
vmovaps 0x80(%rsp), %xmm13
movq 0x18(%rsp), %r10
vmovaps 0x90(%rsp), %xmm12
vmovaps 0xd0(%rsp), %xmm15
movq 0x38(%rsp), %r9
movq 0x20(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x1d442b
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x1d4025
addq $0x8c8, %rsp # imm = 0x8C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16781328, false, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x908, %rsp # imm = 0x908
movq %rdx, 0x38(%rsp)
movq %rdi, 0x30(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1d4c5d
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x1d4c5d
vmovaps 0x10(%rsi), %xmm1
vdpps $0x7f, %xmm1, %xmm1, %xmm4
vrsqrtss %xmm4, %xmm4, %xmm5
vxorps %xmm12, %xmm12, %xmm12
vmovss 0x1d1827a(%rip), %xmm2 # 0x1eec718
vmulss %xmm2, %xmm5, %xmm6
vmovss 0x1d186d6(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm4, %xmm4
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm5, %xmm5
vmulss %xmm5, %xmm4, %xmm4
vsubss %xmm4, %xmm6, %xmm7
vbroadcastss 0x10(%rsi), %xmm4
vmovaps %xmm4, 0x110(%rsp)
vbroadcastss 0x14(%rsi), %xmm4
vmovaps %xmm4, 0x100(%rsp)
vbroadcastss 0x18(%rsi), %xmm4
vmovaps %xmm4, 0xf0(%rsp)
vshufps $0x0, %xmm7, %xmm7, %xmm4 # xmm4 = xmm7[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm8 # xmm8 = xmm5[1,0]
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vbroadcastss 0x1d4c9ba(%rip), %xmm9 # 0x1f20ec0
vxorps %xmm6, %xmm9, %xmm10
vxorps %xmm6, %xmm6, %xmm6
vunpckhps %xmm6, %xmm5, %xmm11 # xmm11 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovss %xmm10, %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm9, %xmm8, %xmm8
vinsertps $0x2a, %xmm5, %xmm8, %xmm8 # xmm8 = xmm8[0],zero,xmm5[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm9
vdpps $0x7f, %xmm8, %xmm8, %xmm11
vcmpltps %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vblendvps %xmm9, %xmm10, %xmm8, %xmm8
vdpps $0x7f, %xmm8, %xmm8, %xmm9
leaq 0x120(%rsp), %rcx
vrsqrtss %xmm9, %xmm9, %xmm10
vmulss %xmm2, %xmm10, %xmm11
vmulss %xmm3, %xmm9, %xmm9
vmulss %xmm10, %xmm9, %xmm9
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm9, %xmm9
vsubss %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm8, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vshufps $0xc9, %xmm5, %xmm5, %xmm10 # xmm10 = xmm5[1,2,0,3]
vmulps %xmm8, %xmm10, %xmm10
vmulps %xmm5, %xmm9, %xmm9
vsubps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vmovss %xmm7, (%rcx)
vrsqrtss %xmm10, %xmm10, %xmm7
vmulss %xmm2, %xmm7, %xmm2
vmulss %xmm3, %xmm10, %xmm3
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm7, %xmm7
vmulss %xmm7, %xmm3, %xmm3
vsubss %xmm3, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm5, %xmm4, %xmm3
vunpcklps %xmm3, %xmm8, %xmm4 # xmm4 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
vunpckhps %xmm3, %xmm8, %xmm3 # xmm3 = xmm8[2],xmm3[2],xmm8[3],xmm3[3]
vunpcklps %xmm6, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vmovaps %xmm3, 0x10(%rcx)
vmovaps %xmm4, 0x20(%rcx)
vmovaps %xmm2, 0x30(%rcx)
leaq 0x168(%rsp), %r9
movq 0x70(%rax), %rax
movq %rax, -0x8(%r9)
vmaxss 0xc(%rsi), %xmm12, %xmm2
vbroadcastss 0x1d4c8a8(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm1, %xmm3
vbroadcastss 0x1d1c9bf(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
vrcpps %xmm1, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vbroadcastss 0x1d180cf(%rip), %xmm4 # 0x1eec714
vsubps %xmm1, %xmm4, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vbroadcastss (%rsi), %xmm8
vbroadcastss 0x4(%rsi), %xmm11
vbroadcastss 0x8(%rsi), %xmm10
xorl %r10d, %r10d
vucomiss %xmm12, %xmm1
setb %r10b
vshufps $0x0, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[0,0,0,0]
vmovshdup %xmm1, %xmm3 # xmm3 = xmm1[1,1,3,3]
vshufps $0x55, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,1,1,1]
shll $0x4, %r10d
xorl %r13d, %r13d
vucomiss %xmm12, %xmm3
vshufpd $0x1, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,0]
vshufps $0xaa, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[2,2,2,2]
setb %r13b
shll $0x4, %r13d
orq $0x20, %r13
xorl %r14d, %r14d
vucomiss %xmm12, %xmm3
setb %r14b
shll $0x4, %r14d
orq $0x40, %r14
movq %r10, %rbp
xorq $0x10, %rbp
movq %r13, %r12
xorq $0x10, %r12
movq %r14, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm2, %xmm2, %xmm12 # xmm12 = xmm2[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm13 # xmm13 = xmm0[0,0,0,0]
leaq 0x160(%rsp), %r11
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm11, 0x90(%rsp)
vmovaps %xmm10, (%rsp)
vmovaps %xmm6, 0x80(%rsp)
vmovaps %xmm7, 0x70(%rsp)
vmovaps %xmm9, 0x60(%rsp)
vmovaps %xmm12, 0x50(%rsp)
vmovaps %xmm13, 0x40(%rsp)
movq %rsi, 0x28(%rsp)
movq %r10, 0x20(%rsp)
cmpq %r11, %r9
je 0x1d4c5d
movq -0x8(%r9), %rcx
addq $-0x8, %r9
testb $0x8, %cl
jne 0x1d481c
vmovss 0x1c(%rsi), %xmm0
movl %ecx, %edx
andl $0x7, %edx
movq %rcx, %rax
andq $-0x10, %rax
cmpq $0x3, %rdx
je 0x1d4871
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps 0x80(%rax,%r10), %xmm0, %xmm1
vaddps 0x20(%rax,%r10), %xmm1, %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps 0x80(%rax,%r13), %xmm0, %xmm2
vmulps %xmm1, %xmm6, %xmm1
vaddps 0x20(%rax,%r13), %xmm2, %xmm2
vsubps %xmm11, %xmm2, %xmm2
vmulps 0x80(%rax,%r14), %xmm0, %xmm3
vaddps 0x20(%rax,%r14), %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vsubps %xmm10, %xmm3, %xmm3
vmulps %xmm3, %xmm9, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmulps 0x80(%rax,%rbp), %xmm0, %xmm3
vaddps 0x20(%rax,%rbp), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vmulps 0x80(%rax,%r12), %xmm0, %xmm4
vaddps 0x20(%rax,%r12), %xmm4, %xmm4
vsubps %xmm11, %xmm4, %xmm4
vmulps %xmm4, %xmm7, %xmm4
vmulps 0x80(%rax,%rbx), %xmm0, %xmm5
vaddps 0x20(%rax,%rbx), %xmm5, %xmm5
vsubps %xmm10, %xmm5, %xmm5
vmulps %xmm5, %xmm9, %xmm5
vminps %xmm5, %xmm4, %xmm4
vmaxps %xmm1, %xmm12, %xmm1
vmaxps %xmm2, %xmm1, %xmm1
vminps %xmm3, %xmm13, %xmm2
vminps %xmm4, %xmm2, %xmm2
cmpl $0x6, %edx
je 0x1d4b50
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r15d
testb $0x8, %cl
jne 0x1d486a
testq %r15, %r15
je 0x1d4b48
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x1d4b78
movq %rax, %rcx
testl %edx, %edx
je 0x1d472c
jmp 0x1d4bc2
pushq $0x6
jmp 0x1d4b4a
vmovaps 0x50(%rax), %xmm5
vmovaps 0x60(%rax), %xmm3
vmovaps 0x70(%rax), %xmm14
vmovaps 0x80(%rax), %xmm11
vmovaps 0x90(%rax), %xmm10
vmovaps 0xa0(%rax), %xmm8
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vmovss 0x1d17e6f(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,0,0,0]
vmulps 0xe0(%rax), %xmm1, %xmm6
vmulps 0xf0(%rax), %xmm1, %xmm7
vmulps 0x100(%rax), %xmm1, %xmm9
vmulss 0x1d17156(%rip), %xmm0, %xmm0 # 0x1eeba24
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vaddps %xmm6, %xmm0, %xmm2
vmovaps %xmm2, 0xc0(%rsp)
vaddps %xmm7, %xmm0, %xmm2
vmovaps %xmm2, 0x10(%rsp)
vmulps 0x110(%rax), %xmm1, %xmm7
vmulps 0x120(%rax), %xmm1, %xmm12
vmulps 0x130(%rax), %xmm1, %xmm1
vaddps %xmm0, %xmm9, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vaddps %xmm7, %xmm4, %xmm9
vaddps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xb0(%rsp)
vaddps %xmm1, %xmm4, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vmovaps 0xf0(%rsp), %xmm4
vmulps %xmm4, %xmm11, %xmm0
vmulps %xmm4, %xmm10, %xmm1
vmulps %xmm4, %xmm8, %xmm12
vmovaps 0x100(%rsp), %xmm4
vmulps %xmm5, %xmm4, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm3, %xmm4, %xmm13
vaddps %xmm1, %xmm13, %xmm1
vmulps %xmm4, %xmm14, %xmm13
vmovaps %xmm14, %xmm7
vaddps %xmm12, %xmm13, %xmm15
vmovaps 0x20(%rax), %xmm12
vmovaps 0x110(%rsp), %xmm4
vmulps %xmm4, %xmm12, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmovaps 0x30(%rax), %xmm13
vmulps %xmm4, %xmm13, %xmm14
vaddps %xmm1, %xmm14, %xmm1
vmovaps 0x40(%rax), %xmm14
vmulps %xmm4, %xmm14, %xmm4
vaddps %xmm4, %xmm15, %xmm4
vbroadcastss 0x1d4c523(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm15
vbroadcastss 0x1d1c63a(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm15, %xmm15
vblendvps %xmm15, %xmm6, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm15
vcmpltps %xmm6, %xmm15, %xmm15
vblendvps %xmm15, %xmm6, %xmm1, %xmm1
vandps %xmm2, %xmm4, %xmm15
vcmpltps %xmm6, %xmm15, %xmm15
vblendvps %xmm15, %xmm6, %xmm4, %xmm4
vrcpps %xmm0, %xmm15
vmulps %xmm0, %xmm15, %xmm0
vbroadcastss 0x1d17d2c(%rip), %xmm2 # 0x1eec714
vsubps %xmm0, %xmm2, %xmm0
vmulps %xmm0, %xmm15, %xmm0
vaddps %xmm0, %xmm15, %xmm15
vrcpps %xmm1, %xmm0
vmulps %xmm0, %xmm1, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm1
vrcpps %xmm4, %xmm0
vmulps %xmm0, %xmm4, %xmm4
vsubps %xmm4, %xmm2, %xmm4
vmulps %xmm4, %xmm0, %xmm4
vaddps %xmm4, %xmm0, %xmm0
vmulps (%rsp), %xmm11, %xmm4
vmovaps 0x90(%rsp), %xmm11
vaddps 0xb0(%rax), %xmm4, %xmm4
vmulps (%rsp), %xmm10, %xmm10
vaddps 0xc0(%rax), %xmm10, %xmm10
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm3, %xmm11, %xmm3
vaddps %xmm3, %xmm10, %xmm3
vmovaps (%rsp), %xmm10
vmulps %xmm8, %xmm10, %xmm5
vmovaps 0xa0(%rsp), %xmm8
vaddps 0xd0(%rax), %xmm5, %xmm5
vmulps %xmm7, %xmm11, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vmulps %xmm12, %xmm8, %xmm5
vmovaps 0x50(%rsp), %xmm12
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm13, %xmm8, %xmm5
vmovaps 0x40(%rsp), %xmm13
vaddps %xmm3, %xmm5, %xmm3
vmulps %xmm14, %xmm8, %xmm5
vaddps %xmm2, %xmm5, %xmm2
vmovaps 0xc0(%rsp), %xmm5
vsubps %xmm4, %xmm5, %xmm5
vsubps %xmm4, %xmm9, %xmm4
vmovaps 0x60(%rsp), %xmm9
vmovaps 0x10(%rsp), %xmm6
vsubps %xmm3, %xmm6, %xmm6
vmovaps 0xb0(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm3
vmovaps 0xe0(%rsp), %xmm7
vsubps %xmm2, %xmm7, %xmm7
vmovaps 0xd0(%rsp), %xmm14
vsubps %xmm2, %xmm14, %xmm2
vmulps %xmm5, %xmm15, %xmm5
vmulps %xmm4, %xmm15, %xmm4
vmulps %xmm1, %xmm6, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vmulps %xmm1, %xmm3, %xmm1
vmulps %xmm0, %xmm2, %xmm0
vpminsd %xmm1, %xmm6, %xmm2
vpminsd %xmm0, %xmm7, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vpminsd %xmm4, %xmm5, %xmm3
vpmaxsd %xmm4, %xmm5, %xmm4
vpmaxsd %xmm1, %xmm6, %xmm1
vmovaps 0x80(%rsp), %xmm6
vpmaxsd %xmm0, %xmm7, %xmm0
vmovaps 0x70(%rsp), %xmm7
vminps %xmm0, %xmm1, %xmm0
vmaxps %xmm3, %xmm12, %xmm1
vmaxps %xmm2, %xmm1, %xmm1
vminps %xmm4, %xmm13, %xmm2
vminps %xmm0, %xmm2, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
jmp 0x1d4818
pushq $0x4
popq %rdx
jmp 0x1d485d
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1d4813
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x1d4bba
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x1d4b87
movq %r8, %rcx
jmp 0x1d485d
cmpl $0x6, %edx
jne 0x1d4c54
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x30(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x120(%rsp), %rdi
movq 0x38(%rsp), %rdx
movq %r9, 0x10(%rsp)
callq *0x8(%r8,%rax)
leaq 0x160(%rsp), %r11
vmovaps 0x40(%rsp), %xmm13
vmovaps 0x50(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm9
vmovaps 0x70(%rsp), %xmm7
vmovaps 0x80(%rsp), %xmm6
movq 0x20(%rsp), %r10
vmovaps (%rsp), %xmm10
vmovaps 0x90(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm8
movq 0x10(%rsp), %r9
movq 0x28(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x1d4c54
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x1d471b
addq $0x908, %rsp # imm = 0x908
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 257, true, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x8f8, %rsp # imm = 0x8F8
movq %rdx, 0x20(%rsp)
movq %rdi, 0x18(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1d53cc
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x1d53cc
vmovaps 0x10(%rsi), %xmm2
vdpps $0x7f, %xmm2, %xmm2, %xmm5
vrsqrtss %xmm5, %xmm5, %xmm6
vmovss 0x1d17a55(%rip), %xmm3 # 0x1eec718
vmulss %xmm3, %xmm6, %xmm7
vmovss 0x1d17eb1(%rip), %xmm4 # 0x1eecb80
vmulss %xmm4, %xmm5, %xmm5
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vsubss %xmm5, %xmm7, %xmm8
vbroadcastss 0x10(%rsi), %xmm5
vmovaps %xmm5, 0xf0(%rsp)
vbroadcastss 0x14(%rsi), %xmm5
vmovaps %xmm5, 0xe0(%rsp)
vshufps $0x0, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,0]
vmovshdup %xmm6, %xmm7 # xmm7 = xmm6[1,1,3,3]
vbroadcastss 0x1d4c1a3(%rip), %xmm12 # 0x1f20ec0
vxorps %xmm7, %xmm12, %xmm10
vxorps %xmm7, %xmm7, %xmm7
vunpckhps %xmm7, %xmm6, %xmm11 # xmm11 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovss %xmm10, %xmm7, %xmm10 # xmm10 = xmm10[0],xmm7[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm12, %xmm9, %xmm9
vinsertps $0x2a, %xmm6, %xmm9, %xmm9 # xmm9 = xmm9[0],zero,xmm6[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vdpps $0x7f, %xmm9, %xmm9, %xmm12
vcmpltps %xmm11, %xmm12, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vblendvps %xmm11, %xmm10, %xmm9, %xmm9
vdpps $0x7f, %xmm9, %xmm9, %xmm10
leaq 0x110(%rsp), %rcx
vrsqrtss %xmm10, %xmm10, %xmm11
vmulss %xmm3, %xmm11, %xmm12
vmulss %xmm4, %xmm10, %xmm10
vmulss %xmm11, %xmm10, %xmm10
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm10, %xmm10
vsubss %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm11 # xmm11 = xmm6[1,2,0,3]
vmulps %xmm9, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vsubps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vmovss %xmm8, (%rcx)
vrsqrtss %xmm11, %xmm11, %xmm8
vmulss %xmm3, %xmm8, %xmm3
vmulss %xmm4, %xmm11, %xmm4
vmulss %xmm4, %xmm8, %xmm4
vmulss %xmm8, %xmm8, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vsubss %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm10, %xmm3
vmulps %xmm6, %xmm5, %xmm4
vunpcklps %xmm4, %xmm9, %xmm5 # xmm5 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpckhps %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
vunpcklps %xmm7, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
vunpckhps %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
vunpcklps %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vunpcklps %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vunpckhps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovaps %xmm4, 0x10(%rcx)
vmovaps %xmm5, 0x20(%rcx)
vmovaps %xmm3, 0x30(%rcx)
leaq 0x158(%rsp), %r9
movq 0x70(%rax), %rax
movq %rax, -0x8(%r9)
vmaxss 0xc(%rsi), %xmm1, %xmm3
vbroadcastss 0x1d4c090(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm4
vbroadcastss 0x1d1c1a7(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1d178c5(%rip), %xmm5 # 0x1eec714
vdivps %xmm2, %xmm5, %xmm2
vbroadcastss 0x1d4c104(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm2, %xmm2
vbroadcastss 0x1d4b0a5(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm2, %xmm4
vbroadcastss 0x1d4b09c(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm2, %xmm2
xorl %r10d, %r10d
vucomiss %xmm1, %xmm4
setb %r10b
vshufps $0x0, %xmm4, %xmm4, %xmm15 # xmm15 = xmm4[0,0,0,0]
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
shll $0x4, %r10d
xorl %r14d, %r14d
vucomiss %xmm1, %xmm5
vshufpd $0x1, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
setb %r14b
shll $0x4, %r14d
orq $0x20, %r14
xorl %r13d, %r13d
vucomiss %xmm1, %xmm5
vshufps $0x0, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[2,2,2,2]
setb %r13b
shll $0x4, %r13d
orq $0x40, %r13
movq %r10, %rbp
xorq $0x10, %rbp
movq %r14, %r12
xorq $0x10, %r12
movq %r13, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm10 # xmm10 = xmm0[0,0,0,0]
leaq 0x150(%rsp), %r11
vbroadcastss 0x18(%rsi), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vbroadcastss (%rsi), %xmm12
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vmovaps %xmm6, 0xc0(%rsp)
vmovaps %xmm4, 0xb0(%rsp)
vmovaps %xmm5, 0xa0(%rsp)
vmovaps %xmm7, 0x90(%rsp)
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm9, 0x70(%rsp)
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm12, 0x50(%rsp)
vmovaps %xmm13, 0x40(%rsp)
vmovaps %xmm14, 0x30(%rsp)
movq %rsi, 0x10(%rsp)
movq %r10, 0x8(%rsp)
vmovaps %xmm15, 0x100(%rsp)
cmpq %r11, %r9
je 0x1d53cc
movq -0x8(%r9), %rcx
addq $-0x8, %r9
movq %rcx, %rax
andq $0xf, %rax
jne 0x1d5063
vmovaps 0x20(%rcx,%r10), %xmm0
vsubps %xmm12, %xmm0, %xmm0
vmulps %xmm0, %xmm15, %xmm0
vmovaps 0x20(%rcx,%r14), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rcx,%r13), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vmovaps 0x20(%rcx,%rbp), %xmm2
vsubps %xmm12, %xmm2, %xmm2
vmulps %xmm2, %xmm5, %xmm2
vmovaps 0x20(%rcx,%r12), %xmm3
vsubps %xmm13, %xmm3, %xmm3
vmulps %xmm3, %xmm7, %xmm3
vminps %xmm3, %xmm2, %xmm2
vmovaps 0x20(%rcx,%rbx), %xmm3
vsubps %xmm14, %xmm3, %xmm3
vmulps %xmm3, %xmm8, %xmm3
vmaxps %xmm9, %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vminps %xmm10, %xmm3, %xmm1
vminps %xmm1, %xmm2, %xmm1
vcmpleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %r15d
movb $0x1, %al
testb %al, %al
je 0x1d506c
testq %r15, %r15
je 0x1d5070
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x1d5075
movq %rax, %rcx
testl %edx, %edx
je 0x1d4f8e
jmp 0x1d5315
cmpl $0x2, %eax
je 0x1d50bc
xorl %eax, %eax
jmp 0x1d501e
pushq $0x6
jmp 0x1d5072
pushq $0x4
popq %rdx
jmp 0x1d5056
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x1d50b7
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x1d5084
movq %r8, %rcx
jmp 0x1d5056
movq %rcx, %rax
andq $-0x10, %rax
vmovaps 0x80(%rax), %xmm4
vmovaps 0xd0(%rsp), %xmm0
vmulps %xmm4, %xmm0, %xmm7
vmovaps 0x90(%rax), %xmm5
vmulps %xmm5, %xmm0, %xmm9
vmovaps 0xa0(%rax), %xmm3
vmulps %xmm3, %xmm0, %xmm10
vmovaps 0x20(%rax), %xmm2
vmovaps 0x30(%rax), %xmm1
vmovaps 0x40(%rax), %xmm0
vmovaps 0x50(%rax), %xmm6
vmovaps 0xe0(%rsp), %xmm12
vmulps %xmm6, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm11
vmovaps 0x60(%rax), %xmm8
vmulps %xmm8, %xmm12, %xmm7
vaddps %xmm7, %xmm9, %xmm9
vmovaps 0x70(%rax), %xmm7
vmulps %xmm7, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vmovaps 0xf0(%rsp), %xmm13
vmulps %xmm2, %xmm13, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmulps %xmm1, %xmm13, %xmm12
vaddps %xmm12, %xmm9, %xmm9
vmulps %xmm0, %xmm13, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vbroadcastss 0x1d4bd66(%rip), %xmm13 # 0x1f20ec4
vandps %xmm13, %xmm11, %xmm12
vbroadcastss 0x1d1be7c(%rip), %xmm14 # 0x1ef0fe8
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm11, %xmm11
vandps %xmm13, %xmm9, %xmm12
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm9, %xmm12
vandps %xmm13, %xmm10, %xmm9
vcmpltps %xmm14, %xmm9, %xmm9
vblendvps %xmm9, %xmm14, %xmm10, %xmm13
vrcpps %xmm11, %xmm9
vmulps %xmm11, %xmm9, %xmm10
vbroadcastss 0x1d17567(%rip), %xmm14 # 0x1eec714
vsubps %xmm10, %xmm14, %xmm10
vmulps %xmm10, %xmm9, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vrcpps %xmm12, %xmm10
vmulps %xmm12, %xmm10, %xmm11
vsubps %xmm11, %xmm14, %xmm11
vmulps %xmm11, %xmm10, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vrcpps %xmm13, %xmm11
vmulps %xmm13, %xmm11, %xmm12
vmovaps 0x40(%rsp), %xmm13
vsubps %xmm12, %xmm14, %xmm12
vmovaps 0x30(%rsp), %xmm14
vmulps %xmm12, %xmm11, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmovaps 0x50(%rsp), %xmm12
vmulps %xmm4, %xmm14, %xmm4
vaddps 0xb0(%rax), %xmm4, %xmm4
vmulps %xmm5, %xmm14, %xmm5
vaddps 0xc0(%rax), %xmm5, %xmm5
vmulps %xmm6, %xmm13, %xmm6
vaddps %xmm4, %xmm6, %xmm4
vmulps %xmm8, %xmm13, %xmm6
vmovaps 0x80(%rsp), %xmm8
vaddps %xmm5, %xmm6, %xmm5
vmulps %xmm3, %xmm14, %xmm3
vaddps 0xd0(%rax), %xmm3, %xmm3
vmulps %xmm7, %xmm13, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps %xmm2, %xmm12, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d4bc69(%rip), %xmm6 # 0x1f20ec0
vxorps %xmm6, %xmm9, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vmulps %xmm1, %xmm12, %xmm1
vaddps %xmm5, %xmm1, %xmm1
vxorps %xmm6, %xmm10, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vxorps %xmm6, %xmm11, %xmm4
vmulps %xmm0, %xmm12, %xmm0
vaddps %xmm3, %xmm0, %xmm0
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm2, %xmm9, %xmm3
vmovaps 0x70(%rsp), %xmm9
vaddps %xmm1, %xmm10, %xmm4
vmovaps 0x60(%rsp), %xmm10
vaddps %xmm0, %xmm11, %xmm5
vpminsd %xmm4, %xmm1, %xmm6
vpminsd %xmm5, %xmm0, %xmm7
vmaxps %xmm7, %xmm6, %xmm6
vpminsd %xmm3, %xmm2, %xmm7
vpmaxsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm4, %xmm1, %xmm1
vmovaps 0xb0(%rsp), %xmm4
vpmaxsd %xmm5, %xmm0, %xmm0
vmovaps 0xa0(%rsp), %xmm5
vminps %xmm0, %xmm1, %xmm0
vmaxps %xmm7, %xmm9, %xmm1
vmovaps 0x90(%rsp), %xmm7
vmaxps %xmm6, %xmm1, %xmm1
vmovaps 0xc0(%rsp), %xmm6
vminps %xmm2, %xmm10, %xmm2
vminps %xmm0, %xmm2, %xmm0
vbroadcastss 0x1d4ac16(%rip), %xmm2 # 0x1f1ff10
vmulps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1d4ac0d(%rip), %xmm2 # 0x1f1ff14
vmulps %xmm2, %xmm0, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
jmp 0x1d5018
cmpl $0x6, %edx
jne 0x1d53c3
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x18(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x110(%rsp), %rdi
movq 0x20(%rsp), %rdx
movq %r9, 0x28(%rsp)
callq *0x8(%r8,%rax)
vmovaps 0x30(%rsp), %xmm14
vmovaps 0x40(%rsp), %xmm13
vmovaps 0x50(%rsp), %xmm12
leaq 0x150(%rsp), %r11
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x90(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm5
vmovaps 0xb0(%rsp), %xmm4
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0x100(%rsp), %xmm15
movq 0x8(%rsp), %r10
movq 0x28(%rsp), %r9
movq 0x10(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x1d53c3
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x1d4f7d
addq $0x8f8, %rsp # imm = 0x8F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16781328, true, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x948, %rsp # imm = 0x948
movq %rdx, 0x58(%rsp)
movq %rdi, 0x50(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1d5c9c
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x1d5c9c
vxorps %xmm12, %xmm12, %xmm12
vmovaps 0x10(%rsi), %xmm1
vdpps $0x7f, %xmm1, %xmm1, %xmm4
vrsqrtss %xmm4, %xmm4, %xmm5
vmovss 0x1d172e2(%rip), %xmm2 # 0x1eec718
vmulss %xmm2, %xmm5, %xmm6
vmovss 0x1d1773e(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm4, %xmm4
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm5, %xmm5
vmulss %xmm5, %xmm4, %xmm4
vsubss %xmm4, %xmm6, %xmm7
vbroadcastss 0x10(%rsi), %xmm4
vmovaps %xmm4, 0x150(%rsp)
vbroadcastss 0x14(%rsi), %xmm4
vmovaps %xmm4, 0x140(%rsp)
vbroadcastss 0x18(%rsi), %xmm4
vmovaps %xmm4, 0x130(%rsp)
vshufps $0x0, %xmm7, %xmm7, %xmm4 # xmm4 = xmm7[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm8 # xmm8 = xmm5[1,0]
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vbroadcastss 0x1d4ba22(%rip), %xmm9 # 0x1f20ec0
vxorps %xmm6, %xmm9, %xmm10
vxorps %xmm6, %xmm6, %xmm6
vunpckhps %xmm6, %xmm5, %xmm11 # xmm11 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovss %xmm10, %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm9, %xmm8, %xmm8
vinsertps $0x2a, %xmm5, %xmm8, %xmm8 # xmm8 = xmm8[0],zero,xmm5[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm9
vdpps $0x7f, %xmm8, %xmm8, %xmm11
vcmpltps %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vblendvps %xmm9, %xmm10, %xmm8, %xmm8
vdpps $0x7f, %xmm8, %xmm8, %xmm9
leaq 0x160(%rsp), %rcx
vrsqrtss %xmm9, %xmm9, %xmm10
vmulss %xmm2, %xmm10, %xmm11
vmulss %xmm3, %xmm9, %xmm9
vmulss %xmm10, %xmm9, %xmm9
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm9, %xmm9
vsubss %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm8, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vshufps $0xc9, %xmm5, %xmm5, %xmm10 # xmm10 = xmm5[1,2,0,3]
vmulps %xmm8, %xmm10, %xmm10
vmulps %xmm5, %xmm9, %xmm9
vsubps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vmovss %xmm7, (%rcx)
vrsqrtss %xmm10, %xmm10, %xmm7
vmulss %xmm2, %xmm7, %xmm2
vmulss %xmm3, %xmm10, %xmm3
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm7, %xmm7
vmulss %xmm7, %xmm3, %xmm3
vsubss %xmm3, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm5, %xmm4, %xmm3
vunpcklps %xmm3, %xmm8, %xmm4 # xmm4 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
vunpckhps %xmm3, %xmm8, %xmm3 # xmm3 = xmm8[2],xmm3[2],xmm8[3],xmm3[3]
vunpcklps %xmm6, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vmovaps %xmm3, 0x10(%rcx)
vmovaps %xmm4, 0x20(%rcx)
vmovaps %xmm2, 0x30(%rcx)
leaq 0x1a8(%rsp), %r9
movq 0x70(%rax), %rax
movq %rax, -0x8(%r9)
vmaxss 0xc(%rsi), %xmm12, %xmm2
vbroadcastss 0x1d4b910(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm1, %xmm3
vbroadcastss 0x1d1ba27(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1d17145(%rip), %xmm4 # 0x1eec714
vdivps %xmm1, %xmm4, %xmm1
vbroadcastss 0x1d4b984(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d4a925(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm1, %xmm3
vbroadcastss 0x1d4a91c(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm1, %xmm1
xorl %r10d, %r10d
vucomiss %xmm12, %xmm3
setb %r10b
vshufps $0x0, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[2,2,2,2]
shll $0x4, %r10d
xorl %r13d, %r13d
vucomiss %xmm12, %xmm4
vshufps $0x0, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[0,0,0,0]
setb %r13b
shll $0x4, %r13d
orq $0x20, %r13
xorl %r14d, %r14d
vucomiss %xmm12, %xmm5
vshufps $0x55, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm10 # xmm10 = xmm1[2,2,2,2]
setb %r14b
shll $0x4, %r14d
orq $0x40, %r14
movq %r10, %rbp
xorq $0x10, %rbp
movq %r13, %r12
xorq $0x10, %r12
movq %r14, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,0,0,0]
leaq 0x1a0(%rsp), %r11
vbroadcastss (%rsi), %xmm13
vbroadcastss 0x4(%rsi), %xmm14
vbroadcastss 0x8(%rsi), %xmm15
vmovaps %xmm6, 0xd0(%rsp)
vmovaps %xmm7, 0xc0(%rsp)
vmovaps %xmm8, 0xb0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovaps %xmm5, 0x90(%rsp)
vmovaps %xmm10, 0x80(%rsp)
vmovaps %xmm11, 0x70(%rsp)
vmovaps %xmm12, 0x60(%rsp)
vmovaps %xmm13, 0x20(%rsp)
vmovaps %xmm14, 0x10(%rsp)
vmovaps %xmm15, (%rsp)
movq %rsi, 0x48(%rsp)
movq %r10, 0x40(%rsp)
cmpq %r11, %r9
je 0x1d5c9c
movq -0x8(%r9), %rcx
addq $-0x8, %r9
testb $0x8, %cl
jne 0x1d57f1
vmovss 0x1c(%rsi), %xmm0
movl %ecx, %edx
andl $0x7, %edx
movq %rcx, %rax
andq $-0x10, %rax
cmpq $0x3, %rdx
je 0x1d5846
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps 0x80(%rax,%r10), %xmm0, %xmm1
vaddps 0x20(%rax,%r10), %xmm1, %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps 0x80(%rax,%r13), %xmm0, %xmm2
vmulps %xmm1, %xmm6, %xmm1
vaddps 0x20(%rax,%r13), %xmm2, %xmm2
vsubps %xmm14, %xmm2, %xmm2
vmulps 0x80(%rax,%r14), %xmm0, %xmm3
vaddps 0x20(%rax,%r14), %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vsubps %xmm15, %xmm3, %xmm3
vmulps %xmm3, %xmm8, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm1, %xmm11, %xmm1
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%rbp), %xmm0, %xmm2
vaddps 0x20(%rax,%rbp), %xmm2, %xmm2
vsubps %xmm13, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%r12), %xmm0, %xmm3
vaddps 0x20(%rax,%r12), %xmm3, %xmm3
vsubps %xmm14, %xmm3, %xmm3
vmulps %xmm3, %xmm5, %xmm3
vmulps 0x80(%rax,%rbx), %xmm0, %xmm4
vaddps 0x20(%rax,%rbx), %xmm4, %xmm4
vsubps %xmm15, %xmm4, %xmm4
vmulps %xmm4, %xmm10, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm2, %xmm12, %xmm2
vminps %xmm3, %xmm2, %xmm2
cmpl $0x6, %edx
je 0x1d5b74
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r15d
testb $0x8, %cl
jne 0x1d583f
testq %r15, %r15
je 0x1d5b6c
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x1d5b9c
movq %rax, %rcx
testl %edx, %edx
je 0x1d5701
jmp 0x1d5be6
pushq $0x6
jmp 0x1d5b6e
vmovaps 0x50(%rax), %xmm9
vmovaps 0x60(%rax), %xmm3
vmovaps 0x70(%rax), %xmm14
vmovaps 0x80(%rax), %xmm11
vmovaps 0x90(%rax), %xmm10
vmovaps 0xa0(%rax), %xmm7
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vmovss 0x1d16e9a(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,0,0,0]
vmulps 0xe0(%rax), %xmm1, %xmm5
vmulps 0xf0(%rax), %xmm1, %xmm6
vmulps 0x100(%rax), %xmm1, %xmm8
vmulss 0x1d16181(%rip), %xmm0, %xmm0 # 0x1eeba24
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vaddps %xmm5, %xmm0, %xmm2
vmovaps %xmm2, 0x100(%rsp)
vaddps %xmm6, %xmm0, %xmm2
vmovaps %xmm2, 0x30(%rsp)
vmulps 0x110(%rax), %xmm1, %xmm6
vmulps 0x120(%rax), %xmm1, %xmm12
vaddps %xmm0, %xmm8, %xmm0
vmovaps %xmm0, 0x120(%rsp)
vmulps 0x130(%rax), %xmm1, %xmm0
vaddps %xmm6, %xmm4, %xmm8
vaddps %xmm4, %xmm12, %xmm1
vmovaps %xmm1, 0xf0(%rsp)
vaddps %xmm0, %xmm4, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps 0x130(%rsp), %xmm4
vmulps %xmm4, %xmm11, %xmm0
vmulps %xmm4, %xmm10, %xmm1
vmulps %xmm7, %xmm4, %xmm12
vmovaps 0x140(%rsp), %xmm4
vmulps %xmm4, %xmm9, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm3, %xmm4, %xmm13
vaddps %xmm1, %xmm13, %xmm1
vmulps %xmm4, %xmm14, %xmm13
vmovaps %xmm14, %xmm6
vaddps %xmm12, %xmm13, %xmm15
vmovaps 0x20(%rax), %xmm12
vmovaps 0x150(%rsp), %xmm4
vmulps %xmm4, %xmm12, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmovaps 0x30(%rax), %xmm13
vmulps %xmm4, %xmm13, %xmm14
vaddps %xmm1, %xmm14, %xmm1
vmovaps 0x40(%rax), %xmm14
vmulps %xmm4, %xmm14, %xmm4
vaddps %xmm4, %xmm15, %xmm4
vbroadcastss 0x1d4b54e(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm15
vbroadcastss 0x1d1b665(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm15, %xmm15
vblendvps %xmm15, %xmm5, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm15
vcmpltps %xmm5, %xmm15, %xmm15
vblendvps %xmm15, %xmm5, %xmm1, %xmm1
vandps %xmm2, %xmm4, %xmm15
vcmpltps %xmm5, %xmm15, %xmm15
vblendvps %xmm15, %xmm5, %xmm4, %xmm4
vrcpps %xmm0, %xmm15
vmulps %xmm0, %xmm15, %xmm0
vbroadcastss 0x1d16d57(%rip), %xmm2 # 0x1eec714
vsubps %xmm0, %xmm2, %xmm0
vmulps %xmm0, %xmm15, %xmm0
vaddps %xmm0, %xmm15, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vrcpps %xmm1, %xmm0
vmulps %xmm0, %xmm1, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm1
vrcpps %xmm4, %xmm0
vmulps %xmm0, %xmm4, %xmm4
vsubps %xmm4, %xmm2, %xmm4
vmulps %xmm4, %xmm0, %xmm4
vaddps %xmm4, %xmm0, %xmm0
vmulps (%rsp), %xmm11, %xmm4
vmovaps 0x70(%rsp), %xmm11
vaddps 0xb0(%rax), %xmm4, %xmm4
vmulps 0x10(%rsp), %xmm9, %xmm9
vaddps %xmm4, %xmm9, %xmm4
vmulps (%rsp), %xmm10, %xmm9
vmovaps 0x80(%rsp), %xmm10
vaddps 0xc0(%rax), %xmm9, %xmm9
vmulps 0x10(%rsp), %xmm3, %xmm3
vaddps %xmm3, %xmm9, %xmm3
vmovaps 0xa0(%rsp), %xmm9
vmulps (%rsp), %xmm7, %xmm7
vaddps 0xd0(%rax), %xmm7, %xmm7
vmulps 0x10(%rsp), %xmm6, %xmm2
vaddps %xmm7, %xmm2, %xmm2
vmulps 0x20(%rsp), %xmm12, %xmm7
vmovaps 0x60(%rsp), %xmm12
vaddps %xmm4, %xmm7, %xmm4
vmulps 0x20(%rsp), %xmm13, %xmm7
vmovaps 0x20(%rsp), %xmm13
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm14, %xmm13, %xmm7
vmovaps 0x10(%rsp), %xmm14
vaddps %xmm2, %xmm7, %xmm2
vmovaps 0x100(%rsp), %xmm5
vsubps %xmm4, %xmm5, %xmm5
vsubps %xmm4, %xmm8, %xmm4
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm7
vmovaps 0xf0(%rsp), %xmm6
vsubps %xmm3, %xmm6, %xmm3
vmovaps 0x120(%rsp), %xmm6
vsubps %xmm2, %xmm6, %xmm6
vmovaps 0x110(%rsp), %xmm15
vsubps %xmm2, %xmm15, %xmm2
vmovaps 0xe0(%rsp), %xmm15
vmulps %xmm5, %xmm15, %xmm5
vmulps %xmm4, %xmm15, %xmm4
vmovaps (%rsp), %xmm15
vmulps %xmm1, %xmm7, %xmm7
vmulps %xmm0, %xmm6, %xmm6
vmulps %xmm1, %xmm3, %xmm1
vmulps %xmm0, %xmm2, %xmm0
vpminsd %xmm1, %xmm7, %xmm2
vpminsd %xmm0, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vpminsd %xmm4, %xmm5, %xmm3
vpmaxsd %xmm4, %xmm5, %xmm4
vmovaps 0x90(%rsp), %xmm5
vpmaxsd %xmm1, %xmm7, %xmm1
vmovaps 0xc0(%rsp), %xmm7
vpmaxsd %xmm0, %xmm6, %xmm0
vmovaps 0xd0(%rsp), %xmm6
vminps %xmm0, %xmm1, %xmm0
vmaxps %xmm3, %xmm11, %xmm1
vmaxps %xmm2, %xmm1, %xmm1
vminps %xmm4, %xmm12, %xmm2
vminps %xmm0, %xmm2, %xmm0
vbroadcastss 0x1d4a3bf(%rip), %xmm2 # 0x1f1ff10
vmulps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1d4a3b6(%rip), %xmm2 # 0x1f1ff14
vmulps %xmm2, %xmm0, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
jmp 0x1d57ed
pushq $0x4
popq %rdx
jmp 0x1d5832
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1d57e8
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x1d5bde
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x1d5bab
movq %r8, %rcx
jmp 0x1d5832
cmpl $0x6, %edx
jne 0x1d5c93
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x50(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x160(%rsp), %rdi
movq 0x58(%rsp), %rdx
movq %r9, 0x30(%rsp)
callq *0x8(%r8,%rax)
vmovaps (%rsp), %xmm15
vmovaps 0x10(%rsp), %xmm14
vmovaps 0x20(%rsp), %xmm13
leaq 0x1a0(%rsp), %r11
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x70(%rsp), %xmm11
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x90(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0xc0(%rsp), %xmm7
vmovaps 0xd0(%rsp), %xmm6
movq 0x40(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x48(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x1d5c93
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x1d56f0
addq $0x948, %rsp # imm = 0x948
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa18, %rsp # imm = 0xA18
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1d5cdb
addq $0xa18, %rsp # imm = 0xA18
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1d5cc9
leaq 0x278(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%r14), %xmm2, %xmm1
vmovaps 0x10(%r14), %xmm3
vbroadcastss 0x1d4b1b2(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d1b2c9(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d169d9(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %xmm6
vbroadcastss 0x4(%r14), %xmm7
vbroadcastss 0x8(%r14), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rcx
xorq $0x10, %rcx
movq %r10, %r13
xorq $0x10, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0x1f7a1b5(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
leaq 0x270(%rsp), %rbp
vmovaps %xmm6, 0x190(%rsp)
vmovaps %xmm7, 0x180(%rsp)
vmovaps %xmm8, 0x170(%rsp)
vmovaps %xmm9, 0x160(%rsp)
vmovaps %xmm10, 0x150(%rsp)
vmovaps %xmm3, 0x140(%rsp)
movq %r13, 0x58(%rsp)
vmovaps %xmm4, 0x130(%rsp)
vmovaps %xmm5, 0x120(%rsp)
cmpq %rbp, %rdi
je 0x1d5cc9
movq -0x8(%rdi), %rbx
addq $-0x8, %rdi
testb $0x8, %bl
jne 0x1d5ed2
vmovaps 0x20(%rbx,%r8), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%rbx,%r9), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r10), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r11), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%rbx,%rcx), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbx,%r13), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r12d
testb $0x8, %bl
jne 0x1d5f0a
testq %r12, %r12
je 0x1d5f0e
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rsi
xorl %r15d, %r15d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rsi
jne 0x1d5f14
movq %rax, %rbx
testl %r15d, %r15d
je 0x1d5e42
jmp 0x1d5f52
pushq $0x6
jmp 0x1d5f10
pushq $0x4
popq %r15
jmp 0x1d5eff
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rsi, %rax
je 0x1d5f47
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x1d5f26
movq %rcx, %rbx
movq %r15, %rcx
xorl %r15d, %r15d
jmp 0x1d5eff
cmpl $0x6, %r15d
jne 0x1d6504
movl %ebx, %esi
andl $0xf, %esi
addq $-0x8, %rsi
setne %r15b
je 0x1d6501
andq $-0x10, %rbx
xorl %ebp, %ebp
imulq $0xb0, %rbp, %r13
vmovaps 0x80(%rbx,%r13), %xmm9
vmovaps 0x40(%rbx,%r13), %xmm6
vmulps %xmm6, %xmm9, %xmm0
vmovaps 0x70(%rbx,%r13), %xmm10
vmovaps 0x50(%rbx,%r13), %xmm7
vmulps %xmm7, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps 0x60(%rbx,%r13), %xmm11
vmulps %xmm7, %xmm11, %xmm1
vmovaps (%rbx,%r13), %xmm3
vmovaps 0x10(%rbx,%r13), %xmm13
vmovaps 0x20(%rbx,%r13), %xmm0
vmovaps 0x30(%rbx,%r13), %xmm8
vmulps %xmm8, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm5
vmulps %xmm8, %xmm10, %xmm2
vmulps %xmm6, %xmm11, %xmm12
vsubps %xmm2, %xmm12, %xmm4
vbroadcastss (%r14), %xmm12
vsubps %xmm12, %xmm3, %xmm2
vbroadcastss 0x4(%r14), %xmm12
vsubps %xmm12, %xmm13, %xmm3
vbroadcastss 0x8(%r14), %xmm12
vsubps %xmm12, %xmm0, %xmm1
vbroadcastss 0x14(%r14), %xmm12
vbroadcastss 0x18(%r14), %xmm13
vmulps %xmm1, %xmm12, %xmm14
vmulps %xmm3, %xmm13, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x10(%r14), %xmm15
vmulps %xmm2, %xmm13, %xmm0
vmovaps %xmm1, 0x60(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm3, 0x40(%rsp)
vmulps %xmm3, %xmm15, %xmm1
vmovaps %xmm2, 0x70(%rsp)
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm4, 0x20(%rsp)
vmulps %xmm4, %xmm13, %xmm2
vmovaps (%rsp), %xmm13
vmovaps %xmm5, 0x30(%rsp)
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm15, %xmm13, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vmulps %xmm14, %xmm11, %xmm10
vaddps %xmm9, %xmm10, %xmm10
vmulps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmovddup 0x1d4aec6(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps %xmm14, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d4adee(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm6
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x1a0(%rsp), %xmm10
jne 0x1d6111
incq %rbp
cmpq %rsi, %rbp
setb %r15b
jne 0x1d5f75
jmp 0x1d64ac
vandps 0x1a0(%rsp), %xmm10, %xmm10
vmovaps 0x60(%rsp), %xmm0
vmulps 0x20(%rsp), %xmm0, %xmm0
vmovaps 0x40(%rsp), %xmm1
vmulps 0x30(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x70(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm4
vtestps %xmm10, %xmm4
je 0x1d60fc
addq %rbx, %r13
vandps %xmm4, %xmm10, %xmm0
vmovaps %xmm7, 0x1b0(%rsp)
vmovaps %xmm8, 0x1c0(%rsp)
vmovaps %xmm3, 0x1d0(%rsp)
vmovaps %xmm6, 0x1e0(%rsp)
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm13, 0x240(%rsp)
vmovaps 0x30(%rsp), %xmm1
vmovaps %xmm1, 0x250(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x260(%rsp)
movq (%rdx), %rax
movq %rax, 0x70(%rsp)
vrcpps %xmm6, %xmm1
vmulps %xmm1, %xmm6, %xmm2
vbroadcastss 0x1d16533(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x1d0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x230(%rsp)
vmulps 0x1b0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x210(%rsp)
vmulps 0x1c0(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x220(%rsp)
vmovmskps %xmm0, %eax
movq %rcx, 0x10(%rsp)
movq %rax, (%rsp)
bsfq %rax, %rcx
movq %rcx, 0x20(%rsp)
movl 0x90(%r13,%rcx,4), %eax
movq 0x70(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x40(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x30(%rsp)
testl %ecx, 0x34(%rax)
je 0x1d6289
movq 0x10(%rdx), %rax
movq %rax, 0x60(%rsp)
cmpq $0x0, 0x10(%rax)
movq 0x10(%rsp), %rcx
jne 0x1d62bb
movq 0x30(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x1d62bb
xorl %eax, %eax
jmp 0x1d62a1
movq 0x20(%rsp), %rax
movq (%rsp), %rcx
btcq %rax, %rcx
movq %rcx, (%rsp)
movb $0x1, %al
movq 0x10(%rsp), %rcx
testb %al, %al
je 0x1d6513
movq (%rsp), %rax
testq %rax, %rax
jne 0x1d622c
jmp 0x1d60fc
movq %rsi, 0x88(%rsp)
movq %r11, 0x90(%rsp)
movq %r10, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movq %r8, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
movq 0x20(%rsp), %rsi
vmovss 0x210(%rsp,%rsi,4), %xmm0
vmovss 0x220(%rsp,%rsi,4), %xmm1
movq %rdx, 0xb8(%rsp)
movq 0x8(%rdx), %rcx
movl 0xa0(%r13,%rsi,4), %edx
vmovss 0x240(%rsp,%rsi,4), %xmm2
vmovss 0x250(%rsp,%rsi,4), %xmm3
vmovss 0x260(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edx, 0xd4(%rsp)
movq 0x40(%rsp), %rax
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x40(%rsp)
vmovss 0x230(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x30(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %r14, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x1d640d
leaq 0xf0(%rsp), %rdi
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d644b
movq 0x60(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1d6447
movq 0x60(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x1d6430
movq 0x30(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1d643a
leaq 0xf0(%rsp), %rdi
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d644b
xorl %eax, %eax
jmp 0x1d646a
vmovss 0x40(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq (%rsp), %rax
movq 0x20(%rsp), %rcx
btcq %rcx, %rax
movq %rax, (%rsp)
movb $0x1, %al
movq 0xb8(%rsp), %rdx
movq 0xb0(%rsp), %rdi
movq 0xa8(%rsp), %r8
movq 0xa0(%rsp), %r9
movq 0x98(%rsp), %r10
movq 0x90(%rsp), %r11
movq 0x10(%rsp), %rcx
movq 0x88(%rsp), %rsi
jmp 0x1d62a1
vmovaps 0x190(%rsp), %xmm6
vmovaps 0x180(%rsp), %xmm7
vmovaps 0x170(%rsp), %xmm8
vmovaps 0x160(%rsp), %xmm9
vmovaps 0x150(%rsp), %xmm10
vmovaps 0x140(%rsp), %xmm3
movq 0x58(%rsp), %r13
vmovaps 0x130(%rsp), %xmm4
vmovaps 0x120(%rsp), %xmm5
leaq 0x270(%rsp), %rbp
xorl %r15d, %r15d
cmpl $0x3, %r15d
jne 0x1d5e31
jmp 0x1d5cc9
testb $0x1, %r15b
vmovaps 0x190(%rsp), %xmm6
vmovaps 0x180(%rsp), %xmm7
vmovaps 0x170(%rsp), %xmm8
vmovaps 0x160(%rsp), %xmm9
vmovaps 0x150(%rsp), %xmm10
vmovaps 0x140(%rsp), %xmm3
movq 0x58(%rsp), %r13
vmovaps 0x130(%rsp), %xmm4
vmovaps 0x120(%rsp), %xmm5
leaq 0x270(%rsp), %rbp
movl $0x0, %r15d
je 0x1d6504
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %r15
jmp 0x1d6504
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa28, %rsp # imm = 0xA28
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1d65af
addq $0xa28, %rsp # imm = 0xA28
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1d659d
movq %rdx, %r8
leaq 0x288(%rsp), %r9
movq 0x70(%rax), %rax
movq %rax, -0x8(%r9)
vmaxss 0xc(%r14), %xmm2, %xmm1
vmovaps 0x10(%r14), %xmm3
vbroadcastss 0x1d4a8db(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d1a9f2(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d16102(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %xmm6
vbroadcastss 0x4(%r14), %xmm7
vbroadcastss 0x8(%r14), %xmm8
xorl %r10d, %r10d
vucomiss %xmm2, %xmm3
setb %r10b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
shll $0x4, %r10d
xorl %r11d, %r11d
vucomiss %xmm2, %xmm4
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
setb %r11b
shll $0x4, %r11d
orq $0x20, %r11
xorl %esi, %esi
vucomiss %xmm2, %xmm5
setb %sil
shll $0x4, %esi
orq $0x40, %rsi
movq %r10, %rdi
xorq $0x10, %rdi
movq %r11, %rbp
xorq $0x10, %rbp
movq %rsi, %rcx
xorq $0x10, %rcx
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0x1f798e0(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x190(%rsp)
leaq 0x280(%rsp), %r15
vmovaps %xmm6, 0x180(%rsp)
vmovaps %xmm7, 0x170(%rsp)
vmovaps %xmm8, 0x160(%rsp)
vmovaps %xmm9, 0x150(%rsp)
vmovaps %xmm10, 0x140(%rsp)
vmovaps %xmm3, 0x130(%rsp)
movq %rsi, 0x40(%rsp)
movq %rdi, 0x38(%rsp)
movq %rbp, 0x30(%rsp)
movq %rcx, 0x28(%rsp)
vmovaps %xmm4, 0x120(%rsp)
vmovaps %xmm5, 0x110(%rsp)
cmpq %r15, %r9
je 0x1d659d
movq -0x8(%r9), %rbx
addq $-0x8, %r9
testb $0x8, %bl
jne 0x1d67b3
vmovaps 0x20(%rbx,%r10), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%rbx,%r11), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%rsi), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%rdi), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%rbx,%rbp), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbx,%rcx), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r12d
testb $0x8, %bl
jne 0x1d67eb
testq %r12, %r12
je 0x1d67ef
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %r13d, %r13d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdx
jne 0x1d67f5
movq %rax, %rbx
testl %r13d, %r13d
je 0x1d6726
jmp 0x1d6838
pushq $0x6
jmp 0x1d67f1
pushq $0x4
popq %r13
jmp 0x1d67e0
movq %rcx, %r15
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1d6828
movq %rcx, (%r9)
addq $0x8, %r9
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1d6807
movq %rcx, %rbx
movq %r15, %rcx
leaq 0x280(%rsp), %r15
jmp 0x1d67e0
cmpl $0x6, %r13d
jne 0x1d6e6a
movl %ebx, %eax
andl $0xf, %eax
xorl %r13d, %r13d
addq $-0x8, %rax
movq %rax, 0xa8(%rsp)
setne %r15b
je 0x1d6e62
andq $-0x10, %rbx
movq (%r8), %rax
movq %rax, 0x48(%rsp)
xorl %ebp, %ebp
imulq $0x50, %rbp, %rax
movl 0x30(%rbx,%rax), %edx
movq 0x48(%rsp), %rcx
movq 0x228(%rcx), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%rbx,%rax), %esi
movl 0x4(%rbx,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x10(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x20(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x34(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm5
movl 0x14(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm4
movl 0x24(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x38(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x18(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm7
movl 0x28(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x3c(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm9
movl 0x1c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm10
movl 0x2c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm11
vunpcklps %xmm6, %xmm2, %xmm12 # xmm12 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm9, %xmm5, %xmm13 # xmm13 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm5, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vunpcklps %xmm13, %xmm12, %xmm9 # xmm9 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm5 # xmm5 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm7, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
vunpckhps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
vunpcklps %xmm10, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpcklps %xmm7, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
vunpckhps %xmm7, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
vunpcklps %xmm8, %xmm0, %xmm7 # xmm7 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm11, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm7, %xmm3 # xmm3 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
vunpckhps %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[2],xmm8[2],xmm7[3],xmm8[3]
vmovaps 0x30(%rbx,%rax), %xmm7
vmovaps %xmm7, 0x1b0(%rsp)
vmovaps 0x40(%rbx,%rax), %xmm7
vmovaps %xmm7, 0x1a0(%rsp)
vsubps %xmm4, %xmm9, %xmm13
vmovaps %xmm13, 0x10(%rsp)
vsubps %xmm2, %xmm5, %xmm14
vmovaps %xmm14, 0x60(%rsp)
vsubps %xmm1, %xmm6, %xmm8
vsubps %xmm9, %xmm3, %xmm10
vsubps %xmm5, %xmm11, %xmm11
vsubps %xmm6, %xmm0, %xmm12
vmulps %xmm12, %xmm14, %xmm0
vmulps %xmm11, %xmm8, %xmm1
vsubps %xmm0, %xmm1, %xmm7
vmulps %xmm10, %xmm8, %xmm1
vmulps %xmm12, %xmm13, %xmm2
vsubps %xmm1, %xmm2, %xmm4
vmulps %xmm11, %xmm13, %xmm2
vmulps %xmm10, %xmm14, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss (%r14), %xmm0
vbroadcastss 0x4(%r14), %xmm13
vbroadcastss 0x8(%r14), %xmm14
vbroadcastss 0x14(%r14), %xmm15
vsubps %xmm0, %xmm9, %xmm2
vbroadcastss 0x18(%r14), %xmm9
vsubps %xmm13, %xmm5, %xmm5
vsubps %xmm14, %xmm6, %xmm6
vmulps %xmm6, %xmm15, %xmm13
vmulps %xmm5, %xmm9, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vbroadcastss 0x10(%r14), %xmm14
vmulps %xmm2, %xmm9, %xmm0
vmulps %xmm6, %xmm14, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmulps %xmm5, %xmm14, %xmm1
vmovaps %xmm2, 0x70(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm3, 0x50(%rsp)
vmulps %xmm3, %xmm9, %xmm2
vmulps %xmm4, %xmm15, %xmm9
vmovaps %xmm4, %xmm15
vaddps %xmm2, %xmm9, %xmm2
vmulps %xmm7, %xmm14, %xmm9
vmovaps %xmm7, %xmm14
vaddps %xmm2, %xmm9, %xmm2
vmulps %xmm1, %xmm12, %xmm9
vmulps %xmm0, %xmm11, %xmm11
vaddps %xmm9, %xmm11, %xmm9
vmulps %xmm13, %xmm10, %xmm10
vaddps %xmm9, %xmm10, %xmm10
vmulps %xmm1, %xmm8, %xmm1
vmulps 0x60(%rsp), %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vmovddup 0x1d4a4e7(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps 0x10(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d4a40e(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm4
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm4, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x190(%rsp), %xmm10
jne 0x1d6af6
incq %rbp
cmpq 0xa8(%rsp), %rbp
setb %r15b
jne 0x1d686e
jmp 0x1d6e06
vandps 0x190(%rsp), %xmm10, %xmm10
vmulps 0x50(%rsp), %xmm6, %xmm0
vmulps %xmm5, %xmm15, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x70(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm4, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm4, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm5
vtestps %xmm10, %xmm5
je 0x1d6adc
vandps %xmm5, %xmm10, %xmm0
vmovaps %xmm7, 0x1c0(%rsp)
vmovaps %xmm8, 0x1d0(%rsp)
vmovaps %xmm3, 0x1e0(%rsp)
vmovaps %xmm4, 0x1f0(%rsp)
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm14, 0x250(%rsp)
vmovaps %xmm15, 0x260(%rsp)
vmovaps 0x50(%rsp), %xmm1
vmovaps %xmm1, 0x270(%rsp)
vrcpps %xmm4, %xmm1
vmulps %xmm1, %xmm4, %xmm2
vbroadcastss 0x1d15b6d(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x1e0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x240(%rsp)
vmulps 0x1c0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x220(%rsp)
vmulps 0x1d0(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x230(%rsp)
vmovmskps %xmm0, %edx
bsfq %rdx, %rsi
movl 0x1b0(%rsp,%rsi,4), %eax
movq 0x48(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rdi
movl 0x24(%r14), %ecx
testl %ecx, 0x34(%rdi)
je 0x1d6c27
movq 0x10(%r8), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1d6c3f
cmpq $0x0, 0x48(%rdi)
jne 0x1d6c3f
xorl %eax, %eax
jmp 0x1d6c2d
btcq %rsi, %rdx
movb $0x1, %al
testb %al, %al
je 0x1d6e79
testq %rdx, %rdx
jne 0x1d6bed
jmp 0x1d6adc
movq %rcx, 0x60(%rsp)
movq %rdx, 0x10(%rsp)
movq %r11, 0x98(%rsp)
movq %r10, 0xa0(%rsp)
movq %r9, 0x70(%rsp)
vmovss 0x220(%rsp,%rsi,4), %xmm0
vmovss 0x230(%rsp,%rsi,4), %xmm1
movq %r8, 0x50(%rsp)
movq 0x8(%r8), %rcx
movl 0x1a0(%rsp,%rsi,4), %edx
vmovss 0x250(%rsp,%rsi,4), %xmm2
vmovss 0x260(%rsp,%rsi,4), %xmm3
vmovss 0x270(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xb0(%rsp)
vmovss %xmm3, 0xb4(%rsp)
vmovss %xmm4, 0xb8(%rsp)
vmovss %xmm0, 0xbc(%rsp)
vmovss %xmm1, 0xc0(%rsp)
movl %edx, 0xc4(%rsp)
movl %eax, 0xc8(%rsp)
movl (%rcx), %eax
movl %eax, 0xcc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xd0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x8(%rsp)
movq %rsi, 0x90(%rsp)
vmovss 0x240(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0xc(%rsp)
leaq 0xc(%rsp), %rax
movq %rax, 0xe0(%rsp)
movq 0x18(%rdi), %rax
movq %rax, 0xe8(%rsp)
movq %rcx, 0xf0(%rsp)
movq %r14, 0xf8(%rsp)
leaq 0xb0(%rsp), %rax
movq %rax, 0x100(%rsp)
movl $0x1, 0x108(%rsp)
movq %rdi, 0x88(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
je 0x1d6d7d
leaq 0xe0(%rsp), %rdi
callq *%rax
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d6dbe
movq 0x60(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1d6dba
movq 0x60(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x1d6da3
movq 0x88(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1d6dad
leaq 0xe0(%rsp), %rdi
callq *%rax
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d6dbe
xorl %eax, %eax
jmp 0x1d6de2
vmovss 0x8(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq 0x10(%rsp), %rax
movq 0x90(%rsp), %rcx
btcq %rcx, %rax
movq %rax, 0x10(%rsp)
movb $0x1, %al
movq 0x50(%rsp), %r8
movq 0x70(%rsp), %r9
movq 0xa0(%rsp), %r10
movq 0x98(%rsp), %r11
movq 0x10(%rsp), %rdx
jmp 0x1d6c2d
vmovaps 0x180(%rsp), %xmm6
vmovaps 0x170(%rsp), %xmm7
vmovaps 0x160(%rsp), %xmm8
vmovaps 0x150(%rsp), %xmm9
vmovaps 0x140(%rsp), %xmm10
vmovaps 0x130(%rsp), %xmm3
movq 0x40(%rsp), %rsi
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rbp
movq 0x28(%rsp), %rcx
vmovaps 0x120(%rsp), %xmm4
vmovaps 0x110(%rsp), %xmm5
leaq 0x280(%rsp), %r15
cmpl $0x3, %r13d
jne 0x1d6715
jmp 0x1d659d
testb $0x1, %r15b
vmovaps 0x180(%rsp), %xmm6
vmovaps 0x170(%rsp), %xmm7
vmovaps 0x160(%rsp), %xmm8
vmovaps 0x150(%rsp), %xmm9
vmovaps 0x140(%rsp), %xmm10
vmovaps 0x130(%rsp), %xmm3
movq 0x40(%rsp), %rsi
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %rbp
movq 0x28(%rsp), %rcx
vmovaps 0x120(%rsp), %xmm4
vmovaps 0x110(%rsp), %xmm5
leaq 0x280(%rsp), %r15
je 0x1d6e6a
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %r13
jmp 0x1d6e6a
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMvIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xaa8, %rsp # imm = 0xAA8
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1d7983
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1d7983
leaq 0x308(%rsp), %r8
movq 0x70(%rax), %rax
movq %rax, -0x8(%r8)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d49f7b(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d1a092(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1d157b0(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d49fef(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d48f90(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d48f87(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm3, %xmm3
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm5
vmovaps %xmm5, 0x240(%rsp)
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vbroadcastss 0x8(%rsi), %xmm5
vmovaps %xmm5, 0x230(%rsp)
setb %r9b
vshufps $0x0, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[0,0,0,0]
vmovaps %xmm5, 0x220(%rsp)
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
vmovaps %xmm6, 0x210(%rsp)
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmovaps %xmm4, 0x200(%rsp)
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vmovaps %xmm4, 0x1f0(%rsp)
shll $0x4, %r9d
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vmovaps %xmm4, 0x1e0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmovaps %xmm3, 0x1d0(%rsp)
setb %r10b
shll $0x4, %r10d
orq $0x20, %r10
xorl %r11d, %r11d
vucomiss %xmm2, %xmm6
setb %r11b
shll $0x4, %r11d
orq $0x40, %r11
movq %r9, %r14
xorq $0x10, %r14
movq %r10, %r15
xorq $0x10, %r15
movq %r11, %rax
xorq $0x10, %rax
movq %rax, 0xb8(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps %xmm1, 0x1c0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x1b0(%rsp)
leaq 0x1f78efa(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x120(%rsp)
vmovaps %xmm7, 0x90(%rsp)
movq %r14, 0x88(%rsp)
movq %r15, 0x80(%rsp)
leaq 0x300(%rsp), %rax
cmpq %rax, %r8
je 0x1d7983
movq -0x8(%r8), %rbx
addq $-0x8, %r8
testb $0x8, %bl
jne 0x1d718e
vmovaps 0x20(%rbx,%r9), %xmm0
vsubps %xmm7, %xmm0, %xmm0
vmulps 0x220(%rsp), %xmm0, %xmm0
vmovaps 0x20(%rbx,%r10), %xmm1
vmovaps 0x240(%rsp), %xmm3
vsubps %xmm3, %xmm1, %xmm1
vmulps 0x210(%rsp), %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r11), %xmm1
vmovaps 0x230(%rsp), %xmm4
vsubps %xmm4, %xmm1, %xmm1
vmulps 0x200(%rsp), %xmm1, %xmm1
vmaxps 0x1c0(%rsp), %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r14), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps 0x1f0(%rsp), %xmm1, %xmm1
vmovaps 0x20(%rbx,%r15), %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmulps 0x1e0(%rsp), %xmm2, %xmm2
vminps %xmm2, %xmm1, %xmm1
movq 0xb8(%rsp), %rax
vmovaps 0x20(%rbx,%rax), %xmm2
vsubps %xmm4, %xmm2, %xmm2
vmulps 0x1d0(%rsp), %xmm2, %xmm2
vminps 0x1b0(%rsp), %xmm2, %xmm2
vminps %xmm2, %xmm1, %xmm1
vcmpleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %r12d
testb $0x8, %bl
jne 0x1d71c4
testq %r12, %r12
je 0x1d71c8
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdi
xorl %ebp, %ebp
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdi
jne 0x1d71cd
movq %rax, %rbx
testl %ebp, %ebp
je 0x1d70c9
jmp 0x1d7202
pushq $0x6
jmp 0x1d71ca
pushq $0x4
popq %rbp
jmp 0x1d71ba
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdi, %rax
je 0x1d71fd
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x1d71dc
movq %rcx, %rbx
jmp 0x1d71ba
cmpl $0x6, %ebp
jne 0x1d797a
movl %ebx, %edi
andl $0xf, %edi
xorl %ebp, %ebp
addq $-0x8, %rdi
setne %al
je 0x1d797a
andq $-0x10, %rbx
xorl %ecx, %ecx
movb %al, 0xe(%rsp)
movq %rcx, 0xb0(%rsp)
imulq $0xb0, %rcx, %rcx
leaq 0xf(%rsp), %rax
movq %rax, 0x280(%rsp)
vbroadcastss (%rsi), %xmm1
vbroadcastss 0x4(%rsi), %xmm2
vbroadcastss 0x8(%rsi), %xmm3
vmovaps (%rbx,%rcx), %xmm0
vmovaps 0x10(%rbx,%rcx), %xmm4
vmovaps 0x20(%rbx,%rcx), %xmm5
vmovaps 0x30(%rbx,%rcx), %xmm6
vsubps %xmm1, %xmm0, %xmm7
vsubps %xmm2, %xmm4, %xmm14
vsubps %xmm3, %xmm5, %xmm10
vsubps %xmm1, %xmm6, %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x40(%rbx,%rcx), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovaps 0x50(%rbx,%rcx), %xmm4
vsubps %xmm3, %xmm4, %xmm6
vmovaps 0x60(%rbx,%rcx), %xmm4
vsubps %xmm1, %xmm4, %xmm5
vmovaps 0x70(%rbx,%rcx), %xmm1
vsubps %xmm2, %xmm1, %xmm2
vmovaps %xmm2, 0x30(%rsp)
movq %rcx, 0x10(%rsp)
vmovaps 0x80(%rbx,%rcx), %xmm1
vsubps %xmm3, %xmm1, %xmm3
vmovaps %xmm3, 0x40(%rsp)
vsubps %xmm7, %xmm5, %xmm13
vsubps %xmm14, %xmm2, %xmm15
vsubps %xmm10, %xmm3, %xmm0
vaddps %xmm2, %xmm14, %xmm1
vaddps %xmm3, %xmm10, %xmm4
vmulps %xmm0, %xmm1, %xmm8
vmulps %xmm4, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm12
vaddps %xmm5, %xmm7, %xmm8
vmulps %xmm4, %xmm13, %xmm4
vmovaps %xmm0, 0x160(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm4, %xmm9, %xmm4
vmovaps %xmm15, 0x170(%rsp)
vmulps %xmm15, %xmm8, %xmm8
vmovaps %xmm13, 0x180(%rsp)
vmulps %xmm1, %xmm13, %xmm1
vsubps %xmm8, %xmm1, %xmm1
vbroadcastss 0x18(%rsi), %xmm0
vmulps %xmm1, %xmm0, %xmm1
vbroadcastss 0x14(%rsi), %xmm13
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm4, %xmm1, %xmm1
vbroadcastss 0x10(%rsi), %xmm11
vmulps %xmm12, %xmm11, %xmm4
vaddps %xmm1, %xmm4, %xmm9
vmovaps 0x20(%rsp), %xmm3
vsubps %xmm3, %xmm14, %xmm15
vsubps %xmm6, %xmm10, %xmm1
vmovaps %xmm14, 0x1a0(%rsp)
vaddps %xmm3, %xmm14, %xmm2
vmovaps %xmm10, 0x190(%rsp)
vaddps %xmm6, %xmm10, %xmm4
vmovaps %xmm6, %xmm10
vmovaps %xmm0, %xmm6
vmulps %xmm1, %xmm2, %xmm12
vmulps %xmm4, %xmm15, %xmm14
vsubps %xmm12, %xmm14, %xmm12
vmovaps 0x50(%rsp), %xmm0
vsubps %xmm0, %xmm7, %xmm14
vmulps %xmm4, %xmm14, %xmm4
vmovaps %xmm7, 0xa0(%rsp)
vaddps %xmm0, %xmm7, %xmm7
vmovaps %xmm1, 0x140(%rsp)
vmulps %xmm1, %xmm7, %xmm8
vsubps %xmm4, %xmm8, %xmm4
vbroadcastss 0x1d49b00(%rip), %xmm8 # 0x1f20ec4
vmovaps %xmm15, 0x150(%rsp)
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm2, %xmm14, %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm1
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm4, %xmm1, %xmm1
vmulps %xmm12, %xmm11, %xmm4
vaddps %xmm1, %xmm4, %xmm4
vsubps %xmm5, %xmm0, %xmm1
vaddps %xmm5, %xmm0, %xmm7
vmovaps 0x30(%rsp), %xmm0
vsubps %xmm0, %xmm3, %xmm12
vaddps %xmm0, %xmm3, %xmm0
vmovaps 0x40(%rsp), %xmm2
vsubps %xmm2, %xmm10, %xmm15
vaddps %xmm2, %xmm10, %xmm2
vmulps %xmm0, %xmm15, %xmm3
vmulps %xmm2, %xmm12, %xmm5
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm2, %xmm1, %xmm2
vmulps %xmm7, %xmm15, %xmm5
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm7, %xmm12, %xmm5
vmovaps 0x90(%rsp), %xmm7
vmulps %xmm0, %xmm1, %xmm0
vsubps %xmm5, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm0
vmovaps %xmm13, 0x50(%rsp)
vmulps %xmm2, %xmm13, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm11, 0x40(%rsp)
vmulps %xmm3, %xmm11, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm4, %xmm9, %xmm2
vaddps %xmm2, %xmm0, %xmm13
vminps %xmm4, %xmm9, %xmm2
vminps %xmm0, %xmm2, %xmm2
vandps %xmm8, %xmm13, %xmm5
vbroadcastss 0x1d49a4f(%rip), %xmm3 # 0x1f20ecc
vmovaps %xmm5, 0x20(%rsp)
vmulps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d49a30(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm3, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm9, 0x30(%rsp)
vmaxps %xmm4, %xmm9, %xmm5
vmaxps %xmm0, %xmm5, %xmm0
vcmpleps %xmm3, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vtestps 0x120(%rsp), %xmm0
je 0x1d7930
vmovaps %xmm6, %xmm10
vmovaps 0x160(%rsp), %xmm7
vmovaps 0x150(%rsp), %xmm11
vmovaps %xmm0, 0x130(%rsp)
vmulps %xmm7, %xmm11, %xmm0
vmovaps 0x170(%rsp), %xmm9
vmovaps 0x140(%rsp), %xmm6
vmulps %xmm6, %xmm9, %xmm2
vsubps %xmm0, %xmm2, %xmm2
vmulps %xmm6, %xmm12, %xmm3
vmulps %xmm15, %xmm11, %xmm5
vsubps %xmm3, %xmm5, %xmm5
vandps %xmm0, %xmm8, %xmm0
vandps %xmm3, %xmm8, %xmm3
vcmpltps %xmm3, %xmm0, %xmm0
vblendvps %xmm0, %xmm2, %xmm5, %xmm0
vmulps %xmm15, %xmm14, %xmm2
vmulps %xmm7, %xmm14, %xmm3
vmovaps 0x180(%rsp), %xmm7
vmulps %xmm7, %xmm6, %xmm5
vsubps %xmm5, %xmm3, %xmm3
vmulps %xmm1, %xmm6, %xmm6
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm5, %xmm8, %xmm5
vandps %xmm2, %xmm8, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps %xmm1, %xmm11, %xmm1
vmulps %xmm7, %xmm11, %xmm3
vmulps %xmm9, %xmm14, %xmm5
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm5, %xmm8, %xmm5
vandps %xmm1, %xmm8, %xmm1
vcmpltps %xmm1, %xmm5, %xmm1
vblendvps %xmm1, %xmm3, %xmm6, %xmm1
vmulps %xmm1, %xmm10, %xmm3
vmulps 0x50(%rsp), %xmm2, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmulps 0x40(%rsp), %xmm0, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x190(%rsp), %xmm1, %xmm3
vmulps 0x1a0(%rsp), %xmm2, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vmulps 0xa0(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1d15141(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovaps 0x90(%rsp), %xmm7
vcmpneqps 0x1d14401(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x130(%rsp), %xmm5
vandps 0x120(%rsp), %xmm5, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x1d7930
addq %rbx, 0x10(%rsp)
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x30(%rsp), %xmm6
vmovaps %xmm6, 0x250(%rsp)
vmovaps %xmm4, 0x260(%rsp)
vmovaps %xmm13, 0x270(%rsp)
movq %rax, 0x280(%rsp)
vmovaps %xmm5, 0x290(%rsp)
vmovaps %xmm3, 0x2c0(%rsp)
vmovaps %xmm0, 0x2d0(%rsp)
vmovaps %xmm2, 0x2e0(%rsp)
vmovaps %xmm1, 0x2f0(%rsp)
movq (%rdx), %rax
movq %rax, 0x50(%rsp)
vrcpps %xmm13, %xmm0
vmulps %xmm0, %xmm13, %xmm1
vbroadcastss 0x1d15061(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d19920(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x20(%rsp), %xmm3
vcmpnltps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x2a0(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x2b0(%rsp)
vmovmskps %xmm5, %r15d
bsfq %r15, %r13
movq 0x10(%rsp), %rax
movl 0x90(%rax,%r13,4), %eax
movq 0x50(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rbp
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rbp)
je 0x1d773c
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x1d7756
cmpq $0x0, 0x48(%rbp)
jne 0x1d7756
xorl %eax, %eax
jmp 0x1d7742
btcq %r13, %r15
movb $0x1, %al
xorl %ebp, %ebp
testb %al, %al
je 0x1d7959
testq %r15, %r15
jne 0x1d76fd
jmp 0x1d7930
movq %rdi, 0x40(%rsp)
vmovss 0x2a0(%rsp,%r13,4), %xmm0
vmovss 0x2b0(%rsp,%r13,4), %xmm1
movq 0x8(%rdx), %rcx
movq 0x10(%rsp), %rdi
movl 0xa0(%rdi,%r13,4), %edi
vmovss 0x2d0(%rsp,%r13,4), %xmm2
vmovss 0x2e0(%rsp,%r13,4), %xmm3
vmovss 0x2f0(%rsp,%r13,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edi, 0xd4(%rsp)
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x30(%rsp)
vmovss 0x2c0(%rsp,%r13,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x18(%rbp), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%rbp), %rax
testq %rax, %rax
movq %r8, 0x78(%rsp)
movq %r9, 0x70(%rsp)
movq %r10, 0x68(%rsp)
movq %r11, 0x60(%rsp)
je 0x1d78ba
leaq 0xf0(%rsp), %rdi
movq %rdx, 0x20(%rsp)
movq %rsi, 0xa0(%rsp)
callq *%rax
movq 0x60(%rsp), %r11
movq 0x68(%rsp), %r10
movq 0x70(%rsp), %r9
vmovaps 0x90(%rsp), %xmm7
movq 0x78(%rsp), %r8
movq 0xa0(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d7913
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x1d790f
testb $0x2, (%r14)
jne 0x1d78cf
testb $0x40, 0x3e(%rbp)
je 0x1d7902
leaq 0xf0(%rsp), %rdi
movq %rdx, %r14
movq %rsi, %rbp
callq *%rax
movq 0x60(%rsp), %r11
movq 0x68(%rsp), %r10
movq 0x70(%rsp), %r9
vmovaps 0x90(%rsp), %xmm7
movq 0x78(%rsp), %r8
movq %rbp, %rsi
movq %r14, %rdx
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d7913
xorl %eax, %eax
jmp 0x1d7924
vmovss 0x30(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %r13, %r15
movb $0x1, %al
xorl %ebp, %ebp
movq 0x40(%rsp), %rdi
jmp 0x1d7744
movq 0xb0(%rsp), %rcx
incq %rcx
cmpq %rdi, %rcx
setb %al
jne 0x1d7225
movq 0x88(%rsp), %r14
movq 0x80(%rsp), %r15
jmp 0x1d797a
testb $0x1, 0xe(%rsp)
movq 0x88(%rsp), %r14
movq 0x80(%rsp), %r15
je 0x1d797a
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
cmpl $0x3, %ebp
jne 0x1d70b0
addq $0xaa8, %rsp # imm = 0xAA8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xaf8, %rsp # imm = 0xAF8
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1d853e
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1d853e
movq %rdx, %r8
leaq 0x358(%rsp), %r10
movq 0x70(%rax), %rax
movq %rax, -0x8(%r10)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d494d6(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d195ed(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1d14d0b(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d4954a(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d484eb(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d484e2(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm3, %xmm3
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm5
vmovaps %xmm5, 0x270(%rsp)
xorl %r11d, %r11d
vucomiss %xmm2, %xmm4
vbroadcastss 0x8(%rsi), %xmm5
vmovaps %xmm5, 0x260(%rsp)
setb %r11b
vshufps $0x0, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[0,0,0,0]
vmovaps %xmm5, 0x250(%rsp)
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
vmovaps %xmm6, 0x240(%rsp)
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmovaps %xmm4, 0x230(%rsp)
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vmovaps %xmm4, 0x220(%rsp)
shll $0x4, %r11d
xorl %edi, %edi
vucomiss %xmm2, %xmm5
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vmovaps %xmm4, 0x210(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmovaps %xmm3, 0x200(%rsp)
setb %dil
shll $0x4, %edi
orq $0x20, %rdi
xorl %r9d, %r9d
vucomiss %xmm2, %xmm6
setb %r9b
shll $0x4, %r9d
orq $0x40, %r9
movq %r11, %r14
xorq $0x10, %r14
movq %rdi, %r15
xorq $0x10, %r15
movq %r9, %r13
xorq $0x10, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps %xmm1, 0x1f0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x1e0(%rsp)
leaq 0x1f7845f(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm7, 0x90(%rsp)
movq %rdi, 0x88(%rsp)
movq %r9, 0x80(%rsp)
movq %r14, 0x78(%rsp)
movq %r15, 0x70(%rsp)
movq %r13, 0x68(%rsp)
leaq 0x350(%rsp), %rax
cmpq %rax, %r10
je 0x1d853e
movq -0x8(%r10), %rbx
addq $-0x8, %r10
testb $0x8, %bl
jne 0x1d7c30
vmovaps 0x20(%rbx,%r11), %xmm0
vsubps %xmm7, %xmm0, %xmm0
vmulps 0x250(%rsp), %xmm0, %xmm0
vmovaps 0x20(%rbx,%rdi), %xmm1
vmovaps 0x270(%rsp), %xmm3
vsubps %xmm3, %xmm1, %xmm1
vmulps 0x240(%rsp), %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r9), %xmm1
vmovaps 0x260(%rsp), %xmm4
vsubps %xmm4, %xmm1, %xmm1
vmulps 0x230(%rsp), %xmm1, %xmm1
vmaxps 0x1f0(%rsp), %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r14), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps 0x220(%rsp), %xmm1, %xmm1
vmovaps 0x20(%rbx,%r15), %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmulps 0x210(%rsp), %xmm2, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbx,%r13), %xmm2
vsubps %xmm4, %xmm2, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vminps 0x1e0(%rsp), %xmm2, %xmm2
vminps %xmm2, %xmm1, %xmm1
vcmpleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %r12d
testb $0x8, %bl
jne 0x1d7c66
testq %r12, %r12
je 0x1d7c6a
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %ebp, %ebp
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdx
jne 0x1d7c6f
movq %rax, %rbx
testl %ebp, %ebp
je 0x1d7b73
jmp 0x1d7ca4
pushq $0x6
jmp 0x1d7c6c
pushq $0x4
popq %rbp
jmp 0x1d7c5c
movq %rax, (%r10)
addq $0x8, %r10
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1d7c9f
movq %rcx, (%r10)
addq $0x8, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1d7c7e
movq %rcx, %rbx
jmp 0x1d7c5c
cmpl $0x6, %ebp
jne 0x1d8535
movl %ebx, %eax
andl $0xf, %eax
xorl %ebp, %ebp
addq $-0x8, %rax
setne %cl
je 0x1d8535
andq $-0x10, %rbx
movq (%r8), %rdi
xorl %edx, %edx
movq %rax, 0xa0(%rsp)
movq %rdi, 0x20(%rsp)
movb %cl, 0xe(%rsp)
movq %rdx, 0xa8(%rsp)
imulq $0x50, %rdx, %rax
movl 0x30(%rbx,%rax), %edx
movq 0x228(%rdi), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%rbx,%rax), %r9d
movl 0x4(%rbx,%rax), %edi
vmovups (%rdx,%r9,4), %xmm2
movl 0x10(%rbx,%rax), %r9d
vmovups (%rdx,%r9,4), %xmm1
movl 0x20(%rbx,%rax), %r9d
vmovups (%rdx,%r9,4), %xmm0
movl 0x34(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm5
movl 0x14(%rbx,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm4
movl 0x24(%rbx,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm3
movl 0x38(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%rbx,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm6
movl 0x18(%rbx,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm7
movl 0x28(%rbx,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm8
movq 0x20(%rsp), %rdi
movl 0x3c(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm9
movl 0x1c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm10
movl 0x2c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm11
movb 0xe(%rsp), %dl
vunpcklps %xmm6, %xmm2, %xmm12 # xmm12 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm9, %xmm5, %xmm6 # xmm6 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vmovaps %xmm2, 0x10(%rsp)
vunpcklps %xmm6, %xmm12, %xmm5 # xmm5 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
vunpckhps %xmm6, %xmm12, %xmm6 # xmm6 = xmm12[2],xmm6[2],xmm12[3],xmm6[3]
vunpcklps %xmm7, %xmm1, %xmm9 # xmm9 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
vunpckhps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
vunpcklps %xmm10, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm4, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpcklps %xmm7, %xmm9, %xmm10 # xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
vunpckhps %xmm7, %xmm9, %xmm15 # xmm15 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
vunpcklps %xmm8, %xmm0, %xmm9 # xmm9 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm11, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vunpcklps %xmm3, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
vunpckhps %xmm8, %xmm9, %xmm8 # xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
vmovaps 0x30(%rbx,%rax), %xmm0
vmovaps %xmm0, 0x290(%rsp)
vmovaps 0x40(%rbx,%rax), %xmm0
vmovaps %xmm0, 0x280(%rsp)
vbroadcastss (%rsi), %xmm9
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm9, %xmm5, %xmm7
vsubps %xmm13, %xmm6, %xmm3
vmovaps 0x10(%rsp), %xmm0
vsubps %xmm14, %xmm0, %xmm1
vsubps %xmm9, %xmm10, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vsubps %xmm13, %xmm15, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vsubps %xmm14, %xmm2, %xmm10
vsubps %xmm9, %xmm12, %xmm12
vsubps %xmm13, %xmm8, %xmm11
vsubps %xmm14, %xmm4, %xmm2
vmovaps %xmm2, 0x40(%rsp)
vsubps %xmm7, %xmm12, %xmm14
vsubps %xmm3, %xmm11, %xmm15
vsubps %xmm1, %xmm2, %xmm0
vaddps %xmm3, %xmm11, %xmm4
vaddps %xmm1, %xmm2, %xmm5
vmulps %xmm4, %xmm0, %xmm8
vmulps %xmm5, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm7, %xmm12, %xmm8
vmulps %xmm5, %xmm14, %xmm5
vmovaps %xmm0, 0x170(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm5, %xmm9, %xmm5
vmovaps %xmm15, 0x180(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x190(%rsp)
vmulps %xmm4, %xmm14, %xmm4
vsubps %xmm8, %xmm4, %xmm4
vbroadcastss 0x18(%rsi), %xmm6
vmulps %xmm4, %xmm6, %xmm4
vbroadcastss 0x14(%rsi), %xmm15
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vbroadcastss 0x10(%rsi), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
vmulps %xmm0, %xmm13, %xmm5
vaddps %xmm4, %xmm5, %xmm9
vmovaps 0x30(%rsp), %xmm2
vsubps %xmm2, %xmm3, %xmm0
vmovaps %xmm10, %xmm8
vsubps %xmm10, %xmm1, %xmm10
vmovaps %xmm3, 0x1b0(%rsp)
vaddps %xmm2, %xmm3, %xmm4
vmovaps %xmm1, 0x1a0(%rsp)
vaddps %xmm1, %xmm8, %xmm5
vmovaps %xmm8, %xmm1
vmulps %xmm4, %xmm10, %xmm14
vmulps %xmm5, %xmm0, %xmm3
vsubps %xmm14, %xmm3, %xmm3
vmovaps 0x10(%rsp), %xmm14
vsubps %xmm14, %xmm7, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vmovaps %xmm7, 0x1c0(%rsp)
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm7, %xmm10, %xmm8
vsubps %xmm5, %xmm8, %xmm5
vmovaps %xmm0, 0x160(%rsp)
vmulps %xmm7, %xmm0, %xmm7
vmovaps %xmm13, 0x150(%rsp)
vmulps %xmm4, %xmm13, %xmm4
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmovaps 0x1d0(%rsp), %xmm13
vmulps %xmm3, %xmm13, %xmm3
vaddps %xmm4, %xmm3, %xmm0
vsubps %xmm12, %xmm14, %xmm5
vaddps %xmm12, %xmm14, %xmm3
vsubps %xmm11, %xmm2, %xmm8
vaddps %xmm2, %xmm11, %xmm2
vmovaps %xmm0, %xmm12
vmovaps 0x40(%rsp), %xmm0
vsubps %xmm0, %xmm1, %xmm4
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm2, %xmm4, %xmm1
vmulps %xmm0, %xmm8, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vmulps %xmm3, %xmm4, %xmm7
vsubps %xmm0, %xmm7, %xmm0
vmovaps 0x90(%rsp), %xmm7
vmulps %xmm3, %xmm8, %xmm3
vmulps %xmm2, %xmm5, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm6, 0x10(%rsp)
vmulps %xmm2, %xmm6, %xmm2
vmovaps %xmm15, 0x40(%rsp)
vmulps %xmm0, %xmm15, %xmm0
vmovaps %xmm13, %xmm15
vaddps %xmm2, %xmm0, %xmm0
vmulps %xmm1, %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm12, %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm13
vminps %xmm12, %xmm9, %xmm1
vminps %xmm0, %xmm1, %xmm1
vbroadcastss 0x1d48eab(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm13, %xmm3
vbroadcastss 0x1d48ea6(%rip), %xmm2 # 0x1f20ecc
vmovaps %xmm3, 0x140(%rsp)
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d48e84(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm2, %xmm3
vcmpnltps %xmm3, %xmm1, %xmm1
vmovaps %xmm9, 0x30(%rsp)
vmovaps %xmm12, 0x130(%rsp)
vmaxps %xmm12, %xmm9, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm2, %xmm0, %xmm0
vorps %xmm0, %xmm1, %xmm0
movb $0x0, 0xf(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x2d0(%rsp)
vtestps 0x110(%rsp), %xmm0
je 0x1d84c7
vmovaps 0x170(%rsp), %xmm6
vmovaps 0x160(%rsp), %xmm12
vmovaps %xmm0, 0x120(%rsp)
vmulps %xmm6, %xmm12, %xmm0
vmovaps 0x180(%rsp), %xmm9
vmulps %xmm10, %xmm9, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm8, %xmm10, %xmm2
vmulps %xmm4, %xmm12, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d48df5(%rip), %xmm11 # 0x1f20ec4
vandps %xmm0, %xmm11, %xmm0
vandps %xmm2, %xmm11, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm0
vmovaps 0x150(%rsp), %xmm14
vmulps %xmm4, %xmm14, %xmm1
vmulps %xmm6, %xmm14, %xmm2
vmovaps 0x190(%rsp), %xmm7
vmulps %xmm7, %xmm10, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm5, %xmm10, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm3, %xmm11, %xmm3
vandps %xmm1, %xmm11, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmulps %xmm5, %xmm12, %xmm2
vmulps %xmm7, %xmm12, %xmm3
vmulps %xmm14, %xmm9, %xmm5
vmulps %xmm8, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm5, %xmm11, %xmm5
vandps %xmm2, %xmm11, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps 0x10(%rsp), %xmm2, %xmm3
vmulps 0x40(%rsp), %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vmulps %xmm0, %xmm15, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x1a0(%rsp), %xmm2, %xmm3
vmulps 0x1b0(%rsp), %xmm1, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps 0x1c0(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1d14574(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovaps 0x90(%rsp), %xmm7
vcmpneqps 0x1d13834(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x120(%rsp), %xmm4
vandps 0x110(%rsp), %xmm4, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x1d84c7
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x30(%rsp), %xmm6
vmovaps %xmm6, 0x2a0(%rsp)
vmovaps 0x130(%rsp), %xmm11
vmovaps %xmm11, 0x2b0(%rsp)
vmovaps %xmm13, 0x2c0(%rsp)
movq %rax, 0x2d0(%rsp)
vmovaps %xmm5, 0x2e0(%rsp)
vmovaps %xmm3, 0x310(%rsp)
vmovaps %xmm0, 0x320(%rsp)
vmovaps %xmm1, 0x330(%rsp)
vmovaps %xmm2, 0x340(%rsp)
vrcpps %xmm13, %xmm0
vmulps %xmm0, %xmm13, %xmm1
vbroadcastss 0x1d14498(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d18d57(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x140(%rsp), %xmm3
vcmpnltps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x2f0(%rsp)
vmulps %xmm0, %xmm11, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x300(%rsp)
vmovmskps %xmm5, %r13d
bsfq %r13, %rbp
movl 0x290(%rsp,%rbp,4), %eax
movq 0x1e8(%rdi), %rcx
movq (%rcx,%rax,8), %r15
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r15)
je 0x1d82fe
movq 0x10(%r8), %r14
cmpq $0x0, 0x10(%r14)
jne 0x1d8318
cmpq $0x0, 0x48(%r15)
jne 0x1d8318
xorl %eax, %eax
jmp 0x1d8304
btcq %rbp, %r13
movb $0x1, %al
xorl %ebp, %ebp
testb %al, %al
je 0x1d8507
testq %r13, %r13
jne 0x1d82c9
jmp 0x1d84c7
vmovss 0x2f0(%rsp,%rbp,4), %xmm0
vmovss 0x300(%rsp,%rbp,4), %xmm1
movq 0x8(%r8), %rcx
movl 0x280(%rsp,%rbp,4), %edx
vmovss 0x320(%rsp,%rbp,4), %xmm2
vmovss 0x330(%rsp,%rbp,4), %xmm3
vmovss 0x340(%rsp,%rbp,4), %xmm4
vmovss %xmm2, 0xb0(%rsp)
vmovss %xmm3, 0xb4(%rsp)
vmovss %xmm4, 0xb8(%rsp)
vmovss %xmm0, 0xbc(%rsp)
vmovss %xmm1, 0xc0(%rsp)
movl %edx, 0xc4(%rsp)
movl %eax, 0xc8(%rsp)
movl (%rcx), %eax
movl %eax, 0xcc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xd0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x10(%rsp)
vmovss 0x310(%rsp,%rbp,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x2c(%rsp)
leaq 0x2c(%rsp), %rax
movq %rax, 0xe0(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0xe8(%rsp)
movq %rcx, 0xf0(%rsp)
movq %rsi, 0xf8(%rsp)
leaq 0xb0(%rsp), %rax
movq %rax, 0x100(%rsp)
movl $0x1, 0x108(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
movq %r10, 0x60(%rsp)
movq %r11, 0x58(%rsp)
je 0x1d8456
leaq 0xe0(%rsp), %rdi
movq %r8, 0x40(%rsp)
movq %rsi, 0x30(%rsp)
callq *%rax
movq 0x20(%rsp), %rdi
movq 0x58(%rsp), %r11
vmovaps 0x90(%rsp), %xmm7
movq 0x60(%rsp), %r10
movq 0x30(%rsp), %rsi
movq 0x40(%rsp), %r8
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d84ab
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x1d84a7
testb $0x2, (%r14)
jne 0x1d846c
testb $0x40, 0x3e(%r15)
je 0x1d849a
leaq 0xe0(%rsp), %rdi
movq %r8, %r14
movq %rsi, %r15
callq *%rax
movq 0x20(%rsp), %rdi
movq 0x58(%rsp), %r11
vmovaps 0x90(%rsp), %xmm7
movq 0x60(%rsp), %r10
movq %r15, %rsi
movq %r14, %r8
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d84ab
xorl %eax, %eax
jmp 0x1d84bc
vmovss 0x10(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %rbp, %r13
movb $0x1, %al
xorl %ebp, %ebp
movb 0xe(%rsp), %dl
jmp 0x1d8306
movq 0xa8(%rsp), %rdx
incq %rdx
movq 0xa0(%rsp), %rax
cmpq %rax, %rdx
setb %cl
jne 0x1d7cd7
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %r14
movq 0x70(%rsp), %r15
movq 0x68(%rsp), %r13
jmp 0x1d8535
testb $0x1, %dl
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %r14
movq 0x70(%rsp), %r15
movq 0x68(%rsp), %r13
je 0x1d8535
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
cmpl $0x3, %ebp
jne 0x1d7b5a
addq $0xaf8, %rsp # imm = 0xAF8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMvMBIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa18, %rsp # imm = 0xA18
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1d857d
addq $0xa18, %rsp # imm = 0xA18
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1d856b
leaq 0x278(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%r14), %xmm2, %xmm1
vmovaps 0x10(%r14), %xmm3
vbroadcastss 0x1d48910(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d18a27(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d14137(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %xmm6
vbroadcastss 0x4(%r14), %xmm7
vbroadcastss 0x8(%r14), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vshufps $0xaa, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[2,2,2,2]
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %r15
xorq $0x10, %r15
movq %r10, %r13
xorq $0x10, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,0,0,0]
leaq 0x1f77913(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
leaq 0x270(%rsp), %rbp
vmovaps %xmm6, 0x190(%rsp)
vmovaps %xmm7, 0x180(%rsp)
vmovaps %xmm8, 0x170(%rsp)
vmovaps %xmm9, 0x160(%rsp)
vmovaps %xmm10, 0x150(%rsp)
vmovaps %xmm11, 0x140(%rsp)
movq %r15, 0x68(%rsp)
movq %r13, 0x60(%rsp)
vmovaps %xmm5, 0x130(%rsp)
vmovaps %xmm12, 0x120(%rsp)
cmpq %rbp, %rdi
je 0x1d856b
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x1d87c9
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%r14), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmaxps %xmm1, %xmm5, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmulps 0x80(%rax,%r10), %xmm0, %xmm3
vaddps 0x20(%rax,%r10), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r11), %xmm0, %xmm2
vaddps 0x20(%rax,%r11), %xmm2, %xmm2
vsubps %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vminps %xmm2, %xmm12, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmulps 0x80(%rax,%r13), %xmm0, %xmm4
vaddps 0x20(%rax,%r13), %xmm4, %xmm4
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1d8811
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %ebx
testb $0x8, %r12b
jne 0x1d880d
testq %rbx, %rbx
je 0x1d8836
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rsi
xorl %ecx, %ecx
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rsi
jne 0x1d883b
movq %rax, %r12
testl %ecx, %ecx
je 0x1d86e9
jmp 0x1d8880
pushq $0x6
jmp 0x1d8838
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1d87c0
pushq $0x4
popq %rcx
jmp 0x1d8803
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rsi, %rax
je 0x1d8879
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x1d884a
movq %rcx, %r12
xorl %ecx, %ecx
jmp 0x1d8803
cmpl $0x6, %ecx
jne 0x1d8e97
movl %r12d, %esi
andl $0xf, %esi
xorl %ecx, %ecx
addq $-0x8, %rsi
setne %bpl
je 0x1d8e8f
andq $-0x10, %r12
xorl %r15d, %r15d
imulq $0x140, %r15, %r13 # imm = 0x140
vbroadcastss 0x1c(%r14), %xmm0
vmulps 0x90(%r12,%r13), %xmm0, %xmm1
vaddps (%r12,%r13), %xmm1, %xmm7
vmulps 0xa0(%r12,%r13), %xmm0, %xmm1
vaddps 0x10(%r12,%r13), %xmm1, %xmm5
vmulps 0xb0(%r12,%r13), %xmm0, %xmm1
vaddps 0x20(%r12,%r13), %xmm1, %xmm6
vmulps 0xc0(%r12,%r13), %xmm0, %xmm1
vaddps 0x30(%r12,%r13), %xmm1, %xmm1
vmulps 0xd0(%r12,%r13), %xmm0, %xmm2
vaddps 0x40(%r12,%r13), %xmm2, %xmm2
vmulps 0xe0(%r12,%r13), %xmm0, %xmm4
vaddps 0x50(%r12,%r13), %xmm4, %xmm8
vmulps 0xf0(%r12,%r13), %xmm0, %xmm4
vaddps 0x60(%r12,%r13), %xmm4, %xmm9
vmulps 0x100(%r12,%r13), %xmm0, %xmm4
vaddps 0x70(%r12,%r13), %xmm4, %xmm10
vmulps 0x110(%r12,%r13), %xmm0, %xmm0
vaddps 0x80(%r12,%r13), %xmm0, %xmm0
vsubps %xmm1, %xmm7, %xmm3
vmovaps %xmm3, (%rsp)
vsubps %xmm2, %xmm5, %xmm12
vmovaps %xmm12, 0x40(%rsp)
vsubps %xmm8, %xmm6, %xmm8
vsubps %xmm7, %xmm9, %xmm9
vsubps %xmm5, %xmm10, %xmm10
vsubps %xmm6, %xmm0, %xmm11
vmulps %xmm11, %xmm12, %xmm0
vmulps %xmm10, %xmm8, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmulps %xmm9, %xmm8, %xmm1
vmulps %xmm3, %xmm11, %xmm2
vsubps %xmm1, %xmm2, %xmm4
vmulps %xmm3, %xmm10, %xmm2
vmulps %xmm9, %xmm12, %xmm12
vsubps %xmm2, %xmm12, %xmm3
vbroadcastss (%r14), %xmm12
vbroadcastss 0x4(%r14), %xmm13
vbroadcastss 0x8(%r14), %xmm14
vbroadcastss 0x14(%r14), %xmm15
vsubps %xmm12, %xmm7, %xmm2
vbroadcastss 0x18(%r14), %xmm12
vsubps %xmm13, %xmm5, %xmm5
vsubps %xmm14, %xmm6, %xmm6
vmulps %xmm6, %xmm15, %xmm13
vmulps %xmm5, %xmm12, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vbroadcastss 0x10(%r14), %xmm14
vmulps %xmm2, %xmm12, %xmm0
vmulps %xmm6, %xmm14, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmulps %xmm5, %xmm14, %xmm1
vmovaps %xmm2, 0x20(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm3, 0x70(%rsp)
vmulps %xmm3, %xmm12, %xmm2
vmulps %xmm4, %xmm15, %xmm12
vmovaps %xmm4, %xmm15
vaddps %xmm2, %xmm12, %xmm2
vmovaps 0x30(%rsp), %xmm3
vmulps %xmm3, %xmm14, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm11, %xmm11
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm13, %xmm9, %xmm9
vaddps %xmm10, %xmm9, %xmm10
vmulps %xmm1, %xmm8, %xmm1
vmulps 0x40(%rsp), %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vmovddup 0x1d4851a(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps (%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d48442(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm4
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm4, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x1a0(%rsp), %xmm10
jne 0x1d8abd
incq %r15
cmpq %rsi, %r15
setb %bpl
jne 0x1d88a6
jmp 0x1d8e3d
vmovaps %xmm3, %xmm14
vandps 0x1a0(%rsp), %xmm10, %xmm10
vmulps 0x70(%rsp), %xmm6, %xmm0
vmulps %xmm5, %xmm15, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x20(%rsp), %xmm3, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm4, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm4, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm5
vtestps %xmm10, %xmm5
je 0x1d8aa8
addq %r12, %r13
vandps %xmm5, %xmm10, %xmm0
vmovaps %xmm7, 0x1b0(%rsp)
vmovaps %xmm8, 0x1c0(%rsp)
vmovaps %xmm3, 0x1d0(%rsp)
vmovaps %xmm4, 0x1e0(%rsp)
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm14, 0x240(%rsp)
vmovaps %xmm15, 0x250(%rsp)
vmovaps 0x70(%rsp), %xmm1
vmovaps %xmm1, 0x260(%rsp)
movq (%rdx), %rax
movq %rax, 0x70(%rsp)
vrcpps %xmm4, %xmm1
vmulps %xmm1, %xmm4, %xmm2
vbroadcastss 0x1d13b97(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x1d0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x230(%rsp)
vmulps 0x1b0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x210(%rsp)
vmulps 0x1c0(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x220(%rsp)
vmovmskps %xmm0, %eax
movq %rax, (%rsp)
bsfq %rax, %rcx
movq %rcx, 0x30(%rsp)
movl 0x120(%r13,%rcx,4), %eax
movq 0x70(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x20(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x40(%rsp)
testl %ecx, 0x34(%rax)
je 0x1d8c20
movq 0x10(%rdx), %rax
movq %rax, 0x58(%rsp)
cmpq $0x0, 0x10(%rax)
movl $0x0, %ecx
jne 0x1d8c4f
movq 0x40(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x1d8c4f
xorl %eax, %eax
jmp 0x1d8c35
movq 0x30(%rsp), %rax
movq (%rsp), %rcx
btcq %rax, %rcx
movq %rcx, (%rsp)
movb $0x1, %al
xorl %ecx, %ecx
testb %al, %al
je 0x1d8ea5
movq (%rsp), %rax
testq %rax, %rax
jne 0x1d8bc3
jmp 0x1d8aa8
movq %rsi, 0x88(%rsp)
movq %r11, 0x90(%rsp)
movq %r10, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movq %r8, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
movq 0x30(%rsp), %rsi
vmovss 0x210(%rsp,%rsi,4), %xmm0
vmovss 0x220(%rsp,%rsi,4), %xmm1
movq %rdx, 0xb8(%rsp)
movq 0x8(%rdx), %rcx
movl 0x130(%r13,%rsi,4), %edx
vmovss 0x240(%rsp,%rsi,4), %xmm2
vmovss 0x250(%rsp,%rsi,4), %xmm3
vmovss 0x260(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edx, 0xd4(%rsp)
movq 0x20(%rsp), %rax
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x230(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x40(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %r14, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x1d8da1
leaq 0xf0(%rsp), %rdi
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d8ddf
movq 0x58(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1d8ddb
movq 0x58(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x1d8dc4
movq 0x40(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1d8dce
leaq 0xf0(%rsp), %rdi
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d8ddf
xorl %eax, %eax
jmp 0x1d8dfe
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq (%rsp), %rax
movq 0x30(%rsp), %rcx
btcq %rcx, %rax
movq %rax, (%rsp)
movb $0x1, %al
movq 0xb8(%rsp), %rdx
movq 0xb0(%rsp), %rdi
movq 0xa8(%rsp), %r8
movq 0xa0(%rsp), %r9
movq 0x98(%rsp), %r10
movq 0x90(%rsp), %r11
xorl %ecx, %ecx
movq 0x88(%rsp), %rsi
jmp 0x1d8c35
vmovaps 0x190(%rsp), %xmm6
vmovaps 0x180(%rsp), %xmm7
vmovaps 0x170(%rsp), %xmm8
vmovaps 0x160(%rsp), %xmm9
vmovaps 0x150(%rsp), %xmm10
vmovaps 0x140(%rsp), %xmm11
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
vmovaps 0x130(%rsp), %xmm5
vmovaps 0x120(%rsp), %xmm12
leaq 0x270(%rsp), %rbp
cmpl $0x3, %ecx
jne 0x1d86d8
jmp 0x1d856b
testb $0x1, %bpl
vmovaps 0x190(%rsp), %xmm6
vmovaps 0x180(%rsp), %xmm7
vmovaps 0x170(%rsp), %xmm8
vmovaps 0x160(%rsp), %xmm9
vmovaps 0x150(%rsp), %xmm10
vmovaps 0x140(%rsp), %xmm11
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
vmovaps 0x130(%rsp), %xmm5
vmovaps 0x120(%rsp), %xmm12
leaq 0x270(%rsp), %rbp
je 0x1d8e97
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rcx
jmp 0x1d8e97
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiMBIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa78, %rsp # imm = 0xA78
movq %rdx, 0x8(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1d8f44
addq $0xa78, %rsp # imm = 0xA78
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x1d8f32
leaq 0x2d8(%rsp), %rdi
movq 0x70(%rax), %rax
vmovaps 0x10(%rsi), %xmm2
vmaxss 0xc(%rsi), %xmm5, %xmm1
vbroadcastss 0x1d47f52(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d18069(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq %rax, -0x8(%rdi)
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d13775(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
vbroadcastss 0x8(%rsi), %xmm8
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[0,0,0,0]
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm10 # xmm10 = xmm2[1,1,1,1]
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
vshufps $0xaa, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[2,2,2,2]
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rbx
xorq $0x10, %rbx
movq %r10, %r14
xorq $0x10, %r14
vshufps $0x0, %xmm1, %xmm1, %xmm12 # xmm12 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm13 # xmm13 = xmm0[0,0,0,0]
leaq 0x1f76f51(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
leaq 0x2d0(%rsp), %r15
movq %rsi, 0x10(%rsp)
vmovaps %xmm6, 0x1b0(%rsp)
vmovaps %xmm7, 0x1a0(%rsp)
movq %r8, 0x80(%rsp)
vmovaps %xmm8, 0x190(%rsp)
vmovaps %xmm9, 0x180(%rsp)
vmovaps %xmm10, 0x170(%rsp)
movq %r9, 0x78(%rsp)
movq %r10, 0x70(%rsp)
vmovaps %xmm11, 0x160(%rsp)
movq %r11, 0x68(%rsp)
movq %rbx, 0x60(%rsp)
movq %r14, 0x58(%rsp)
vmovaps %xmm12, 0x150(%rsp)
vmovaps %xmm13, 0x140(%rsp)
cmpq %r15, %rdi
je 0x1d8f32
movq -0x8(%rdi), %r13
addq $-0x8, %rdi
testb $0x8, %r13b
jne 0x1d91a5
movq %r13, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmaxps %xmm1, %xmm12, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmulps 0x80(%rax,%r10), %xmm0, %xmm3
vaddps 0x20(%rax,%r10), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r11), %xmm0, %xmm2
vaddps 0x20(%rax,%r11), %xmm2, %xmm2
vsubps %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%rbx), %xmm0, %xmm3
vminps %xmm2, %xmm13, %xmm2
vaddps 0x20(%rax,%rbx), %xmm3, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmulps 0x80(%rax,%r14), %xmm0, %xmm4
vaddps 0x20(%rax,%r14), %xmm4, %xmm4
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r13d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1d91ef
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r12d
testb $0x8, %r13b
jne 0x1d91eb
testq %r12, %r12
je 0x1d9214
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %ebp, %ebp
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rdx
jne 0x1d9219
movq %rax, %r13
testl %ebp, %ebp
je 0x1d90c7
jmp 0x1d925d
pushq $0x6
jmp 0x1d9216
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1d919c
pushq $0x4
popq %rbp
jmp 0x1d91e1
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x1d9258
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1d9228
movq %rcx, %r13
jmp 0x1d91e1
cmpl $0x6, %ebp
jne 0x1d9abf
movl %r13d, %eax
andl $0xf, %eax
xorl %ebp, %ebp
addq $-0x8, %rax
movq %rax, 0xd0(%rsp)
setne %cl
je 0x1d9abf
movq %r12, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
andq $-0x10, %r13
movq 0x8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x98(%rsp)
xorl %eax, %eax
movb %cl, 0x7(%rsp)
movq %rax, 0xd8(%rsp)
imulq $0x50, %rax, %rax
vmovss 0x1c(%rsi), %xmm0
movl 0x30(%r13,%rax), %ecx
movq 0x98(%rsp), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rcx
vmovss 0x28(%rcx), %xmm1
vmovss 0x2c(%rcx), %xmm2
vmovss 0x30(%rcx), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm6
vroundss $0x9, %xmm6, %xmm6, %xmm0
vaddss 0x1d176c9(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm0, %xmm0
vmaxss %xmm0, %xmm5, %xmm2
vcvttss2si %xmm2, %edx
movslq %edx, %rdx
movq 0xe0(%rcx), %rcx
imulq $0x38, %rdx, %rsi
movl (%r13,%rax), %ebp
movl 0x4(%r13,%rax), %edx
movq (%rcx,%rsi), %r12
movq 0x38(%rcx,%rsi), %rcx
vmovups (%r12,%rbp,4), %xmm4
movl 0x10(%r13,%rax), %r15d
vmovups (%r12,%r15,4), %xmm5
movl 0x20(%r13,%rax), %esi
movq %rsi, 0x40(%rsp)
vmovups (%r12,%rsi,4), %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmovups (%r12,%rdx,4), %xmm8
movl 0x14(%r13,%rax), %edi
vmovups (%r12,%rdi,4), %xmm7
movl 0x24(%r13,%rax), %esi
vmovups (%r12,%rsi,4), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movl 0x8(%r13,%rax), %ebx
vmovups (%r12,%rbx,4), %xmm10
movl 0x18(%r13,%rax), %r10d
vmovups (%r12,%r10,4), %xmm9
movl 0x28(%r13,%rax), %r8d
vmovups (%r12,%r8,4), %xmm11
movl 0xc(%r13,%rax), %r14d
vmovups (%r12,%r14,4), %xmm1
movl 0x1c(%r13,%rax), %r11d
vmovups (%r12,%r11,4), %xmm14
movl 0x2c(%r13,%rax), %r9d
vmovups (%r12,%r9,4), %xmm12
vmovups (%rcx,%rbp,4), %xmm15
vmovups (%rcx,%r15,4), %xmm13
vsubss %xmm2, %xmm6, %xmm6
vunpcklps %xmm10, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm1, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
vunpckhps %xmm1, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
vmovups (%rcx,%rdx,4), %xmm3
vunpcklps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vmovaps %xmm1, 0x1e0(%rsp)
vunpcklps %xmm0, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vmovaps %xmm0, 0xb0(%rsp)
vunpcklps %xmm9, %xmm5, %xmm0 # xmm0 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm1 # xmm1 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm14, %xmm7, %xmm2 # xmm2 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
vunpckhps %xmm14, %xmm7, %xmm5 # xmm5 = xmm7[2],xmm14[2],xmm7[3],xmm14[3]
vmovups (%rcx,%rbx,4), %xmm14
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vmovaps %xmm1, 0xa0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm9 # xmm9 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x1d0(%rsp)
vmovaps 0x30(%rsp), %xmm1
vunpcklps %xmm11, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
vunpckhps %xmm11, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
vmovaps 0x20(%rsp), %xmm2
vunpcklps %xmm12, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
vunpckhps %xmm12, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm12[2],xmm2[3],xmm12[3]
vmovups (%rcx,%r14,4), %xmm11
vunpcklps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vmovaps %xmm1, 0x30(%rsp)
vunpcklps %xmm5, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vmovaps %xmm1, 0xc0(%rsp)
vunpckhps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vmovaps %xmm0, 0x20(%rsp)
vunpcklps %xmm14, %xmm15, %xmm0 # xmm0 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
vunpckhps %xmm14, %xmm15, %xmm1 # xmm1 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
vunpcklps %xmm11, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vmovups (%rcx,%r10,4), %xmm11
vunpcklps %xmm3, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpcklps %xmm5, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpckhps %xmm5, %xmm0, %xmm2 # xmm2 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vunpcklps %xmm11, %xmm13, %xmm0 # xmm0 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
vunpckhps %xmm11, %xmm13, %xmm5 # xmm5 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
vmovups (%rcx,%rdi,4), %xmm11
vmovups (%rcx,%r11,4), %xmm12
vunpcklps %xmm12, %xmm11, %xmm13 # xmm13 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpckhps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
vunpcklps %xmm11, %xmm5, %xmm11 # xmm11 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpcklps %xmm13, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
vunpckhps %xmm13, %xmm0, %xmm7 # xmm7 = xmm0[2],xmm13[2],xmm0[3],xmm13[3]
movq 0x40(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
vmovups (%rcx,%r8,4), %xmm5
vunpcklps %xmm5, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpckhps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vmovups (%rcx,%rsi,4), %xmm5
movq 0x10(%rsp), %rsi
vmovups (%rcx,%r9,4), %xmm12
vunpcklps %xmm12, %xmm5, %xmm15 # xmm15 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
vunpckhps %xmm12, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
vunpcklps %xmm5, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpcklps %xmm15, %xmm13, %xmm14 # xmm14 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
vunpckhps %xmm15, %xmm13, %xmm15 # xmm15 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
vshufps $0x0, %xmm6, %xmm6, %xmm0 # xmm0 = xmm6[0,0,0,0]
vmovss 0x1d131f6(%rip), %xmm5 # 0x1eec714
vsubss %xmm6, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm13 # xmm13 = xmm5[0,0,0,0]
vmulps %xmm3, %xmm0, %xmm3
vmulps %xmm8, %xmm13, %xmm5
vaddps %xmm3, %xmm5, %xmm12
vmulps %xmm2, %xmm0, %xmm2
vmulps 0xb0(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm5
vmulps %xmm4, %xmm0, %xmm2
vmulps 0x1e0(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm6
vmulps %xmm1, %xmm0, %xmm1
vmulps %xmm9, %xmm13, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm7, %xmm0, %xmm2
vmulps 0x1d0(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmovaps 0x30(%r13,%rax), %xmm3
vmovaps %xmm3, 0x200(%rsp)
vmovaps 0x40(%r13,%rax), %xmm3
movq 0xd8(%rsp), %rax
vmulps %xmm0, %xmm11, %xmm7
vmulps 0xa0(%rsp), %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm9
vmulps %xmm0, %xmm14, %xmm7
vmulps %xmm0, %xmm15, %xmm8
vmulps %xmm0, %xmm10, %xmm0
vmulps 0xc0(%rsp), %xmm13, %xmm10
vaddps %xmm7, %xmm10, %xmm10
vmulps 0x20(%rsp), %xmm13, %xmm7
vaddps %xmm7, %xmm8, %xmm11
vmulps 0x30(%rsp), %xmm13, %xmm7
vaddps %xmm0, %xmm7, %xmm0
vmovaps %xmm3, 0x1f0(%rsp)
vsubps %xmm1, %xmm12, %xmm7
vmovaps %xmm5, %xmm1
vmovaps %xmm5, 0xb0(%rsp)
vsubps %xmm2, %xmm5, %xmm8
vsubps %xmm9, %xmm6, %xmm9
vsubps %xmm12, %xmm10, %xmm10
vmovaps %xmm12, %xmm5
vsubps %xmm1, %xmm11, %xmm11
vsubps %xmm6, %xmm0, %xmm12
vmulps %xmm12, %xmm8, %xmm0
vmulps %xmm11, %xmm9, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmulps %xmm10, %xmm9, %xmm1
vmulps %xmm7, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm4
vmulps %xmm7, %xmm11, %xmm2
vmulps %xmm10, %xmm8, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss (%rsi), %xmm0
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vbroadcastss 0x14(%rsi), %xmm15
vsubps %xmm0, %xmm5, %xmm2
vbroadcastss 0x18(%rsi), %xmm0
vmovaps 0xb0(%rsp), %xmm1
vsubps %xmm13, %xmm1, %xmm5
vsubps %xmm14, %xmm6, %xmm1
vmulps %xmm1, %xmm15, %xmm6
vmulps %xmm0, %xmm5, %xmm13
vsubps %xmm6, %xmm13, %xmm6
vbroadcastss 0x10(%rsi), %xmm13
vmulps %xmm0, %xmm2, %xmm14
vmovaps %xmm1, 0xc0(%rsp)
vmulps %xmm1, %xmm13, %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmovaps %xmm5, 0x20(%rsp)
vmulps %xmm5, %xmm13, %xmm14
vmovaps %xmm2, 0x30(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm14, %xmm2, %xmm2
vmovaps %xmm3, 0xa0(%rsp)
vmulps %xmm0, %xmm3, %xmm0
vmulps %xmm4, %xmm15, %xmm14
vaddps %xmm0, %xmm14, %xmm0
vmovaps 0x40(%rsp), %xmm14
vmulps %xmm13, %xmm14, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm2, %xmm12, %xmm12
vmulps %xmm1, %xmm11, %xmm11
vaddps %xmm12, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmovddup 0x1d47872(%rip), %xmm2 # xmm2 = mem[0,0]
vandps %xmm2, %xmm0, %xmm9
vxorps %xmm10, %xmm9, %xmm8
vmulps %xmm6, %xmm7, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vxorps %xmm1, %xmm9, %xmm7
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm8, %xmm1
vcmpnltps %xmm10, %xmm7, %xmm2
vandps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1d4779b(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm6
vcmpneqps %xmm0, %xmm10, %xmm0
vandps %xmm0, %xmm1, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x1c0(%rsp), %xmm10
jne 0x1d976c
incq %rax
cmpq 0xd0(%rsp), %rax
setb %cl
vxorps %xmm5, %xmm5, %xmm5
jne 0x1d92a9
jmp 0x1d9a3c
vmovaps %xmm4, %xmm15
vandps 0x1c0(%rsp), %xmm10, %xmm10
vmovaps 0xc0(%rsp), %xmm0
vmulps 0xa0(%rsp), %xmm0, %xmm0
vmulps 0x20(%rsp), %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x30(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%rsi), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%rsi), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm4
vtestps %xmm10, %xmm4
je 0x1d974f
vandps %xmm4, %xmm10, %xmm0
vmovaps %xmm8, 0x210(%rsp)
vmovaps %xmm7, 0x220(%rsp)
vmovaps %xmm3, 0x230(%rsp)
vmovaps %xmm6, 0x240(%rsp)
vmovaps %xmm0, 0x260(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vmovaps %xmm15, 0x2b0(%rsp)
vmovaps 0xa0(%rsp), %xmm1
vmovaps %xmm1, 0x2c0(%rsp)
vrcpps %xmm6, %xmm1
vmulps %xmm1, %xmm6, %xmm2
vbroadcastss 0x1d12ee2(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x230(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x290(%rsp)
vmulps 0x210(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x270(%rsp)
vmulps 0x220(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x280(%rsp)
vmovmskps %xmm0, %r15d
movq %rax, %rbx
bsfq %r15, %r12
movl 0x200(%rsp,%r12,4), %eax
movq 0x98(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rbp
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rbp)
je 0x1d98bd
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x1d98d8
cmpq $0x0, 0x48(%rbp)
jne 0x1d98d8
xorl %eax, %eax
jmp 0x1d98c3
btcq %r12, %r15
movb $0x1, %al
testb %al, %al
je 0x1d9acd
testq %r15, %r15
movq %rbx, %rax
jne 0x1d9878
jmp 0x1d974f
vmovss 0x270(%rsp,%r12,4), %xmm0
vmovss 0x280(%rsp,%r12,4), %xmm1
movq 0x8(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0x1f0(%rsp,%r12,4), %edx
vmovss 0x2a0(%rsp,%r12,4), %xmm2
vmovss 0x2b0(%rsp,%r12,4), %xmm3
vmovss 0x2c0(%rsp,%r12,4), %xmm4
vmovss %xmm2, 0xe0(%rsp)
vmovss %xmm3, 0xe4(%rsp)
vmovss %xmm4, 0xe8(%rsp)
vmovss %xmm0, 0xec(%rsp)
vmovss %xmm1, 0xf0(%rsp)
movl %edx, 0xf4(%rsp)
movl %eax, 0xf8(%rsp)
movl (%rcx), %eax
movl %eax, 0xfc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x100(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x40(%rsp)
vmovss 0x290(%rsp,%r12,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%rbp), %rax
movq %rax, 0x118(%rsp)
movq %rcx, 0x120(%rsp)
movq %rsi, 0x128(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%rbp), %rax
testq %rax, %rax
je 0x1d99ec
leaq 0x110(%rsp), %rdi
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d9a1c
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x1d9a18
testb $0x2, (%r14)
jne 0x1d9a01
testb $0x40, 0x3e(%rbp)
je 0x1d9a0b
leaq 0x110(%rsp), %rdi
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1d9a1c
xorl %eax, %eax
jmp 0x1d9a32
movq 0x10(%rsp), %rax
vmovss 0x40(%rsp), %xmm0
vmovss %xmm0, 0x20(%rax)
btcq %r12, %r15
movb $0x1, %al
movq 0x10(%rsp), %rsi
jmp 0x1d98c3
movq 0x90(%rsp), %rdi
vmovaps 0x1b0(%rsp), %xmm6
vmovaps 0x1a0(%rsp), %xmm7
movq 0x80(%rsp), %r8
vmovaps 0x190(%rsp), %xmm8
vmovaps 0x180(%rsp), %xmm9
vmovaps 0x170(%rsp), %xmm10
movq 0x78(%rsp), %r9
movq 0x70(%rsp), %r10
vmovaps 0x160(%rsp), %xmm11
movq 0x68(%rsp), %r11
movq 0x60(%rsp), %rbx
movq 0x58(%rsp), %r14
vmovaps 0x150(%rsp), %xmm12
vmovaps 0x140(%rsp), %xmm13
leaq 0x2d0(%rsp), %r15
movq 0x88(%rsp), %r12
xorl %ebp, %ebp
cmpl $0x3, %ebp
jne 0x1d90b6
jmp 0x1d8f32
testb $0x1, 0x7(%rsp)
vxorps %xmm5, %xmm5, %xmm5
movq 0x90(%rsp), %rdi
vmovaps 0x1b0(%rsp), %xmm6
vmovaps 0x1a0(%rsp), %xmm7
movq 0x80(%rsp), %r8
vmovaps 0x190(%rsp), %xmm8
vmovaps 0x180(%rsp), %xmm9
vmovaps 0x170(%rsp), %xmm10
movq 0x78(%rsp), %r9
movq 0x70(%rsp), %r10
vmovaps 0x160(%rsp), %xmm11
movq 0x68(%rsp), %r11
movq 0x60(%rsp), %rbx
movq 0x58(%rsp), %r14
vmovaps 0x150(%rsp), %xmm12
vmovaps 0x140(%rsp), %xmm13
leaq 0x2d0(%rsp), %r15
movq 0x88(%rsp), %r12
movl $0x0, %ebp
je 0x1d9abf
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
jmp 0x1d9abf
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMvMBIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xab8, %rsp # imm = 0xAB8
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1da73d
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1da73d
leaq 0x318(%rsp), %r8
movq 0x70(%rax), %rax
movq %rax, -0x8(%r8)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d472fd(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d17414(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1d12b32(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d47371(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d46312(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d46309(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm3, %xmm3
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm5
vmovaps %xmm5, 0x250(%rsp)
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vbroadcastss 0x8(%rsi), %xmm5
vmovaps %xmm5, 0x240(%rsp)
setb %r9b
vshufps $0x0, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[0,0,0,0]
vmovaps %xmm5, 0x230(%rsp)
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
vmovaps %xmm6, 0x220(%rsp)
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmovaps %xmm4, 0x210(%rsp)
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vmovaps %xmm4, 0x200(%rsp)
shll $0x4, %r9d
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vmovaps %xmm4, 0x1f0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmovaps %xmm3, 0x1e0(%rsp)
setb %r10b
shll $0x4, %r10d
orq $0x20, %r10
xorl %r11d, %r11d
vucomiss %xmm2, %xmm6
setb %r11b
shll $0x4, %r11d
orq $0x40, %r11
movq %r9, %r14
xorq $0x10, %r14
movq %r10, %r15
xorq $0x10, %r15
movq %r11, %r13
xorq $0x10, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps %xmm1, 0x1d0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x1c0(%rsp)
leaq 0x1f76284(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x120(%rsp)
leaq 0x310(%rsp), %rbp
vmovaps %xmm7, 0x80(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x68(%rsp)
movq %r13, 0x60(%rsp)
cmpq %rbp, %r8
je 0x1da73d
movq -0x8(%r8), %r12
addq $-0x8, %r8
testb $0x8, %r12b
jne 0x1d9e5e
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r9), %xmm0, %xmm1
vaddps 0x20(%rax,%r9), %xmm1, %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps 0x230(%rsp), %xmm1, %xmm1
vmovaps 0x1d0(%rsp), %xmm2
vmaxps %xmm1, %xmm2, %xmm1
vmulps 0x80(%rax,%r10), %xmm0, %xmm2
vaddps 0x20(%rax,%r10), %xmm2, %xmm2
vmovaps 0x250(%rsp), %xmm4
vsubps %xmm4, %xmm2, %xmm2
vmulps 0x220(%rsp), %xmm2, %xmm2
vmulps 0x80(%rax,%r11), %xmm0, %xmm3
vaddps 0x20(%rax,%r11), %xmm3, %xmm3
vmovaps 0x240(%rsp), %xmm5
vsubps %xmm5, %xmm3, %xmm3
vmulps 0x210(%rsp), %xmm3, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r14), %xmm0, %xmm2
vaddps 0x20(%rax,%r14), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vmovaps 0x1c0(%rsp), %xmm6
vminps %xmm2, %xmm6, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm4, %xmm3, %xmm3
vmulps 0x80(%rax,%r13), %xmm0, %xmm4
vaddps 0x20(%rax,%r13), %xmm4, %xmm4
vmulps 0x1f0(%rsp), %xmm3, %xmm3
vsubps %xmm5, %xmm4, %xmm4
vmulps 0x1e0(%rsp), %xmm4, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1d9ea6
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %ebx
testb $0x8, %r12b
jne 0x1d9ea2
testq %rbx, %rbx
je 0x1d9ecb
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdi
xorl %ecx, %ecx
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdi
jne 0x1d9ed0
movq %rax, %r12
testl %ecx, %ecx
je 0x1d9d3e
jmp 0x1d9f15
pushq $0x6
jmp 0x1d9ecd
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1d9e55
pushq $0x4
popq %rcx
jmp 0x1d9e98
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x1d9f0e
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x1d9edf
movq %rcx, %r12
xorl %ecx, %ecx
jmp 0x1d9e98
cmpl $0x6, %ecx
jne 0x1da734
movl %r12d, %edi
andl $0xf, %edi
xorl %ecx, %ecx
addq $-0x8, %rdi
setne %al
je 0x1da734
andq $-0x10, %r12
xorl %r14d, %r14d
movb %al, 0xe(%rsp)
imulq $0x140, %r14, %rax # imm = 0x140
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x90(%r12,%rax), %xmm0, %xmm1
vaddps (%r12,%rax), %xmm1, %xmm1
vmulps 0xa0(%r12,%rax), %xmm0, %xmm2
vaddps 0x10(%r12,%rax), %xmm2, %xmm2
vmulps 0xb0(%r12,%rax), %xmm0, %xmm3
vaddps 0x20(%r12,%rax), %xmm3, %xmm11
vmulps 0xc0(%r12,%rax), %xmm0, %xmm4
vaddps 0x30(%r12,%rax), %xmm4, %xmm4
vmulps 0xd0(%r12,%rax), %xmm0, %xmm5
vaddps 0x40(%r12,%rax), %xmm5, %xmm5
vmulps 0xe0(%r12,%rax), %xmm0, %xmm6
vaddps 0x50(%r12,%rax), %xmm6, %xmm12
vmulps 0xf0(%r12,%rax), %xmm0, %xmm6
vaddps 0x60(%r12,%rax), %xmm6, %xmm8
vmulps 0x100(%r12,%rax), %xmm0, %xmm6
vaddps 0x70(%r12,%rax), %xmm6, %xmm9
vmulps 0x110(%r12,%rax), %xmm0, %xmm0
movq %rax, 0x10(%rsp)
vaddps 0x80(%r12,%rax), %xmm0, %xmm0
vbroadcastss (%rsi), %xmm10
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm10, %xmm1, %xmm7
vsubps %xmm13, %xmm2, %xmm3
vsubps %xmm14, %xmm11, %xmm1
vsubps %xmm10, %xmm4, %xmm2
vmovaps %xmm2, 0x30(%rsp)
vsubps %xmm13, %xmm5, %xmm11
vsubps %xmm14, %xmm12, %xmm5
vsubps %xmm10, %xmm8, %xmm12
vsubps %xmm13, %xmm9, %xmm6
vsubps %xmm14, %xmm0, %xmm4
vmovaps %xmm4, 0x20(%rsp)
vsubps %xmm7, %xmm12, %xmm14
vsubps %xmm3, %xmm6, %xmm15
vsubps %xmm1, %xmm4, %xmm0
vaddps %xmm3, %xmm6, %xmm2
vaddps %xmm1, %xmm4, %xmm4
vmulps %xmm2, %xmm0, %xmm8
vmulps %xmm4, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm7, %xmm12, %xmm8
vmulps %xmm4, %xmm14, %xmm4
vmovaps %xmm0, 0x180(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm4, %xmm9, %xmm4
vmovaps %xmm15, 0x190(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x1a0(%rsp)
vmulps %xmm2, %xmm14, %xmm0
vsubps %xmm8, %xmm0, %xmm0
vbroadcastss 0x18(%rsi), %xmm10
vmulps %xmm0, %xmm10, %xmm0
vbroadcastss 0x14(%rsi), %xmm15
vmulps %xmm4, %xmm15, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vbroadcastss 0x10(%rsi), %xmm2
vmovaps %xmm2, 0x90(%rsp)
vmulps %xmm2, %xmm13, %xmm4
vaddps %xmm0, %xmm4, %xmm9
vmovaps %xmm11, %xmm4
vsubps %xmm11, %xmm3, %xmm2
vmovaps %xmm5, %xmm8
vsubps %xmm5, %xmm1, %xmm11
vmovaps %xmm3, 0xa0(%rsp)
vaddps %xmm4, %xmm3, %xmm0
vmovaps %xmm4, %xmm5
vmovaps %xmm1, 0x1b0(%rsp)
vaddps %xmm1, %xmm8, %xmm4
vmovaps %xmm8, %xmm1
vmulps %xmm0, %xmm11, %xmm14
vmulps %xmm4, %xmm2, %xmm3
vsubps %xmm14, %xmm3, %xmm3
vmovaps 0x30(%rsp), %xmm14
vsubps %xmm14, %xmm7, %xmm13
vmulps %xmm4, %xmm13, %xmm4
vmovaps %xmm7, 0xb0(%rsp)
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm7, %xmm11, %xmm8
vsubps %xmm4, %xmm8, %xmm4
vmovaps %xmm2, 0x170(%rsp)
vmulps %xmm7, %xmm2, %xmm7
vmovaps %xmm13, 0x160(%rsp)
vmulps %xmm0, %xmm13, %xmm0
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vmulps %xmm4, %xmm15, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vmovaps 0x90(%rsp), %xmm13
vmulps %xmm3, %xmm13, %xmm3
vaddps %xmm0, %xmm3, %xmm0
vsubps %xmm12, %xmm14, %xmm8
vaddps %xmm12, %xmm14, %xmm3
vsubps %xmm6, %xmm5, %xmm12
vaddps %xmm6, %xmm5, %xmm2
vmovaps 0x20(%rsp), %xmm5
vsubps %xmm5, %xmm1, %xmm4
vaddps %xmm5, %xmm1, %xmm1
vmulps %xmm2, %xmm4, %xmm5
vmulps %xmm1, %xmm12, %xmm7
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm1, %xmm8, %xmm1
vmulps %xmm3, %xmm4, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmovaps 0x80(%rsp), %xmm7
vmulps %xmm3, %xmm12, %xmm3
vmulps %xmm2, %xmm8, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm10, 0x20(%rsp)
vmulps %xmm2, %xmm10, %xmm2
vmovaps %xmm15, 0x30(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm5, %xmm13, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm0, %xmm9, %xmm2
vaddps %xmm2, %xmm1, %xmm13
vminps %xmm0, %xmm9, %xmm2
vminps %xmm1, %xmm2, %xmm2
vbroadcastss 0x1d46ce1(%rip), %xmm3 # 0x1f20ec4
vmovaps %xmm3, %xmm10
vandps %xmm3, %xmm13, %xmm5
vbroadcastss 0x1d46cd8(%rip), %xmm3 # 0x1f20ecc
vmovaps %xmm5, 0x130(%rsp)
vmulps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d46cb6(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm3, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm9, 0x150(%rsp)
vmovaps %xmm0, 0x140(%rsp)
vmaxps %xmm0, %xmm9, %xmm5
vmaxps %xmm1, %xmm5, %xmm1
vcmpleps %xmm3, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm0
leaq 0xf(%rsp), %rax
movq %rax, 0x290(%rsp)
vtestps 0x120(%rsp), %xmm0
je 0x1da6e4
vmovaps 0x180(%rsp), %xmm7
vmovaps 0x170(%rsp), %xmm15
vmulps %xmm7, %xmm15, %xmm1
vmovaps 0x190(%rsp), %xmm9
vmulps %xmm11, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm12, %xmm11, %xmm3
vmulps %xmm4, %xmm15, %xmm5
vsubps %xmm3, %xmm5, %xmm5
vandps %xmm1, %xmm10, %xmm1
vandps %xmm3, %xmm10, %xmm3
vcmpltps %xmm3, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm5
vmovaps 0x160(%rsp), %xmm14
vmulps %xmm4, %xmm14, %xmm1
vmulps %xmm7, %xmm14, %xmm2
vmovaps 0x1a0(%rsp), %xmm7
vmulps %xmm7, %xmm11, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm8, %xmm11, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm3, %xmm10, %xmm3
vandps %xmm1, %xmm10, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmovaps %xmm0, %xmm4
vmulps %xmm8, %xmm15, %xmm0
vmulps %xmm7, %xmm15, %xmm2
vmulps %xmm14, %xmm9, %xmm3
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm0, %xmm6, %xmm6
vandps %xmm3, %xmm10, %xmm3
vandps %xmm0, %xmm10, %xmm0
vcmpltps %xmm0, %xmm3, %xmm0
vblendvps %xmm0, %xmm2, %xmm6, %xmm0
vmulps 0x20(%rsp), %xmm0, %xmm2
vmulps 0x30(%rsp), %xmm1, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmulps 0x90(%rsp), %xmm5, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm2, %xmm3
vmulps 0x1b0(%rsp), %xmm0, %xmm2
vmulps 0xa0(%rsp), %xmm1, %xmm6
vaddps %xmm2, %xmm6, %xmm2
vmulps 0xb0(%rsp), %xmm5, %xmm6
vaddps %xmm2, %xmm6, %xmm2
vaddps %xmm2, %xmm2, %xmm2
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm7
vbroadcastss 0x1d123b0(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm2, %xmm2
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm2, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm2, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovaps 0x80(%rsp), %xmm7
vcmpneqps 0x1d11670(%rip), %xmm3, %xmm3 # 0x1eeba10
vandps %xmm6, %xmm3, %xmm6
vandps 0x120(%rsp), %xmm4, %xmm3
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm3, %xmm6
je 0x1da6e4
addq %r12, 0x10(%rsp)
vandps %xmm3, %xmm6, %xmm3
vmovaps 0x150(%rsp), %xmm6
vmovaps %xmm6, 0x260(%rsp)
vmovaps 0x140(%rsp), %xmm4
vmovaps %xmm4, 0x270(%rsp)
vmovaps %xmm13, 0x280(%rsp)
movq %rax, 0x290(%rsp)
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps %xmm2, 0x2d0(%rsp)
vmovaps %xmm5, 0x2e0(%rsp)
vmovaps %xmm1, 0x2f0(%rsp)
vmovaps %xmm0, 0x300(%rsp)
movq (%rdx), %rax
movq %rax, 0x90(%rsp)
vrcpps %xmm13, %xmm0
vmulps %xmm0, %xmm13, %xmm1
vbroadcastss 0x1d122ca(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d16b89(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x130(%rsp), %xmm5
vcmpnltps %xmm1, %xmm5, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x2b0(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x2c0(%rsp)
vmovmskps %xmm3, %ebp
movq %r14, 0x78(%rsp)
bsfq %rbp, %r15
movq 0x10(%rsp), %rax
movl 0x120(%rax,%r15,4), %eax
movq 0x90(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r13)
je 0x1da4e9
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
movl $0x0, %ecx
jne 0x1da503
cmpq $0x0, 0x48(%r13)
jne 0x1da503
xorl %eax, %eax
movq 0x78(%rsp), %r14
jmp 0x1da4f1
btcq %r15, %rbp
movb $0x1, %al
xorl %ecx, %ecx
testb %al, %al
je 0x1da70c
testq %rbp, %rbp
jne 0x1da49c
jmp 0x1da6e4
movq %rdi, 0x30(%rsp)
vmovss 0x2b0(%rsp,%r15,4), %xmm0
vmovss 0x2c0(%rsp,%r15,4), %xmm1
movq 0x8(%rdx), %rcx
movq 0x10(%rsp), %rdi
movl 0x130(%rdi,%r15,4), %edi
vmovss 0x2e0(%rsp,%r15,4), %xmm2
vmovss 0x2f0(%rsp,%r15,4), %xmm3
vmovss 0x300(%rsp,%r15,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edi, 0xd4(%rsp)
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x2d0(%rsp,%r15,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r8, 0x58(%rsp)
movq %r9, 0x50(%rsp)
movq %r10, 0x48(%rsp)
movq %r11, 0x40(%rsp)
je 0x1da66d
leaq 0xf0(%rsp), %rdi
movq %rdx, 0xb0(%rsp)
movq %rsi, 0xa0(%rsp)
callq *%rax
movq 0x40(%rsp), %r11
movq 0x48(%rsp), %r10
movq 0x50(%rsp), %r9
vmovaps 0x80(%rsp), %xmm7
movq 0x58(%rsp), %r8
movq 0xa0(%rsp), %rsi
movq 0xb0(%rsp), %rdx
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1da6c7
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x1da6c3
testb $0x2, (%r14)
jne 0x1da683
testb $0x40, 0x3e(%r13)
je 0x1da6b6
leaq 0xf0(%rsp), %rdi
movq %rdx, %r14
movq %rsi, %r13
callq *%rax
movq 0x40(%rsp), %r11
movq 0x48(%rsp), %r10
movq 0x50(%rsp), %r9
vmovaps 0x80(%rsp), %xmm7
movq 0x58(%rsp), %r8
movq %r13, %rsi
movq %r14, %rdx
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1da6c7
xorl %eax, %eax
jmp 0x1da6d8
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %r15, %rbp
movb $0x1, %al
xorl %ecx, %ecx
movq 0x30(%rsp), %rdi
jmp 0x1da4e2
incq %r14
cmpq %rdi, %r14
setb %al
leaq 0x310(%rsp), %rbp
jne 0x1d9f3a
movq 0x70(%rsp), %r14
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
jmp 0x1da734
testb $0x1, 0xe(%rsp)
movq 0x70(%rsp), %r14
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
leaq 0x310(%rsp), %rbp
je 0x1da734
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rcx
cmpl $0x3, %ecx
jne 0x1d9d2d
addq $0xab8, %rsp # imm = 0xAB8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMiMBIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xaf8, %rsp # imm = 0xAF8
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1db5a7
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x1db5a7
leaq 0x358(%rsp), %r8
movq 0x70(%rax), %rax
vmovaps 0x10(%rsi), %xmm2
vxorps %xmm6, %xmm6, %xmm6
vmaxss 0xc(%rsi), %xmm6, %xmm1
vbroadcastss 0x1d4671f(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d16836(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1d11f54(%rip), %xmm4 # 0x1eec714
vdivps %xmm2, %xmm4, %xmm2
vbroadcastss 0x1d46793(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq %rax, -0x8(%r8)
vbroadcastss 0x1d45730(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d45727(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm4
vmovaps %xmm4, 0x270(%rsp)
vbroadcastss 0x8(%rsi), %xmm4
vmovaps %xmm4, 0x260(%rsp)
xorl %r9d, %r9d
vucomiss %xmm6, %xmm3
setb %r9b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vmovaps %xmm4, 0x250(%rsp)
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vmovaps %xmm5, 0x240(%rsp)
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmovaps %xmm3, 0x230(%rsp)
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vmovaps %xmm3, 0x220(%rsp)
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vmovaps %xmm3, 0x210(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmovaps %xmm2, 0x200(%rsp)
shll $0x4, %r9d
xorl %r10d, %r10d
vucomiss %xmm6, %xmm4
setb %r10b
shll $0x4, %r10d
orq $0x20, %r10
xorl %r11d, %r11d
vucomiss %xmm6, %xmm5
setb %r11b
shll $0x4, %r11d
orq $0x40, %r11
movq %r9, %r14
xorq $0x10, %r14
movq %r10, %r15
xorq $0x10, %r15
movq %r11, %r13
xorq $0x10, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps %xmm1, 0x1f0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x1e0(%rsp)
leaq 0x1f756a2(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x160(%rsp)
movq %rdx, 0x48(%rsp)
movq %rsi, 0x40(%rsp)
vmovaps %xmm7, 0xc0(%rsp)
movq %r9, 0xb0(%rsp)
movq %r10, 0xa8(%rsp)
movq %r11, 0xa0(%rsp)
movq %r14, 0x98(%rsp)
movq %r15, 0x90(%rsp)
movq %r13, 0x88(%rsp)
leaq 0x350(%rsp), %rax
cmpq %rax, %r8
je 0x1db5a7
movq -0x8(%r8), %r12
addq $-0x8, %r8
testb $0x8, %r12b
jne 0x1daa6b
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r9), %xmm0, %xmm1
vaddps 0x20(%rax,%r9), %xmm1, %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps 0x250(%rsp), %xmm1, %xmm1
vmovaps 0x1f0(%rsp), %xmm2
vmaxps %xmm1, %xmm2, %xmm1
vmulps 0x80(%rax,%r10), %xmm0, %xmm2
vaddps 0x20(%rax,%r10), %xmm2, %xmm2
vmovaps 0x270(%rsp), %xmm4
vsubps %xmm4, %xmm2, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vmulps 0x80(%rax,%r11), %xmm0, %xmm3
vaddps 0x20(%rax,%r11), %xmm3, %xmm3
vmovaps 0x260(%rsp), %xmm5
vsubps %xmm5, %xmm3, %xmm3
vmulps 0x230(%rsp), %xmm3, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r14), %xmm0, %xmm2
vaddps 0x20(%rax,%r14), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps 0x220(%rsp), %xmm2, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vmovaps 0x1e0(%rsp), %xmm6
vminps %xmm2, %xmm6, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm4, %xmm3, %xmm3
vmulps 0x80(%rax,%r13), %xmm0, %xmm4
vaddps 0x20(%rax,%r13), %xmm4, %xmm4
vmulps 0x210(%rsp), %xmm3, %xmm3
vsubps %xmm5, %xmm4, %xmm4
vmulps 0x200(%rsp), %xmm4, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1daab3
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %ebx
testb $0x8, %r12b
jne 0x1daaaf
testq %rbx, %rbx
je 0x1daad8
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdi
xorl %ebp, %ebp
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdi
jne 0x1daadd
movq %rax, %r12
testl %ebp, %ebp
je 0x1da94b
jmp 0x1dab20
pushq $0x6
jmp 0x1daada
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1daa62
pushq $0x4
popq %rbp
jmp 0x1daaa5
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x1dab1b
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x1daaec
movq %rcx, %r12
jmp 0x1daaa5
cmpl $0x6, %ebp
jne 0x1db59e
movl %r12d, %eax
andl $0xf, %eax
xorl %ebp, %ebp
addq $-0x8, %rax
setne %cl
je 0x1db59e
movq %r8, 0xb8(%rsp)
andq $-0x10, %r12
movq (%rdx), %r8
xorl %edi, %edi
movq %rax, 0xf0(%rsp)
movq %r8, 0x50(%rsp)
movb %cl, 0xe(%rsp)
movq %rdi, 0xf8(%rsp)
imulq $0x50, %rdi, %rdi
vmovss 0x1c(%rsi), %xmm0
movl 0x30(%r12,%rdi), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %rax
vmovss 0x28(%rax), %xmm1
vmovss 0x2c(%rax), %xmm2
vmovss 0x30(%rax), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm6
vroundss $0x9, %xmm6, %xmm6, %xmm0
vaddss 0x1d15e1e(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmaxss %xmm0, %xmm1, %xmm2
vcvttss2si %xmm2, %ecx
movslq %ecx, %rcx
movq 0xe0(%rax), %rdx
imulq $0x38, %rcx, %rcx
movl (%r12,%rdi), %r13d
movl 0x4(%r12,%rdi), %r15d
movq (%rdx,%rcx), %rax
movq 0x38(%rdx,%rcx), %rcx
vmovups (%rax,%r13,4), %xmm4
movl 0x10(%r12,%rdi), %esi
vmovups (%rax,%rsi,4), %xmm5
movl 0x20(%r12,%rdi), %edx
movq %rdx, 0x30(%rsp)
vmovups (%rax,%rdx,4), %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups (%rax,%r15,4), %xmm7
movl 0x14(%r12,%rdi), %edx
vmovups (%rax,%rdx,4), %xmm8
movl 0x24(%r12,%rdi), %r8d
movq %r8, 0x70(%rsp)
vmovups (%rax,%r8,4), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movl 0x8(%r12,%rdi), %r14d
vmovups (%rax,%r14,4), %xmm9
movl 0x18(%r12,%rdi), %r10d
vmovups (%rax,%r10,4), %xmm11
movl 0x28(%r12,%rdi), %r8d
vmovups (%rax,%r8,4), %xmm10
movl 0xc(%r12,%rdi), %ebp
vmovups (%rax,%rbp,4), %xmm1
movl 0x1c(%r12,%rdi), %r11d
vmovups (%rax,%r11,4), %xmm14
movl 0x2c(%r12,%rdi), %r9d
vmovups (%rax,%r9,4), %xmm12
vmovups (%rcx,%r13,4), %xmm15
vmovups (%rcx,%rsi,4), %xmm13
vsubss %xmm2, %xmm6, %xmm6
vunpcklps %xmm9, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm9[0],xmm4[1],xmm9[1]
vunpckhps %xmm9, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
vunpcklps %xmm1, %xmm7, %xmm0 # xmm0 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
vunpckhps %xmm1, %xmm7, %xmm1 # xmm1 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
vmovups (%rcx,%r15,4), %xmm3
vunpcklps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vmovaps %xmm1, 0xd0(%rsp)
vunpcklps %xmm0, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm7 # xmm7 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vunpcklps %xmm11, %xmm5, %xmm0 # xmm0 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpckhps %xmm11, %xmm5, %xmm1 # xmm1 = xmm5[2],xmm11[2],xmm5[3],xmm11[3]
vunpcklps %xmm14, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
vunpckhps %xmm14, %xmm8, %xmm5 # xmm5 = xmm8[2],xmm14[2],xmm8[3],xmm14[3]
vmovups (%rcx,%r14,4), %xmm11
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vmovaps %xmm1, 0x60(%rsp)
vunpcklps %xmm2, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm5 # xmm5 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps 0x20(%rsp), %xmm1
vunpcklps %xmm10, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
vunpckhps %xmm10, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
vmovaps 0x10(%rsp), %xmm4
vunpcklps %xmm12, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm12[0],xmm4[1],xmm12[1]
vunpckhps %xmm12, %xmm4, %xmm10 # xmm10 = xmm4[2],xmm12[2],xmm4[3],xmm12[3]
vmovups (%rcx,%rbp,4), %xmm12
vunpcklps %xmm10, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
vmovaps %xmm1, 0x20(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0xe0(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x10(%rsp)
vunpcklps %xmm11, %xmm15, %xmm10 # xmm10 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm0 # xmm0 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm12, %xmm3, %xmm11 # xmm11 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vmovups (%rcx,%r10,4), %xmm12
vunpcklps %xmm3, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm11, %xmm10, %xmm3 # xmm3 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
vunpckhps %xmm11, %xmm10, %xmm1 # xmm1 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
vunpcklps %xmm12, %xmm13, %xmm11 # xmm11 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
vunpckhps %xmm12, %xmm13, %xmm10 # xmm10 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vmovups (%rcx,%r11,4), %xmm13
vunpcklps %xmm13, %xmm12, %xmm14 # xmm14 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
vunpcklps %xmm14, %xmm11, %xmm0 # xmm0 = xmm11[0],xmm14[0],xmm11[1],xmm14[1]
vunpckhps %xmm14, %xmm11, %xmm2 # xmm2 = xmm11[2],xmm14[2],xmm11[3],xmm14[3]
movq 0x30(%rsp), %rax
vmovups (%rcx,%rax,4), %xmm11
vmovups (%rcx,%r8,4), %xmm12
movq 0x50(%rsp), %r8
movq 0x40(%rsp), %rsi
movq 0x48(%rsp), %rdx
vunpcklps %xmm12, %xmm11, %xmm14 # xmm14 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpckhps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
movq 0x70(%rsp), %rax
vmovups (%rcx,%rax,4), %xmm12
vmovups (%rcx,%r9,4), %xmm13
vunpcklps %xmm13, %xmm12, %xmm15 # xmm15 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpcklps %xmm15, %xmm14, %xmm13 # xmm13 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
vunpckhps %xmm15, %xmm14, %xmm14 # xmm14 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
vshufps $0x0, %xmm6, %xmm6, %xmm15 # xmm15 = xmm6[0,0,0,0]
vmovss 0x1d11946(%rip), %xmm12 # 0x1eec714
vsubss %xmm6, %xmm12, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm12 # xmm12 = xmm6[0,0,0,0]
vmulps %xmm3, %xmm15, %xmm3
vmulps %xmm9, %xmm12, %xmm6
vaddps %xmm3, %xmm6, %xmm6
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm7, %xmm12, %xmm3
vaddps %xmm1, %xmm3, %xmm7
vmulps %xmm4, %xmm15, %xmm1
vmulps 0xd0(%rsp), %xmm12, %xmm3
vaddps %xmm1, %xmm3, %xmm4
vmulps %xmm0, %xmm15, %xmm0
vmulps %xmm8, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm8
vmulps %xmm2, %xmm15, %xmm0
vmulps %xmm5, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm5
vmovaps 0x30(%r12,%rdi), %xmm0
vmovaps %xmm0, 0x290(%rsp)
vmovaps 0x40(%r12,%rdi), %xmm0
movb 0xe(%rsp), %dil
vmulps %xmm10, %xmm15, %xmm1
vmulps 0x60(%rsp), %xmm12, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm13, %xmm15, %xmm2
vmulps %xmm14, %xmm15, %xmm3
vmulps %xmm11, %xmm15, %xmm9
vmulps 0xe0(%rsp), %xmm12, %xmm10
vaddps %xmm2, %xmm10, %xmm2
vmulps 0x10(%rsp), %xmm12, %xmm10
vaddps %xmm3, %xmm10, %xmm15
vmulps 0x20(%rsp), %xmm12, %xmm3
vaddps %xmm3, %xmm9, %xmm9
vmovaps %xmm0, 0x280(%rsp)
vbroadcastss (%rsi), %xmm12
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm12, %xmm6, %xmm10
vsubps %xmm13, %xmm7, %xmm3
vsubps %xmm14, %xmm4, %xmm7
vsubps %xmm12, %xmm8, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vsubps %xmm13, %xmm5, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vsubps %xmm14, %xmm1, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vsubps %xmm12, %xmm2, %xmm12
vsubps %xmm13, %xmm15, %xmm2
vmovaps %xmm2, 0x60(%rsp)
vsubps %xmm14, %xmm9, %xmm1
vmovaps %xmm1, 0x70(%rsp)
vsubps %xmm10, %xmm12, %xmm14
vsubps %xmm3, %xmm2, %xmm15
vsubps %xmm7, %xmm1, %xmm0
vaddps %xmm3, %xmm2, %xmm4
vaddps %xmm7, %xmm1, %xmm5
vmulps %xmm4, %xmm0, %xmm8
vmulps %xmm5, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm10, %xmm12, %xmm8
vmulps %xmm5, %xmm14, %xmm5
vmovaps %xmm0, 0x1a0(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm5, %xmm9, %xmm5
vmovaps %xmm15, 0x1b0(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x1c0(%rsp)
vmulps %xmm4, %xmm14, %xmm4
vsubps %xmm8, %xmm4, %xmm4
vbroadcastss 0x18(%rsi), %xmm6
vmulps %xmm4, %xmm6, %xmm4
vbroadcastss 0x14(%rsi), %xmm15
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vbroadcastss 0x10(%rsi), %xmm11
vmulps %xmm13, %xmm11, %xmm5
vaddps %xmm4, %xmm5, %xmm9
vmovaps 0x10(%rsp), %xmm2
vsubps %xmm2, %xmm3, %xmm0
vmovaps 0x20(%rsp), %xmm13
vsubps %xmm13, %xmm7, %xmm14
vmovaps %xmm3, 0xd0(%rsp)
vaddps %xmm2, %xmm3, %xmm4
vmovaps %xmm7, 0x1d0(%rsp)
vaddps %xmm7, %xmm13, %xmm5
vmulps %xmm4, %xmm14, %xmm7
vmulps %xmm5, %xmm0, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmovaps 0x30(%rsp), %xmm8
vsubps %xmm8, %xmm10, %xmm1
vmulps %xmm5, %xmm1, %xmm5
vmovaps %xmm10, 0xe0(%rsp)
vaddps %xmm8, %xmm10, %xmm7
vmovaps %xmm8, %xmm10
vmovaps %xmm14, 0x180(%rsp)
vmulps %xmm7, %xmm14, %xmm8
vmovaps %xmm1, %xmm14
vsubps %xmm5, %xmm8, %xmm5
vmovaps %xmm0, 0x190(%rsp)
vmulps %xmm7, %xmm0, %xmm7
vmulps %xmm4, %xmm1, %xmm4
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm3, %xmm11, %xmm3
vaddps %xmm4, %xmm3, %xmm4
vsubps %xmm12, %xmm10, %xmm5
vaddps %xmm12, %xmm10, %xmm3
vmovaps 0x60(%rsp), %xmm0
vsubps %xmm0, %xmm2, %xmm12
vaddps %xmm0, %xmm2, %xmm2
vmovaps 0x70(%rsp), %xmm0
vsubps %xmm0, %xmm13, %xmm8
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm2, %xmm8, %xmm1
vmulps %xmm0, %xmm12, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vmulps %xmm3, %xmm8, %xmm7
vsubps %xmm0, %xmm7, %xmm0
vmovaps 0xc0(%rsp), %xmm7
vmulps %xmm3, %xmm12, %xmm3
vmulps %xmm2, %xmm5, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm6, 0x30(%rsp)
vmulps %xmm2, %xmm6, %xmm2
vmovaps %xmm15, 0x70(%rsp)
vmulps %xmm0, %xmm15, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm11, 0x60(%rsp)
vmulps %xmm1, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm4, %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm11
vminps %xmm4, %xmm9, %xmm1
vminps %xmm0, %xmm1, %xmm1
vbroadcastss 0x1d45e3a(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm11, %xmm3
vbroadcastss 0x1d45e35(%rip), %xmm2 # 0x1f20ecc
vmovaps %xmm3, 0x10(%rsp)
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d45e16(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm2, %xmm3
vcmpnltps %xmm3, %xmm1, %xmm1
vmovaps %xmm9, 0x20(%rsp)
vmaxps %xmm4, %xmm9, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm2, %xmm0, %xmm0
vorps %xmm0, %xmm1, %xmm0
movb $0x0, 0xf(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x2d0(%rsp)
vtestps 0x160(%rsp), %xmm0
je 0x1db4fb
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x190(%rsp), %xmm10
vmovaps %xmm0, 0x170(%rsp)
vmulps %xmm10, %xmm9, %xmm0
vmovaps 0x1b0(%rsp), %xmm15
vmovaps 0x180(%rsp), %xmm6
vmulps %xmm6, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm6, %xmm12, %xmm2
vmulps %xmm8, %xmm10, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d45d88(%rip), %xmm7 # 0x1f20ec4
vandps %xmm7, %xmm0, %xmm0
vandps %xmm7, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm0
vmulps %xmm8, %xmm14, %xmm1
vmulps %xmm14, %xmm9, %xmm2
vmovaps 0x1c0(%rsp), %xmm9
vmulps %xmm6, %xmm9, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm5, %xmm6, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm7, %xmm3, %xmm3
vandps %xmm7, %xmm1, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmulps %xmm5, %xmm10, %xmm2
vmulps %xmm10, %xmm9, %xmm3
vmulps %xmm14, %xmm15, %xmm5
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm7, %xmm5, %xmm5
vandps %xmm7, %xmm2, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps 0x30(%rsp), %xmm2, %xmm3
vmulps 0x70(%rsp), %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vmulps 0x60(%rsp), %xmm0, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x1d0(%rsp), %xmm2, %xmm3
vmulps 0xd0(%rsp), %xmm1, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps 0xe0(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1d1150b(%rip), %xmm9 # 0x1eec714
vsubps %xmm7, %xmm9, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovaps 0xc0(%rsp), %xmm7
vcmpneqps 0x1d107cb(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x170(%rsp), %xmm5
vandps 0x160(%rsp), %xmm5, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x1db4fb
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x20(%rsp), %xmm8
vmovaps %xmm8, 0x2a0(%rsp)
vmovaps %xmm4, 0x2b0(%rsp)
vmovaps %xmm11, 0x2c0(%rsp)
movq %rax, 0x2d0(%rsp)
vmovaps %xmm5, 0x2e0(%rsp)
vmovaps %xmm3, 0x310(%rsp)
vmovaps %xmm0, 0x320(%rsp)
vmovaps %xmm1, 0x330(%rsp)
vmovaps %xmm2, 0x340(%rsp)
vrcpps %xmm11, %xmm0
vmulps %xmm0, %xmm11, %xmm1
vbroadcastss 0x1d11438(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d15cf7(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x10(%rsp), %xmm3
vcmpnltps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm8, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x2f0(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x300(%rsp)
vmovmskps %xmm5, %r13d
bsfq %r13, %rbp
movl 0x290(%rsp,%rbp,4), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %r15
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r15)
je 0x1db35b
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x1db375
cmpq $0x0, 0x48(%r15)
jne 0x1db375
xorl %eax, %eax
jmp 0x1db361
btcq %rbp, %r13
movb $0x1, %al
xorl %ebp, %ebp
testb %al, %al
je 0x1db556
testq %r13, %r13
jne 0x1db326
jmp 0x1db4fb
vmovss 0x2f0(%rsp,%rbp,4), %xmm0
vmovss 0x300(%rsp,%rbp,4), %xmm1
movq 0x8(%rdx), %rcx
movl 0x280(%rsp,%rbp,4), %edi
vmovss 0x320(%rsp,%rbp,4), %xmm2
vmovss 0x330(%rsp,%rbp,4), %xmm3
vmovss 0x340(%rsp,%rbp,4), %xmm4
vmovss %xmm2, 0x100(%rsp)
vmovss %xmm3, 0x104(%rsp)
vmovss %xmm4, 0x108(%rsp)
vmovss %xmm0, 0x10c(%rsp)
vmovss %xmm1, 0x110(%rsp)
movl %edi, 0x114(%rsp)
movl %eax, 0x118(%rsp)
movl (%rcx), %eax
movl %eax, 0x11c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x120(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x30(%rsp)
vmovss 0x310(%rsp,%rbp,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x5c(%rsp)
leaq 0x5c(%rsp), %rax
movq %rax, 0x130(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x138(%rsp)
movq %rcx, 0x140(%rsp)
movq %rsi, 0x148(%rsp)
leaq 0x100(%rsp), %rax
movq %rax, 0x150(%rsp)
movl $0x1, 0x158(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
je 0x1db495
leaq 0x130(%rsp), %rdi
callq *%rax
movq 0x50(%rsp), %r8
vmovaps 0xc0(%rsp), %xmm7
movq 0x40(%rsp), %rsi
movq 0x48(%rsp), %rdx
movq 0x130(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1db4de
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x1db4da
testb $0x2, (%r14)
jne 0x1db4ab
testb $0x40, 0x3e(%r15)
je 0x1db4cd
leaq 0x130(%rsp), %rdi
callq *%rax
movq 0x50(%rsp), %r8
vmovaps 0xc0(%rsp), %xmm7
movq 0x40(%rsp), %rsi
movq 0x48(%rsp), %rdx
movq 0x130(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1db4de
xorl %eax, %eax
jmp 0x1db4ef
vmovss 0x30(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %rbp, %r13
movb $0x1, %al
xorl %ebp, %ebp
movb 0xe(%rsp), %dil
jmp 0x1db363
movq 0xf8(%rsp), %rdi
incq %rdi
movq 0xf0(%rsp), %rax
cmpq %rax, %rdi
setb %cl
jne 0x1dab5c
movq 0xb8(%rsp), %r8
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r10
movq 0xa0(%rsp), %r11
movq 0x98(%rsp), %r14
movq 0x90(%rsp), %r15
movq 0x88(%rsp), %r13
xorl %ebp, %ebp
jmp 0x1db59e
testb $0x1, %dil
movq 0xb8(%rsp), %r8
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r10
movq 0xa0(%rsp), %r11
movq 0x98(%rsp), %r14
movq 0x90(%rsp), %r15
movq 0x88(%rsp), %r13
je 0x1db59e
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
cmpl $0x3, %ebp
jne 0x1da932
addq $0xaf8, %rsp # imm = 0xAF8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::QuadMvIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/* Shadow/occlusion query of a single ray against a BVH of width N.
   Traverses the tree depth-first with an explicit node stack and, on the
   first occluding primitive found, marks the ray occluded by setting
   ray.tfar = -inf and stops.  Only the occlusion flag is recorded; no hit
   data (normal, UV, primID) is produced. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
/* tfar < 0 is the "already occluded" encoding (see tfar = neg_inf below) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
/* tnear/tfar are clamped to >= 0 so the traversal interval is well-formed */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
/* stack empty: traversal finished without finding an occluder */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* intersect() reporting false means cur is not an inner node (it is a
   leaf, handled below); undo the speculative node-stat increment */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
/* for an occlusion query any hit suffices: flag the ray and terminate */
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xbc0, %rsp # imm = 0xBC0
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1db5ee
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1db5dc
movq %rdx, 0x20(%rsp)
leaq 0x408(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%r14), %xmm3
vmaxss 0xc(%r14), %xmm2, %xmm1
vbroadcastss 0x1d4589a(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d159b1(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d110c1(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %xmm6
vbroadcastss 0x4(%r14), %xmm7
vbroadcastss 0x8(%r14), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rcx
xorq $0x10, %rcx
movq %r10, %r15
xorq $0x10, %r15
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0x1f7489d(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x1e0(%rsp)
vperm2f128 $0x2, (%rax), %ymm0, %ymm1 # ymm1 = mem[0,1],ymm0[0,1]
leaq 0x400(%rsp), %rbx
pushq $0x6
popq %rdx
vbroadcastss 0x1d152bd(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1d10ffc(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x220(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps %xmm6, 0x1b0(%rsp)
vmovaps %xmm7, 0x1a0(%rsp)
vmovaps %xmm8, 0x190(%rsp)
vmovaps %xmm9, 0x180(%rsp)
vmovaps %xmm10, 0x170(%rsp)
vmovaps %xmm3, 0x160(%rsp)
movq %r15, 0x38(%rsp)
vmovaps %xmm4, 0x150(%rsp)
vmovaps %xmm5, 0x140(%rsp)
cmpq %rbx, %rdi
je 0x1db5dc
movq -0x8(%rdi), %r13
addq $-0x8, %rdi
testb $0x8, %r13b
jne 0x1db820
vmovaps 0x20(%r13,%r8), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%r13,%r9), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r13,%r10), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r13,%r11), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%r13,%rcx), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r13,%r15), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r12d
testb $0x8, %r13b
jne 0x1db858
testq %r12, %r12
je 0x1db85c
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rsi
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rsi
jne 0x1db861
movq %rax, %r13
xorl %esi, %esi
testl %esi, %esi
je 0x1db78e
jmp 0x1db8a6
movl %edx, %esi
jmp 0x1db84e
pushq $0x4
popq %rsi
jmp 0x1db84e
movq %rbx, %rdx
movq %rcx, %rbx
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rsi, %rax
je 0x1db898
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x1db876
movq %rcx, %r13
movq %rbx, %rcx
movq %rdx, %rbx
pushq $0x6
popq %rdx
jmp 0x1db84c
cmpl $0x6, %esi
jne 0x1dbf23
movl %r13d, %eax
andl $0xf, %eax
xorl %esi, %esi
addq $-0x8, %rax
movq %rax, 0x98(%rsp)
setne %bl
je 0x1dbf1b
andq $-0x10, %r13
xorl %r15d, %r15d
imulq $0xe0, %r15, %rax
vbroadcastf128 0xd0(%r13,%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vbroadcastf128 0xc0(%r13,%rax), %ymm1 # ymm1 = mem[0,1,0,1]
vmovaps %ymm1, 0x260(%rsp)
vmovaps %ymm0, 0x240(%rsp)
vmovaps (%r13,%rax), %xmm0
vmovaps 0x10(%r13,%rax), %xmm1
vmovaps 0x20(%r13,%rax), %xmm2
vinsertf128 $0x1, 0x60(%r13,%rax), %ymm0, %ymm3
vinsertf128 $0x1, 0x70(%r13,%rax), %ymm1, %ymm5
vinsertf128 $0x1, 0x80(%r13,%rax), %ymm2, %ymm7
vbroadcastf128 0x30(%r13,%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vbroadcastf128 0x40(%r13,%rax), %ymm1 # ymm1 = mem[0,1,0,1]
vbroadcastf128 0x50(%r13,%rax), %ymm2 # ymm2 = mem[0,1,0,1]
vbroadcastf128 0x90(%r13,%rax), %ymm9 # ymm9 = mem[0,1,0,1]
vbroadcastf128 0xa0(%r13,%rax), %ymm10 # ymm10 = mem[0,1,0,1]
vbroadcastf128 0xb0(%r13,%rax), %ymm11 # ymm11 = mem[0,1,0,1]
vsubps %ymm0, %ymm3, %ymm4
vsubps %ymm1, %ymm5, %ymm12
vmovaps %ymm12, 0x120(%rsp)
vsubps %ymm2, %ymm7, %ymm8
vsubps %ymm3, %ymm9, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm11, %ymm11
vmulps %ymm11, %ymm12, %ymm0
vmulps %ymm10, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x100(%rsp)
vmulps %ymm9, %ymm8, %ymm0
vmovaps %ymm4, 0x40(%rsp)
vmulps %ymm4, %ymm11, %ymm2
vsubps %ymm0, %ymm2, %ymm6
vmulps %ymm4, %ymm10, %ymm0
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm0, %ymm12, %ymm4
vbroadcastss (%r14), %ymm12
vbroadcastss 0x4(%r14), %ymm13
vbroadcastss 0x8(%r14), %ymm14
vbroadcastss 0x14(%r14), %ymm15
vsubps %ymm12, %ymm3, %ymm3
vbroadcastss 0x18(%r14), %ymm12
vsubps %ymm13, %ymm5, %ymm5
vsubps %ymm14, %ymm7, %ymm7
vmulps %ymm7, %ymm15, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x10(%r14), %ymm14
vmulps %ymm3, %ymm12, %ymm0
vmulps %ymm7, %ymm14, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm5, %ymm14, %ymm1
vmulps %ymm3, %ymm15, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm4, 0x1c0(%rsp)
vmulps %ymm4, %ymm12, %ymm2
vmulps %ymm6, %ymm15, %ymm12
vmovaps %ymm6, %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmovaps 0x100(%rsp), %ymm4
vmulps %ymm4, %ymm14, %ymm12
vmovaps %ymm4, %ymm14
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm11, %ymm11
vmulps %ymm0, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm1, %ymm8, %ymm1
vmulps 0x120(%rsp), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d4544f(%rip), %ymm1 # 0x1f20ec0
vandps %ymm1, %ymm2, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0x40(%rsp), %ymm13, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %ymm10, %ymm6, %ymm0
vcmpnltps %ymm10, %ymm8, %ymm1
vandps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d4541e(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm2, %ymm4
vcmpneqps %ymm2, %ymm10, %ymm1
vandps %ymm1, %ymm0, %ymm0
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm10
vtestps 0x1e0(%rsp), %ymm10
jne 0x1dbae5
incq %r15
cmpq 0x98(%rsp), %r15
setb %bl
jne 0x1db8d3
jmp 0x1dbece
vandps 0x1e0(%rsp), %ymm10, %ymm10
vmulps 0x1c0(%rsp), %ymm7, %ymm0
vmulps %ymm5, %ymm15, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps %ymm3, %ymm14, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm3
vbroadcastss 0xc(%r14), %ymm0
vmulps %ymm0, %ymm4, %ymm0
vcmpltps %ymm3, %ymm0, %ymm0
vbroadcastss 0x20(%r14), %ymm1
vmulps %ymm1, %ymm4, %ymm1
vcmpleps %ymm1, %ymm3, %ymm1
vandps %ymm0, %ymm1, %ymm5
vtestps %ymm10, %ymm5
je 0x1dbacc
vandps %ymm5, %ymm10, %ymm0
vmovaps %ymm6, 0x280(%rsp)
vmovaps %ymm8, 0x2a0(%rsp)
vmovaps %ymm3, 0x2c0(%rsp)
vmovaps %ymm4, 0x2e0(%rsp)
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps 0x2a0(%rsp), %ymm1
vsubps %ymm1, %ymm4, %ymm2
vmovaps 0x220(%rsp), %ymm3
vblendvps %ymm3, %ymm2, %ymm0, %ymm2
vmovaps %ymm2, 0x280(%rsp)
vsubps %ymm0, %ymm4, %ymm0
vblendvps %ymm3, %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x200(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm1
vmovaps %ymm1, 0x3a0(%rsp)
vmulps %ymm3, %ymm15, %ymm1
vmovaps %ymm1, 0x3c0(%rsp)
vrcpps %ymm4, %ymm1
vmulps 0x1c0(%rsp), %ymm3, %ymm3
vmovaps %ymm3, 0x3e0(%rsp)
vmulps %ymm1, %ymm4, %ymm3
vbroadcastss 0x1d10b28(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vmulps 0x2c0(%rsp), %ymm1, %ymm3
movq 0x20(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x1c0(%rsp)
vmovaps %ymm3, 0x380(%rsp)
vmulps %ymm1, %ymm2, %ymm2
vmovaps %ymm2, 0x340(%rsp)
vmulps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovmskps %ymm0, %eax
movq %rcx, 0x18(%rsp)
movq %rax, 0x40(%rsp)
bsfq %rax, %rcx
movq %rcx, 0x100(%rsp)
movl 0x260(%rsp,%rcx,4), %eax
movq 0x1c0(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x10(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x120(%rsp)
testl %ecx, 0x34(%rax)
je 0x1dbcb4
movq 0x20(%rsp), %rcx
movq 0x10(%rcx), %rax
movq %rax, 0x30(%rsp)
cmpq $0x0, 0x10(%rax)
movq 0x18(%rsp), %rcx
jne 0x1dbcec
movq 0x120(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x1dbcec
xorl %eax, %eax
jmp 0x1dbcd1
movq 0x100(%rsp), %rax
movq 0x40(%rsp), %rcx
btcq %rax, %rcx
movq %rcx, 0x40(%rsp)
movb $0x1, %al
movq 0x18(%rsp), %rcx
testb %al, %al
je 0x1dbf31
movq 0x40(%rsp), %rax
testq %rax, %rax
jne 0x1dbc46
jmp 0x1dbacc
movq %r11, 0x70(%rsp)
movq %r10, 0x78(%rsp)
movq %r9, 0x80(%rsp)
movq %r8, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
movq 0x100(%rsp), %rsi
vmovss 0x340(%rsp,%rsi,4), %xmm0
vmovss 0x360(%rsp,%rsi,4), %xmm1
movq 0x20(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0x240(%rsp,%rsi,4), %edx
vmovss 0x3a0(%rsp,%rsi,4), %xmm2
vmovss 0x3c0(%rsp,%rsi,4), %xmm3
vmovss 0x3e0(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xa0(%rsp)
vmovss %xmm3, 0xa4(%rsp)
vmovss %xmm4, 0xa8(%rsp)
vmovss %xmm0, 0xac(%rsp)
vmovss %xmm1, 0xb0(%rsp)
movl %edx, 0xb4(%rsp)
movq 0x10(%rsp), %rax
movl %eax, 0xb8(%rsp)
movl (%rcx), %eax
movl %eax, 0xbc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xc0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x10(%rsp)
vmovss 0x380(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x2c(%rsp)
leaq 0x2c(%rsp), %rax
movq %rax, 0xd0(%rsp)
movq 0x120(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xd8(%rsp)
movq %rcx, 0xe0(%rsp)
movq %r14, 0xe8(%rsp)
leaq 0xa0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x1, 0xf8(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x1dbe35
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1dbe79
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1dbe75
movq 0x30(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x1dbe5b
movq 0x120(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1dbe68
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1dbe79
xorl %eax, %eax
jmp 0x1dbe9d
vmovss 0x10(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq 0x40(%rsp), %rax
movq 0x100(%rsp), %rcx
btcq %rcx, %rax
movq %rax, 0x40(%rsp)
movb $0x1, %al
movq 0x90(%rsp), %rdi
movq 0x88(%rsp), %r8
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %r10
movq 0x70(%rsp), %r11
movq 0x18(%rsp), %rcx
pushq $0x6
popq %rdx
xorl %esi, %esi
jmp 0x1dbcd1
vmovaps 0x1b0(%rsp), %xmm6
vmovaps 0x1a0(%rsp), %xmm7
vmovaps 0x190(%rsp), %xmm8
vmovaps 0x180(%rsp), %xmm9
vmovaps 0x170(%rsp), %xmm10
vmovaps 0x160(%rsp), %xmm3
movq 0x38(%rsp), %r15
vmovaps 0x150(%rsp), %xmm4
vmovaps 0x140(%rsp), %xmm5
leaq 0x400(%rsp), %rbx
cmpl $0x3, %esi
jne 0x1db77d
jmp 0x1db5dc
testb $0x1, %bl
vmovaps 0x1b0(%rsp), %xmm6
vmovaps 0x1a0(%rsp), %xmm7
vmovaps 0x190(%rsp), %xmm8
vmovaps 0x180(%rsp), %xmm9
vmovaps 0x170(%rsp), %xmm10
vmovaps 0x160(%rsp), %xmm3
movq 0x38(%rsp), %r15
vmovaps 0x150(%rsp), %xmm4
vmovaps 0x140(%rsp), %xmm5
leaq 0x400(%rsp), %rbx
je 0x1dbf23
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rsi
jmp 0x1dbf23
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/* Single-ray occlusion (shadow-ray) traversal over a width-N BVH.
   Walks the hierarchy with an explicit stack; the first primitive that
   reports occlusion sets ray.tfar = -inf (the occluded marker) and ends
   the query.  Produces no geometric hit record. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
/* a negative tfar encodes "occluded" (set via tfar = neg_inf below) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
/* both interval ends are clamped to >= 0, keeping [tnear,tfar] valid */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
/* empty stack: whole tree traversed, ray is unoccluded */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* a false return means cur was not an inner node (leaf case below);
   roll back the optimistically counted node-traversal stat */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
/* any-hit semantics: one occluding primitive ends the query */
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xbc0, %rsp # imm = 0xBC0
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1dbfcc
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1dbfba
movq %rdx, %r8
leaq 0x408(%rsp), %r9
movq 0x70(%rax), %rax
movq %rax, -0x8(%r9)
vmovaps 0x10(%r14), %xmm3
vmaxss 0xc(%r14), %xmm2, %xmm1
vbroadcastss 0x1d44ebe(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d14fd5(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d106e5(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %xmm6
vbroadcastss 0x4(%r14), %xmm7
vbroadcastss 0x8(%r14), %xmm8
xorl %r10d, %r10d
vucomiss %xmm2, %xmm3
setb %r10b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x4, %r10d
xorl %r11d, %r11d
vucomiss %xmm2, %xmm4
setb %r11b
shll $0x4, %r11d
orq $0x20, %r11
xorl %esi, %esi
vucomiss %xmm2, %xmm5
setb %sil
shll $0x4, %esi
orq $0x40, %rsi
movq %r10, %rdi
xorq $0x10, %rdi
movq %r11, %r15
xorq $0x10, %r15
movq %rsi, %rcx
xorq $0x10, %rcx
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0x1f73ec3(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x1e0(%rsp)
vperm2f128 $0x2, (%rax), %ymm0, %ymm1 # ymm1 = mem[0,1],ymm0[0,1]
leaq 0x400(%rsp), %rbx
vbroadcastss 0x1d148e6(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1d10625(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x220(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vmovaps %xmm6, 0x150(%rsp)
vmovaps %xmm7, 0x140(%rsp)
vmovaps %xmm8, 0x130(%rsp)
vmovaps %xmm9, 0x120(%rsp)
vmovaps %xmm10, 0x110(%rsp)
vmovaps %xmm3, 0x100(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
movq %r15, 0x20(%rsp)
movq %rcx, 0x18(%rsp)
vmovaps %xmm4, 0xf0(%rsp)
vmovaps %xmm5, 0xe0(%rsp)
cmpq %rbx, %r9
je 0x1dbfba
movq -0x8(%r9), %r13
addq $-0x8, %r9
testb $0x8, %r13b
jne 0x1dc206
vmovaps 0x20(%r13,%r10), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%r13,%r11), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r13,%rsi), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r13,%rdi), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%r13,%r15), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r13,%rcx), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r12d
testb $0x8, %r13b
jne 0x1dc23e
testq %r12, %r12
je 0x1dc242
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdx
jne 0x1dc247
movq %rax, %r13
xorl %eax, %eax
testl %eax, %eax
je 0x1dc174
jmp 0x1dc28b
pushq $0x6
jmp 0x1dc244
pushq $0x4
popq %rax
jmp 0x1dc234
movq %rcx, %rbx
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1dc27b
movq %rcx, (%r9)
addq $0x8, %r9
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1dc259
movq %rcx, %r13
movq %rbx, %rcx
leaq 0x400(%rsp), %rbx
jmp 0x1dc232
cmpl $0x6, %eax
jne 0x1dc9f2
movl %r13d, %edx
andl $0xf, %edx
xorl %eax, %eax
addq $-0x8, %rdx
movq %rdx, 0x78(%rsp)
setne %bl
je 0x1dc9ea
andq $-0x10, %r13
movq (%r8), %rax
movq %rax, 0x38(%rsp)
xorl %r15d, %r15d
imulq $0x60, %r15, %rax
prefetcht0 (%r13,%rax)
prefetcht0 0x40(%r13,%rax)
movl 0x40(%r13,%rax), %edx
movq 0x38(%rsp), %rcx
movq 0x228(%rcx), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%r13,%rax), %esi
movl 0x4(%r13,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm4
movl 0x10(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x20(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x30(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x44(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm7
movl 0x14(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x24(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm5
movl 0x34(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x48(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm10
movl 0x18(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm11
movl 0x28(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm9
movl 0x38(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x4c(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm12
movl 0x1c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm13
movl 0x2c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm14
movl 0x3c(%r13,%rax), %edx
vunpcklps %xmm10, %xmm4, %xmm15 # xmm15 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm12, %xmm7, %xmm10 # xmm10 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
vunpckhps %xmm12, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vunpcklps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vmovaps %ymm4, 0x40(%rsp)
vunpcklps %xmm10, %xmm15, %xmm7 # xmm7 = xmm15[0],xmm10[0],xmm15[1],xmm10[1]
vunpckhps %xmm10, %xmm15, %xmm10 # xmm10 = xmm15[2],xmm10[2],xmm15[3],xmm10[3]
vunpcklps %xmm11, %xmm2, %xmm15 # xmm15 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
vunpckhps %xmm11, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm11[2],xmm2[3],xmm11[3]
vunpcklps %xmm13, %xmm6, %xmm11 # xmm11 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
vunpckhps %xmm13, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
vunpcklps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpcklps %xmm11, %xmm15, %xmm6 # xmm6 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm11 # xmm11 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm9, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
vunpckhps %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
vunpcklps %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
vunpckhps %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm14[2],xmm5[3],xmm14[3]
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vunpcklps %xmm9, %xmm13, %xmm5 # xmm5 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
vunpckhps %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
vunpcklps %xmm8, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm12, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm13, %xmm12 # xmm12 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
vunpckhps %xmm8, %xmm13, %xmm8 # xmm8 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
vbroadcastf128 0x40(%r13,%rax), %ymm3 # ymm3 = mem[0,1,0,1]
vmovaps %ymm3, 0x260(%rsp)
vbroadcastf128 0x50(%r13,%rax), %ymm3 # ymm3 = mem[0,1,0,1]
vmovaps %ymm3, 0x240(%rsp)
vinsertf128 $0x1, %xmm5, %ymm7, %ymm4
vinsertf128 $0x1, %xmm9, %ymm10, %ymm5
vmovaps 0x40(%rsp), %ymm3
vinsertf128 $0x1, %xmm1, %ymm3, %ymm7
vinsertf128 $0x1, %xmm6, %ymm6, %ymm1
vinsertf128 $0x1, %xmm11, %ymm11, %ymm6
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm12, %ymm12, %ymm9
vinsertf128 $0x1, %xmm8, %ymm8, %ymm10
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vsubps %ymm1, %ymm4, %ymm3
vsubps %ymm6, %ymm5, %ymm12
vmovaps %ymm12, 0x180(%rsp)
vsubps %ymm2, %ymm7, %ymm8
vsubps %ymm4, %ymm9, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm0, %ymm11
vmulps %ymm11, %ymm12, %ymm0
vmulps %ymm10, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vmulps %ymm9, %ymm8, %ymm0
vmovaps %ymm3, 0x40(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vsubps %ymm0, %ymm2, %ymm6
vmulps %ymm3, %ymm10, %ymm0
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm0, %ymm12, %ymm3
vbroadcastss (%r14), %ymm12
vbroadcastss 0x4(%r14), %ymm13
vbroadcastss 0x8(%r14), %ymm14
vbroadcastss 0x14(%r14), %ymm15
vsubps %ymm12, %ymm4, %ymm2
vbroadcastss 0x18(%r14), %ymm12
vsubps %ymm13, %ymm5, %ymm5
vsubps %ymm14, %ymm7, %ymm7
vmulps %ymm7, %ymm15, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x10(%r14), %ymm14
vmulps %ymm2, %ymm12, %ymm0
vmulps %ymm7, %ymm14, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm5, %ymm14, %ymm1
vmovaps %ymm2, 0x1a0(%rsp)
vmulps %ymm2, %ymm15, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm3, 0x160(%rsp)
vmulps %ymm3, %ymm12, %ymm2
vmulps %ymm6, %ymm15, %ymm12
vmovaps %ymm6, %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmovaps 0x1c0(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm12
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm11, %ymm11
vmulps %ymm0, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm1, %ymm8, %ymm1
vmulps 0x180(%rsp), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d44926(%rip), %ymm1 # 0x1f20ec0
vandps %ymm1, %ymm2, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0x40(%rsp), %ymm13, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %ymm10, %ymm6, %ymm0
vcmpnltps %ymm10, %ymm8, %ymm1
vandps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d448f5(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm2, %ymm4
vcmpneqps %ymm2, %ymm10, %ymm1
vandps %ymm1, %ymm0, %ymm0
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm10
vtestps 0x1e0(%rsp), %ymm10
jne 0x1dc60b
incq %r15
cmpq 0x78(%rsp), %r15
setb %bl
jne 0x1dc2bd
jmp 0x1dc982
vmovaps %ymm3, %ymm14
vandps 0x1e0(%rsp), %ymm10, %ymm10
vmulps 0x160(%rsp), %ymm7, %ymm0
vmulps %ymm5, %ymm15, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x1a0(%rsp), %ymm3, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm3
vbroadcastss 0xc(%r14), %ymm0
vmulps %ymm0, %ymm4, %ymm0
vcmpltps %ymm3, %ymm0, %ymm0
vbroadcastss 0x20(%r14), %ymm1
vmulps %ymm1, %ymm4, %ymm1
vcmpleps %ymm1, %ymm3, %ymm1
vandps %ymm0, %ymm1, %ymm5
vtestps %ymm10, %ymm5
je 0x1dc5f5
vandps %ymm5, %ymm10, %ymm0
vmovaps %ymm6, 0x280(%rsp)
vmovaps %ymm8, 0x2a0(%rsp)
vmovaps %ymm3, 0x2c0(%rsp)
vmovaps %ymm4, 0x2e0(%rsp)
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x280(%rsp), %ymm0
vmovaps 0x2a0(%rsp), %ymm1
vsubps %ymm1, %ymm4, %ymm2
vmovaps 0x220(%rsp), %ymm3
vblendvps %ymm3, %ymm2, %ymm0, %ymm2
vmovaps %ymm2, 0x280(%rsp)
vsubps %ymm0, %ymm4, %ymm0
vblendvps %ymm3, %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x2a0(%rsp)
vmovaps 0x200(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm1
vmovaps %ymm1, 0x3a0(%rsp)
vmulps %ymm3, %ymm15, %ymm1
vmovaps %ymm1, 0x3c0(%rsp)
vrcpps %ymm4, %ymm1
vmulps 0x160(%rsp), %ymm3, %ymm3
vmovaps %ymm3, 0x3e0(%rsp)
vmulps %ymm1, %ymm4, %ymm3
vbroadcastss 0x1d0fff9(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vmulps 0x2c0(%rsp), %ymm1, %ymm3
vmovaps %ymm3, 0x380(%rsp)
vmulps %ymm1, %ymm2, %ymm2
vmovaps %ymm2, 0x340(%rsp)
vmulps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x360(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovmskps %ymm0, %edx
bsfq %rdx, %rsi
movl 0x260(%rsp,%rsi,4), %eax
movq 0x38(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rdi
movl 0x24(%r14), %ecx
testl %ecx, 0x34(%rdi)
je 0x1dc79a
movq 0x10(%r8), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1dc7b2
cmpq $0x0, 0x48(%rdi)
jne 0x1dc7b2
xorl %eax, %eax
jmp 0x1dc7a0
btcq %rsi, %rdx
movb $0x1, %al
testb %al, %al
je 0x1dca00
testq %rdx, %rdx
jne 0x1dc760
jmp 0x1dc5f5
movq %rcx, 0x180(%rsp)
movq %rdx, 0x40(%rsp)
movq %r11, 0x70(%rsp)
movq %r10, 0x1a0(%rsp)
movq %r9, 0x160(%rsp)
vmovss 0x340(%rsp,%rsi,4), %xmm0
vmovss 0x360(%rsp,%rsi,4), %xmm1
movq %r8, 0x1c0(%rsp)
movq 0x8(%r8), %rcx
movl 0x240(%rsp,%rsi,4), %edx
vmovss 0x3a0(%rsp,%rsi,4), %xmm2
vmovss 0x3c0(%rsp,%rsi,4), %xmm3
vmovss 0x3e0(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0x80(%rsp)
vmovss %xmm3, 0x84(%rsp)
vmovss %xmm4, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovss %xmm1, 0x90(%rsp)
movl %edx, 0x94(%rsp)
movl %eax, 0x98(%rsp)
movl (%rcx), %eax
movl %eax, 0x9c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xa0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x10(%rsp)
movq %rsi, 0x68(%rsp)
vmovss 0x380(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x14(%rsp)
leaq 0x14(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%rdi), %rax
movq %rax, 0xb8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq %rdi, 0x60(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
je 0x1dc8f3
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1dc93a
movq 0x180(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1dc936
movq 0x180(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x1dc91c
movq 0x60(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1dc929
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1dc93a
xorl %eax, %eax
jmp 0x1dc95b
vmovss 0x10(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq 0x40(%rsp), %rax
movq 0x68(%rsp), %rcx
btcq %rcx, %rax
movq %rax, 0x40(%rsp)
movb $0x1, %al
movq 0x1c0(%rsp), %r8
movq 0x160(%rsp), %r9
movq 0x1a0(%rsp), %r10
movq 0x70(%rsp), %r11
movq 0x40(%rsp), %rdx
jmp 0x1dc7a0
vmovaps 0x150(%rsp), %xmm6
vmovaps 0x140(%rsp), %xmm7
vmovaps 0x130(%rsp), %xmm8
vmovaps 0x120(%rsp), %xmm9
vmovaps 0x110(%rsp), %xmm10
vmovaps 0x100(%rsp), %xmm3
movq 0x30(%rsp), %rsi
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r15
movq 0x18(%rsp), %rcx
vmovaps 0xf0(%rsp), %xmm4
vmovaps 0xe0(%rsp), %xmm5
leaq 0x400(%rsp), %rbx
xorl %eax, %eax
jmp 0x1dc9f2
leaq 0x400(%rsp), %rbx
cmpl $0x3, %eax
jne 0x1dc163
jmp 0x1dbfba
testb $0x1, %bl
vmovaps 0x150(%rsp), %xmm6
vmovaps 0x140(%rsp), %xmm7
vmovaps 0x130(%rsp), %xmm8
vmovaps 0x120(%rsp), %xmm9
vmovaps 0x110(%rsp), %xmm10
vmovaps 0x100(%rsp), %xmm3
movq 0x30(%rsp), %rsi
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r15
movq 0x18(%rsp), %rcx
vmovaps 0xf0(%rsp), %xmm4
vmovaps 0xe0(%rsp), %xmm5
leaq 0x400(%rsp), %rbx
movl $0x0, %eax
je 0x1dc9f2
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rax
jmp 0x1dc9f2
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, true, embree::avx::ArrayIntersector1<embree::avx::QuadMiIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xd40, %rsp # imm = 0xD40
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1de59b
movq %rsi, %r9
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1de59b
movq %rdx, %r8
leaq 0x588(%rsp), %r10
movq 0x70(%rax), %rax
movq %rax, -0x8(%r10)
vmaxss 0xc(%r9), %xmm2, %xmm1
vmovaps 0x10(%r9), %xmm3
vbroadcastss 0x1d436ec(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d13803(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1d0ef21(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d43760(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d42701(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d426f8(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm3, %xmm3
vbroadcastss (%r9), %xmm7
vbroadcastss 0x4(%r9), %xmm8
xorl %r11d, %r11d
vucomiss %xmm2, %xmm4
setb %r11b
vshufps $0x0, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[0,0,0,0]
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm10 # xmm10 = xmm4[1,1,1,1]
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[2,2,2,2]
vshufps $0x0, %xmm3, %xmm3, %xmm12 # xmm12 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm13 # xmm13 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm14 # xmm14 = xmm3[2,2,2,2]
shll $0x4, %r11d
xorl %r14d, %r14d
vucomiss %xmm2, %xmm5
setb %r14b
shll $0x4, %r14d
orq $0x20, %r14
xorl %r15d, %r15d
vucomiss %xmm2, %xmm6
setb %r15b
shll $0x4, %r15d
orq $0x40, %r15
movq %r11, %rsi
xorq $0x10, %rsi
movq %r14, %rdi
xorq $0x10, %rdi
movq %r15, %rax
xorq $0x10, %rax
movq %rax, 0x118(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
leaq 0x1f726d0(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm1
vinsertf128 $0x1, 0xf0(%rax), %ymm1, %ymm1
vshufps $0x0, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[0,0,0,0]
vbroadcastss 0x1d0ee48(%rip), %ymm2 # 0x1eec714
vbroadcastss 0x1d130f7(%rip), %ymm0 # 0x1ef09cc
vmovaps %ymm1, 0x220(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vbroadcastss 0x8(%r9), %xmm4
vmovaps %xmm7, 0xe0(%rsp)
vmovaps %xmm8, 0xd0(%rsp)
vmovaps %xmm9, 0xc0(%rsp)
vmovaps %xmm10, 0xb0(%rsp)
vmovaps %xmm11, 0xa0(%rsp)
vmovaps %xmm12, 0x90(%rsp)
vmovaps %xmm13, 0x80(%rsp)
vmovaps %xmm14, 0x70(%rsp)
movq %r14, 0x48(%rsp)
movq %r15, 0x40(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovaps %xmm5, 0x60(%rsp)
vmovaps %xmm6, 0x50(%rsp)
vmovaps %xmm4, 0xf0(%rsp)
leaq 0x580(%rsp), %rax
cmpq %rax, %r10
je 0x1de59b
movq -0x8(%r10), %rbx
addq $-0x8, %r10
testb $0x8, %bl
jne 0x1dda05
vmovaps 0x20(%rbx,%r11), %xmm0
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%rbx,%r14), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r15), %xmm1
vsubps %xmm4, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vmaxps %xmm5, %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%rsi), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vmovaps 0x20(%rbx,%rdi), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm13, %xmm2
vminps %xmm2, %xmm1, %xmm1
movq 0x118(%rsp), %rax
vmovaps 0x20(%rbx,%rax), %xmm2
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm2, %xmm14, %xmm2
vminps %xmm6, %xmm2, %xmm2
vminps %xmm2, %xmm1, %xmm1
vcmpleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %r12d
testb $0x8, %bl
jne 0x1dda3d
testq %r12, %r12
je 0x1dda41
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %r13d, %r13d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdx
jne 0x1dda47
movq %rax, %rbx
testl %r13d, %r13d
je 0x1dd97a
jmp 0x1dda7c
pushq $0x6
jmp 0x1dda43
pushq $0x4
popq %r13
jmp 0x1dda32
movq %rax, (%r10)
addq $0x8, %r10
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1dda77
movq %rcx, (%r10)
addq $0x8, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1dda56
movq %rcx, %rbx
jmp 0x1dda32
cmpl $0x6, %r13d
jne 0x1de591
movl %ebx, %eax
andl $0xf, %eax
xorl %r13d, %r13d
addq $-0x8, %rax
setne %cl
je 0x1de591
movb %cl, 0x7(%rsp)
andq $-0x10, %rbx
movq (%r8), %rdx
xorl %ecx, %ecx
movq %r12, 0x18(%rsp)
movq %rax, 0x108(%rsp)
movq %rdx, 0x20(%rsp)
movq %rcx, 0x110(%rsp)
imulq $0x60, %rcx, %rax
prefetcht0 (%rbx,%rax)
prefetcht0 0x40(%rbx,%rax)
movq %rdx, %rcx
movl 0x40(%rbx,%rax), %edx
movq 0x228(%rcx), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%rbx,%rax), %esi
movl 0x4(%rbx,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm4
movl 0x10(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x20(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x30(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x44(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm7
movl 0x14(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x24(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm5
movl 0x34(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x48(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm10
movl 0x18(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm11
movl 0x28(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm9
movl 0x38(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x4c(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm12
movl 0x1c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm13
movl 0x2c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm14
movl 0x3c(%rbx,%rax), %edx
vunpcklps %xmm10, %xmm4, %xmm15 # xmm15 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm12, %xmm7, %xmm10 # xmm10 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
vunpckhps %xmm12, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vunpcklps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vunpcklps %xmm10, %xmm15, %xmm7 # xmm7 = xmm15[0],xmm10[0],xmm15[1],xmm10[1]
vunpckhps %xmm10, %xmm15, %xmm10 # xmm10 = xmm15[2],xmm10[2],xmm15[3],xmm10[3]
vunpcklps %xmm11, %xmm2, %xmm15 # xmm15 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
vunpckhps %xmm11, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm11[2],xmm2[3],xmm11[3]
vunpcklps %xmm13, %xmm6, %xmm11 # xmm11 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
vunpckhps %xmm13, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
vunpcklps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpcklps %xmm11, %xmm15, %xmm6 # xmm6 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm11 # xmm11 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm9, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
vunpckhps %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
vunpcklps %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
vunpckhps %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm14[2],xmm5[3],xmm14[3]
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vunpcklps %xmm9, %xmm13, %xmm5 # xmm5 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
vunpckhps %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
vunpcklps %xmm8, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm12, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vunpcklps %xmm3, %xmm0, %xmm14 # xmm14 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm13, %xmm3 # xmm3 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
vbroadcastf128 0x40(%rbx,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vunpckhps %xmm8, %xmm13, %xmm8 # xmm8 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
vmovaps %ymm12, 0x3a0(%rsp)
vbroadcastf128 0x50(%rbx,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vmovaps %ymm12, 0x380(%rsp)
vinsertf128 $0x1, %xmm5, %ymm7, %ymm15
vinsertf128 $0x1, %xmm9, %ymm10, %ymm7
vinsertf128 $0x1, %xmm1, %ymm4, %ymm0
vinsertf128 $0x1, %xmm6, %ymm6, %ymm4
vinsertf128 $0x1, %xmm11, %ymm11, %ymm5
vinsertf128 $0x1, %xmm2, %ymm2, %ymm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vinsertf128 $0x1, %xmm8, %ymm8, %ymm10
vinsertf128 $0x1, %xmm14, %ymm14, %ymm3
vbroadcastss (%r9), %ymm12
vbroadcastss 0x4(%r9), %ymm13
vbroadcastss 0x8(%r9), %ymm14
vsubps %ymm12, %ymm15, %ymm2
vsubps %ymm13, %ymm7, %ymm15
vsubps %ymm14, %ymm0, %ymm0
vsubps %ymm12, %ymm4, %ymm4
vmovaps %ymm4, 0x1c0(%rsp)
vsubps %ymm13, %ymm5, %ymm11
vsubps %ymm14, %ymm1, %ymm1
vmovaps %ymm1, 0x1e0(%rsp)
vsubps %ymm12, %ymm9, %ymm4
vsubps %ymm13, %ymm10, %ymm1
vmovaps %ymm1, 0x1a0(%rsp)
vsubps %ymm14, %ymm3, %ymm14
vsubps %ymm2, %ymm4, %ymm3
vmovaps %ymm4, %ymm8
vmovaps %ymm4, 0x180(%rsp)
vsubps %ymm15, %ymm1, %ymm12
vsubps %ymm0, %ymm14, %ymm6
vaddps %ymm1, %ymm15, %ymm4
vaddps %ymm0, %ymm14, %ymm5
vmulps %ymm4, %ymm6, %ymm7
vmulps %ymm5, %ymm12, %ymm9
vsubps %ymm7, %ymm9, %ymm1
vaddps %ymm2, %ymm8, %ymm7
vmulps %ymm5, %ymm3, %ymm5
vmovaps %ymm6, 0x2c0(%rsp)
vmulps %ymm7, %ymm6, %ymm10
vsubps %ymm5, %ymm10, %ymm5
vmovaps %ymm12, 0x2e0(%rsp)
vmulps %ymm7, %ymm12, %ymm7
vmovaps %ymm3, 0x300(%rsp)
vmulps %ymm4, %ymm3, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vbroadcastss 0x14(%r9), %ymm13
vbroadcastss 0x18(%r9), %ymm9
vmulps %ymm4, %ymm9, %ymm4
vmulps %ymm5, %ymm13, %ymm5
vbroadcastss 0x10(%r9), %ymm6
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm1, %ymm6, %ymm5
vaddps %ymm4, %ymm5, %ymm10
vmovaps %ymm11, %ymm8
vsubps %ymm11, %ymm15, %ymm11
vmovaps 0x1e0(%rsp), %ymm3
vsubps %ymm3, %ymm0, %ymm12
vmovaps %ymm15, 0x340(%rsp)
vaddps %ymm8, %ymm15, %ymm4
vmovaps %ymm0, 0x320(%rsp)
vaddps %ymm3, %ymm0, %ymm5
vmovaps %ymm3, %ymm0
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm5, %ymm11, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vmovaps 0x1c0(%rsp), %ymm1
vsubps %ymm1, %ymm2, %ymm15
vmulps %ymm5, %ymm15, %ymm5
vmovaps %ymm2, 0x360(%rsp)
vaddps %ymm1, %ymm2, %ymm7
vmovaps %ymm12, 0x2a0(%rsp)
vmulps %ymm7, %ymm12, %ymm12
vsubps %ymm5, %ymm12, %ymm5
vmovaps %ymm11, %ymm12
vmulps %ymm7, %ymm11, %ymm7
vmulps %ymm4, %ymm15, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vmulps %ymm4, %ymm9, %ymm4
vmulps %ymm5, %ymm13, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm6, %ymm3
vaddps %ymm4, %ymm3, %ymm4
vmovaps 0x180(%rsp), %ymm2
vsubps %ymm2, %ymm1, %ymm7
vaddps %ymm2, %ymm1, %ymm2
vmovaps 0x1a0(%rsp), %ymm1
vsubps %ymm1, %ymm8, %ymm11
vaddps %ymm1, %ymm8, %ymm1
vsubps %ymm14, %ymm0, %ymm5
vaddps %ymm0, %ymm14, %ymm3
vmovaps %ymm7, %ymm0
vmulps %ymm1, %ymm5, %ymm7
vmulps %ymm3, %ymm11, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm3, %ymm0, %ymm3
vmulps %ymm2, %ymm5, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm11, %ymm2
vmulps %ymm1, %ymm0, %ymm1
vsubps %ymm2, %ymm1, %ymm1
vmovaps %ymm9, 0x180(%rsp)
vmulps %ymm1, %ymm9, %ymm1
vmovaps %ymm13, 0x1c0(%rsp)
vmulps %ymm3, %ymm13, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm6, 0x1a0(%rsp)
vmulps %ymm7, %ymm6, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm4, %ymm10, %ymm2
vaddps %ymm2, %ymm1, %ymm6
vminps %ymm4, %ymm10, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d43048(%rip), %ymm3 # 0x1f20ec4
vmovaps %ymm6, 0x240(%rsp)
vandps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1d4303a(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm6, 0x260(%rsp)
vmulps %ymm3, %ymm6, %ymm3
vbroadcastss 0x1d43018(%rip), %ymm7 # 0x1f20ec0
vxorps %ymm7, %ymm3, %ymm7
vcmpnltps %ymm7, %ymm2, %ymm2
vmovaps %ymm10, 0x1e0(%rsp)
vmovaps %ymm4, 0x280(%rsp)
vmaxps %ymm4, %ymm10, %ymm7
vmaxps %ymm1, %ymm7, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm1
vtestps %ymm1, %ymm1
je 0x1de506
vmovaps 0x2c0(%rsp), %ymm6
vmovaps %ymm11, %ymm8
vmovaps %ymm12, %ymm11
vmulps %ymm6, %ymm12, %ymm2
vmovaps 0x2e0(%rsp), %ymm13
vmovaps 0x2a0(%rsp), %ymm9
vmulps %ymm9, %ymm13, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm8, %ymm9, %ymm7
vmulps %ymm5, %ymm12, %ymm12
vsubps %ymm7, %ymm12, %ymm12
vbroadcastss 0x1d42f9d(%rip), %ymm4 # 0x1f20ec4
vandps %ymm4, %ymm2, %ymm2
vandps %ymm4, %ymm7, %ymm7
vcmpltps %ymm7, %ymm2, %ymm2
vblendvps %ymm2, %ymm3, %ymm12, %ymm14
vmulps %ymm5, %ymm15, %ymm2
vmulps %ymm6, %ymm15, %ymm3
vmovaps 0x300(%rsp), %ymm10
vmulps %ymm9, %ymm10, %ymm6
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm0, %ymm9, %ymm7
vsubps %ymm2, %ymm7, %ymm7
vandps %ymm4, %ymm6, %ymm6
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm6, %ymm2
vblendvps %ymm2, %ymm3, %ymm7, %ymm9
vmulps %ymm0, %ymm11, %ymm2
vmulps %ymm11, %ymm10, %ymm3
vmulps %ymm15, %ymm13, %ymm5
vmulps %ymm8, %ymm15, %ymm0
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm4, %ymm5, %ymm5
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm5, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm0
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmulps 0x180(%rsp), %ymm0, %ymm2
vmulps 0x1c0(%rsp), %ymm9, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x1a0(%rsp), %ymm14, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm2, %ymm3
vmulps 0x320(%rsp), %ymm0, %ymm2
vmulps 0x340(%rsp), %ymm9, %ymm5
vaddps %ymm2, %ymm5, %ymm2
vmulps 0x360(%rsp), %ymm14, %ymm5
vrcpps %ymm3, %ymm6
vaddps %ymm2, %ymm5, %ymm2
vaddps %ymm2, %ymm2, %ymm2
vmulps %ymm6, %ymm3, %ymm5
vbroadcastss 0x1d0e70e(%rip), %ymm4 # 0x1eec714
vsubps %ymm5, %ymm4, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%r9), %ymm6
vmulps %ymm5, %ymm2, %ymm2
vcmpleps %ymm2, %ymm6, %ymm5
vbroadcastss 0x20(%r9), %ymm6
vcmpleps %ymm6, %ymm2, %ymm6
vandps %ymm5, %ymm6, %ymm5
vcmpneqps 0x1d42ec7(%rip), %ymm3, %ymm3 # 0x1f20f00
vandps %ymm3, %ymm5, %ymm3
vextractf128 $0x1, %ymm3, %xmm5
vpackssdw %xmm5, %xmm3, %xmm3
vpand %xmm1, %xmm3, %xmm1
vpmovsxwd %xmm1, %xmm3
vpshufd $0xee, %xmm1, %xmm5 # xmm5 = xmm1[2,3,2,3]
vpmovsxwd %xmm5, %xmm5
vinsertf128 $0x1, %xmm5, %ymm3, %ymm3
vtestps %ymm3, %ymm3
je 0x1de506
vmovaps 0x1e0(%rsp), %ymm4
vmovaps %ymm4, 0x3c0(%rsp)
vmovaps 0x280(%rsp), %ymm7
vmovaps %ymm7, 0x3e0(%rsp)
vmovaps 0x240(%rsp), %ymm5
vmovaps %ymm5, 0x400(%rsp)
vmovaps %ymm14, 0x420(%rsp)
vmovaps %ymm9, 0x440(%rsp)
vmovaps %ymm0, 0x460(%rsp)
vmovaps %ymm3, 0x480(%rsp)
vmovaps %ymm2, 0x4e0(%rsp)
vmovaps 0x220(%rsp), %ymm6
vmovaps %ymm6, 0x560(%rsp)
vrcpps %ymm5, %ymm2
vmulps %ymm2, %ymm5, %ymm3
vbroadcastss 0x1d0e623(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1d12ee2(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x260(%rsp), %ymm8
vcmpnltps %ymm3, %ymm8, %ymm3
vandps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm4, %ymm3
vminps %ymm5, %ymm3, %ymm3
vmulps %ymm2, %ymm7, %ymm2
vminps %ymm5, %ymm2, %ymm2
vsubps %ymm3, %ymm5, %ymm4
vsubps %ymm2, %ymm5, %ymm5
vblendvps %ymm6, %ymm4, %ymm2, %ymm2
vblendvps %ymm6, %ymm5, %ymm3, %ymm3
vmovaps %ymm2, 0x4c0(%rsp)
vmovaps %ymm3, 0x4a0(%rsp)
vmovaps 0x200(%rsp), %ymm4
vmulps %ymm4, %ymm14, %ymm2
vmulps %ymm4, %ymm9, %ymm3
vmulps %ymm0, %ymm4, %ymm0
vmovaps %ymm2, 0x500(%rsp)
vmovaps %ymm3, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vpsllw $0xf, %xmm1, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
vmovaps 0xe0(%rsp), %xmm7
vmovaps 0xd0(%rsp), %xmm8
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm11
vmovaps 0x90(%rsp), %xmm12
vmovaps 0x80(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x50(%rsp), %xmm6
vmovaps 0xf0(%rsp), %xmm4
movq 0x20(%rsp), %rdx
bsfq %r15, %r12
movl 0x3a0(%rsp,%r12,4), %eax
movq 0x1e8(%rdx), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%r9), %ecx
testl %ecx, 0x34(%r13)
je 0x1de22f
movq 0x10(%r8), %r14
cmpq $0x0, 0x10(%r14)
jne 0x1de24f
cmpq $0x0, 0x48(%r13)
jne 0x1de24f
xorl %eax, %eax
jmp 0x1de235
btcq %r12, %r15
movb $0x1, %al
movq 0x18(%rsp), %r12
xorl %r13d, %r13d
testb %al, %al
je 0x1de574
testq %r15, %r15
jne 0x1de1f8
jmp 0x1de4d6
vmovss 0x4a0(%rsp,%r12,4), %xmm0
vmovd 0x4c0(%rsp,%r12,4), %xmm1
movq 0x8(%r8), %rcx
movl 0x380(%rsp,%r12,4), %edx
vmovss 0x500(%rsp,%r12,4), %xmm2
vmovss 0x520(%rsp,%r12,4), %xmm3
vmovss 0x540(%rsp,%r12,4), %xmm4
vmovss %xmm2, 0x120(%rsp)
vmovss %xmm3, 0x124(%rsp)
vmovss %xmm4, 0x128(%rsp)
vmovss %xmm0, 0x12c(%rsp)
vmovd %xmm1, 0x130(%rsp)
movl %edx, 0x134(%rsp)
movl %eax, 0x138(%rsp)
movl (%rcx), %eax
movl %eax, 0x13c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x140(%rsp)
vmovss 0x20(%r9), %xmm0
vmovss %xmm0, 0x1c0(%rsp)
vmovd 0x4e0(%rsp,%r12,4), %xmm0
vmovd %xmm0, 0x20(%r9)
orl $-0x1, 0x2c(%rsp)
leaq 0x2c(%rsp), %rax
movq %rax, 0x150(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0x158(%rsp)
movq %rcx, 0x160(%rsp)
movq %r9, 0x168(%rsp)
leaq 0x120(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x1, 0x178(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r10, 0x38(%rsp)
movq %r11, 0x30(%rsp)
je 0x1de3fd
leaq 0x150(%rsp), %rdi
movq %r8, 0x1a0(%rsp)
movq %r9, 0x180(%rsp)
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x60(%rsp), %xmm5
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
vmovaps 0x70(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm13
vmovaps 0x90(%rsp), %xmm12
vmovaps 0xa0(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm9
movq 0x30(%rsp), %r11
vmovaps 0xd0(%rsp), %xmm8
vmovaps 0xe0(%rsp), %xmm7
movq 0x38(%rsp), %r10
movq 0x180(%rsp), %r9
movq 0x1a0(%rsp), %r8
movq 0x150(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1de4a6
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x1de4a2
testb $0x2, (%r14)
jne 0x1de417
testb $0x40, 0x3e(%r13)
je 0x1de495
leaq 0x150(%rsp), %rdi
movq %r8, %r14
movq %r9, %r13
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x60(%rsp), %xmm5
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
vmovaps 0x70(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm13
vmovaps 0x90(%rsp), %xmm12
vmovaps 0xa0(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm9
movq 0x30(%rsp), %r11
vmovaps 0xd0(%rsp), %xmm8
vmovaps 0xe0(%rsp), %xmm7
movq 0x38(%rsp), %r10
movq %r13, %r9
movq %r14, %r8
movq 0x150(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1de4a6
xorl %eax, %eax
jmp 0x1de4bb
vmovd 0x1c0(%rsp), %xmm0
vmovd %xmm0, 0x20(%r9)
btcq %r12, %r15
movb $0x1, %al
vmovaps 0xf0(%rsp), %xmm4
movq 0x18(%rsp), %r12
xorl %r13d, %r13d
movq 0x20(%rsp), %rdx
jmp 0x1de23d
movq 0x48(%rsp), %r14
movq 0x40(%rsp), %r15
movq 0x110(%rsp), %rcx
incq %rcx
movq 0x108(%rsp), %rax
cmpq %rax, %rcx
setb 0x7(%rsp)
jne 0x1ddaba
jmp 0x1de591
vmovaps 0xe0(%rsp), %xmm7
vmovaps 0xd0(%rsp), %xmm8
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm11
vmovaps 0x90(%rsp), %xmm12
vmovaps 0x80(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x50(%rsp), %xmm6
vmovaps 0xf0(%rsp), %xmm4
movq 0x20(%rsp), %rdx
jmp 0x1de4e0
testb $0x1, 0x7(%rsp)
movq 0x48(%rsp), %r14
movq 0x40(%rsp), %r15
je 0x1de591
movl $0xff800000, 0x20(%r9) # imm = 0xFF800000
pushq $0x3
popq %r13
cmpl $0x3, %r13d
jne 0x1dd961
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiMBIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xce0, %rsp # imm = 0xCE0
movq %rdx, 0x20(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1de5e7
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x1de5d5
leaq 0x528(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm2
vmaxss 0xc(%rsi), %xmm5, %xmm1
vbroadcastss 0x1d428ab(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d129c2(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d0e0d2(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[0,0,0,0]
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm10 # xmm10 = xmm2[1,1,1,1]
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vshufps $0xaa, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rbx
xorq $0x10, %rbx
movq %r10, %r15
xorq $0x10, %r15
vshufps $0x0, %xmm1, %xmm1, %xmm12 # xmm12 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm13 # xmm13 = xmm0[0,0,0,0]
leaq 0x1f718ae(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x300(%rsp)
vperm2f128 $0x2, (%rax), %ymm0, %ymm1 # ymm1 = mem[0,1],ymm0[0,1]
leaq 0x520(%rsp), %r12
vbroadcastss 0x1d122d1(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1d0e010(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x340(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x320(%rsp)
movq %rsi, 0x28(%rsp)
vmovaps %xmm6, 0x1d0(%rsp)
vmovaps %xmm7, 0x1c0(%rsp)
vmovaps %xmm8, 0x1b0(%rsp)
movq %r8, 0x60(%rsp)
vmovaps %xmm9, 0x1a0(%rsp)
vmovaps %xmm10, 0x190(%rsp)
vmovaps %xmm11, 0x180(%rsp)
movq %r9, 0x58(%rsp)
movq %r10, 0x50(%rsp)
movq %r11, 0x48(%rsp)
movq %rbx, 0x40(%rsp)
movq %r15, 0x38(%rsp)
vmovaps %xmm12, 0x170(%rsp)
vmovaps %xmm13, 0x160(%rsp)
cmpq %r12, %rdi
je 0x1de5d5
movq -0x8(%rdi), %r14
addq $-0x8, %rdi
testb $0x8, %r14b
jne 0x1de876
movq %r14, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmaxps %xmm1, %xmm12, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmulps 0x80(%rax,%r10), %xmm0, %xmm3
vaddps 0x20(%rax,%r10), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r11), %xmm0, %xmm2
vaddps 0x20(%rax,%r11), %xmm2, %xmm2
vsubps %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%rbx), %xmm0, %xmm3
vminps %xmm2, %xmm13, %xmm2
vaddps 0x20(%rax,%rbx), %xmm3, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmulps 0x80(%rax,%r15), %xmm0, %xmm4
vaddps 0x20(%rax,%r15), %xmm4, %xmm4
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r14d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1de8be
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r13d
testb $0x8, %r14b
jne 0x1de8ba
testq %r13, %r13
je 0x1de8e3
andq $-0x10, %r14
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
xorl %ecx, %ecx
movq (%r14,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdx
jne 0x1de8e8
movq %rax, %r14
testl %ecx, %ecx
je 0x1de798
jmp 0x1de92d
pushq $0x6
jmp 0x1de8e5
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1de86d
pushq $0x4
popq %rcx
jmp 0x1de8b0
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r14,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x1de926
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1de8f7
movq %rcx, %r14
xorl %ecx, %ecx
jmp 0x1de8b0
cmpl $0x6, %ecx
jne 0x1df3ce
movl %r14d, %eax
andl $0xf, %eax
xorl %ecx, %ecx
addq $-0x8, %rax
movq %rax, 0xa8(%rsp)
setne %dl
je 0x1df3ce
movq %r13, 0x68(%rsp)
movq %rdi, 0x70(%rsp)
andq $-0x10, %r14
movq 0x20(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x78(%rsp)
xorl %eax, %eax
movb %dl, 0x1f(%rsp)
movq %rax, 0xd8(%rsp)
imulq $0x60, %rax, %rax
vmovss 0x1c(%rsi), %xmm0
movl 0x40(%r14,%rax), %ecx
movq 0x78(%rsp), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rsi
vmovss 0x28(%rsi), %xmm1
vmovss 0x2c(%rsi), %xmm2
vmovss 0x30(%rsi), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x1d12005(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vmaxss %xmm1, %xmm5, %xmm3
vcvttss2si %xmm3, %ecx
movslq %ecx, %rcx
movq 0xe0(%rsi), %r15
imulq $0x38, %rcx, %r9
movq (%r15,%r9), %rsi
movl (%r14,%rax), %ebx
movl 0x4(%r14,%rax), %r8d
movq %r8, 0xc0(%rsp)
vmovups (%rsi,%rbx,4), %xmm2
movl 0x10(%r14,%rax), %edi
movq %rdi, 0x1e0(%rsp)
vmovups (%rsi,%rdi,4), %xmm1
movl 0x20(%r14,%rax), %edi
movq %rdi, 0x140(%rsp)
vmovups (%rsi,%rdi,4), %xmm7
movl 0x30(%r14,%rax), %edi
movq %rdi, 0x80(%rsp)
vmovups (%rsi,%rdi,4), %xmm8
movl 0x44(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %rsi
movq %rsi, 0xb0(%rsp)
movq (%rsi,%r9), %rsi
vmovups (%rsi,%r8,4), %xmm4
movl 0x14(%r14,%rax), %edi
movq %rdi, 0xd0(%rsp)
vmovups (%rsi,%rdi,4), %xmm5
movl 0x24(%r14,%rax), %edi
movq %rdi, 0x200(%rsp)
vmovups (%rsi,%rdi,4), %xmm9
movl 0x34(%r14,%rax), %edi
movq %rdi, 0x220(%rsp)
vmovups (%rsi,%rdi,4), %xmm10
movl 0x48(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %r8
movq (%r8,%r9), %rsi
movl 0x8(%r14,%rax), %edi
vmovups (%rsi,%rdi,4), %xmm13
movl 0x18(%r14,%rax), %r13d
vmovups (%rsi,%r13,4), %xmm6
movl 0x28(%r14,%rax), %r10d
movq %r10, 0xb8(%rsp)
vmovups (%rsi,%r10,4), %xmm11
movl 0x38(%r14,%rax), %r10d
movq %r10, 0xc8(%rsp)
vmovups (%rsi,%r10,4), %xmm12
movl 0x4c(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rdx
vsubss %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x2f0(%rsp)
movq 0xe0(%rdx), %rsi
movq (%rsi,%r9), %r12
movl 0xc(%r14,%rax), %edx
vmovups (%r12,%rdx,4), %xmm0
movl 0x1c(%r14,%rax), %r11d
vmovups (%r12,%r11,4), %xmm3
vunpcklps %xmm13, %xmm2, %xmm14 # xmm14 = xmm2[0],xmm13[0],xmm2[1],xmm13[1]
vunpckhps %xmm13, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm13[2],xmm2[3],xmm13[3]
vunpcklps %xmm0, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
vunpckhps %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
movl 0x2c(%r14,%rax), %r9d
vmovups (%r12,%r9,4), %xmm4
vunpcklps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vmovaps %xmm0, 0x2d0(%rsp)
vunpcklps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
vmovaps %xmm0, 0x290(%rsp)
vunpckhps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
vmovaps %xmm0, 0x2b0(%rsp)
vunpcklps %xmm6, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpckhps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
vunpcklps %xmm3, %xmm5, %xmm2 # xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
vunpckhps %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
movl 0x3c(%r14,%rax), %r10d
vmovups (%r12,%r10,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x2e0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x2a0(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x2c0(%rsp)
vunpcklps %xmm11, %xmm7, %xmm0 # xmm0 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
vunpckhps %xmm11, %xmm7, %xmm1 # xmm1 = xmm7[2],xmm11[2],xmm7[3],xmm11[3]
vunpcklps %xmm4, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpckhps %xmm4, %xmm9, %xmm3 # xmm3 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
incl %ecx
movslq %ecx, %rcx
imulq $0x38, %rcx, %r12
movq (%r15,%r12), %rcx
vmovups (%rcx,%rbx,4), %xmm4
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x280(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x240(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x260(%rsp)
vunpcklps %xmm12, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
vunpckhps %xmm12, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
vunpcklps %xmm5, %xmm10, %xmm2 # xmm2 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
vunpckhps %xmm5, %xmm10, %xmm3 # xmm3 = xmm10[2],xmm5[2],xmm10[3],xmm5[3]
movq (%r8,%r12), %r8
vmovups (%r8,%rdi,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x270(%rsp)
vunpcklps %xmm2, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x250(%rsp)
movq 0xb0(%rsp), %rdi
movq (%rdi,%r12), %rdi
vunpcklps %xmm5, %xmm4, %xmm0 # xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0xc0(%rsp), %rbx
vmovups (%rdi,%rbx,4), %xmm2
movq (%rsi,%r12), %rsi
vmovups (%rsi,%rdx,4), %xmm3
vunpcklps %xmm3, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vunpcklps %xmm2, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpcklps %xmm4, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
vunpckhps %xmm4, %xmm0, %xmm8 # xmm8 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
movq 0x1e0(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
vmovups (%r8,%r13,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0xd0(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%rsi,%r11,4), %xmm2
vunpcklps %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpckhps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vunpcklps %xmm1, %xmm0, %xmm14 # xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vunpckhps %xmm4, %xmm3, %xmm7 # xmm7 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
movq 0x140(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0xb8(%rsp), %rdx
vmovups (%r8,%rdx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x200(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%rsi,%r9,4), %xmm3
vunpcklps %xmm3, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vunpcklps %xmm1, %xmm0, %xmm15 # xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm9 # xmm9 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0x80(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0xc8(%rsp), %rcx
vmovups (%r8,%rcx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x220(%rsp), %rcx
vmovups (%rdi,%rcx,4), %xmm1
vmovups (%rsi,%r10,4), %xmm4
movq 0x28(%rsp), %rsi
vunpcklps %xmm4, %xmm1, %xmm11 # xmm11 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpckhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
vunpcklps %xmm1, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm11, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3]
vmovaps 0x2f0(%rsp), %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[0,0,0,0]
vmovss 0x1d0d9c6(%rip), %xmm1 # 0x1eec714
vsubss %xmm11, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm10
vmulps 0x290(%rsp), %xmm1, %xmm11
vaddps %xmm10, %xmm11, %xmm11
vmulps %xmm0, %xmm8, %xmm8
vmulps 0x2b0(%rsp), %xmm1, %xmm10
vaddps %xmm8, %xmm10, %xmm10
vmulps %xmm0, %xmm13, %xmm8
vmulps 0x2d0(%rsp), %xmm1, %xmm13
vaddps %xmm8, %xmm13, %xmm13
vmulps %xmm2, %xmm0, %xmm2
vmulps 0x2a0(%rsp), %xmm1, %xmm8
vaddps %xmm2, %xmm8, %xmm8
vmulps %xmm7, %xmm0, %xmm2
vmulps 0x2c0(%rsp), %xmm1, %xmm7
vaddps %xmm2, %xmm7, %xmm2
vmulps %xmm0, %xmm14, %xmm7
vmulps 0x2e0(%rsp), %xmm1, %xmm14
vaddps %xmm7, %xmm14, %xmm14
vmulps %xmm3, %xmm0, %xmm3
vmulps 0x240(%rsp), %xmm1, %xmm7
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm0, %xmm9, %xmm7
vmulps 0x260(%rsp), %xmm1, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmulps %xmm0, %xmm15, %xmm9
vmulps 0x280(%rsp), %xmm1, %xmm15
vaddps %xmm9, %xmm15, %xmm9
vmulps %xmm4, %xmm0, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm0
vmulps %xmm1, %xmm12, %xmm6
vaddps %xmm4, %xmm6, %xmm15
vbroadcastf128 0x40(%r14,%rax), %ymm6 # ymm6 = mem[0,1,0,1]
vmovaps %ymm6, 0x380(%rsp)
vbroadcastf128 0x50(%r14,%rax), %ymm6 # ymm6 = mem[0,1,0,1]
movq 0xd8(%rsp), %rax
vmulps 0x250(%rsp), %xmm1, %xmm12
vaddps %xmm5, %xmm12, %xmm12
vmulps 0x270(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmovaps %ymm6, 0x360(%rsp)
vinsertf128 $0x1, %xmm3, %ymm11, %ymm4
vinsertf128 $0x1, %xmm7, %ymm10, %ymm5
vinsertf128 $0x1, %xmm9, %ymm13, %ymm7
vinsertf128 $0x1, %xmm8, %ymm8, %ymm1
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm14, %ymm14, %ymm8
vinsertf128 $0x1, %xmm15, %ymm15, %ymm9
vinsertf128 $0x1, %xmm12, %ymm12, %ymm10
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vsubps %ymm1, %ymm4, %ymm3
vsubps %ymm2, %ymm5, %ymm12
vmovaps %ymm12, 0x140(%rsp)
vsubps %ymm8, %ymm7, %ymm8
vsubps %ymm4, %ymm9, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm0, %ymm11
vmulps %ymm11, %ymm12, %ymm0
vmulps %ymm10, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmulps %ymm9, %ymm8, %ymm0
vmovaps %ymm3, 0x80(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vsubps %ymm0, %ymm2, %ymm6
vmulps %ymm3, %ymm10, %ymm0
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm0, %ymm12, %ymm3
vbroadcastss (%rsi), %ymm12
vbroadcastss 0x4(%rsi), %ymm13
vbroadcastss 0x8(%rsi), %ymm14
vbroadcastss 0x14(%rsi), %ymm15
vsubps %ymm12, %ymm4, %ymm2
vbroadcastss 0x18(%rsi), %ymm12
vsubps %ymm13, %ymm5, %ymm5
vsubps %ymm14, %ymm7, %ymm7
vmulps %ymm7, %ymm15, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x10(%rsi), %ymm14
vmulps %ymm2, %ymm12, %ymm0
vmulps %ymm7, %ymm14, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm5, %ymm14, %ymm1
vmovaps %ymm2, 0x200(%rsp)
vmulps %ymm2, %ymm15, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm3, 0x1e0(%rsp)
vmulps %ymm3, %ymm12, %ymm2
vmulps %ymm6, %ymm15, %ymm12
vmovaps %ymm6, %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmovaps 0x220(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm12
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm11, %ymm11
vmulps %ymm0, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm1, %ymm8, %ymm1
vmulps 0x140(%rsp), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d41f28(%rip), %ymm1 # 0x1f20ec0
vandps %ymm1, %ymm2, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0x80(%rsp), %ymm13, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %ymm10, %ymm6, %ymm0
vcmpnltps %ymm10, %ymm8, %ymm1
vandps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d41ef4(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm2, %ymm4
vcmpneqps %ymm2, %ymm10, %ymm1
vandps %ymm1, %ymm0, %ymm0
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm10
vtestps 0x300(%rsp), %ymm10
jne 0x1df013
incq %rax
cmpq 0xa8(%rsp), %rax
setb %dl
vxorps %xmm5, %xmm5, %xmm5
jne 0x1de970
jmp 0x1df354
vmovaps %ymm3, %ymm14
vandps 0x300(%rsp), %ymm10, %ymm10
vmulps 0x1e0(%rsp), %ymm7, %ymm0
vmulps %ymm5, %ymm15, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x200(%rsp), %ymm3, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm3
vbroadcastss 0xc(%rsi), %ymm0
vmulps %ymm0, %ymm4, %ymm0
vcmpltps %ymm3, %ymm0, %ymm0
vbroadcastss 0x20(%rsi), %ymm1
vmulps %ymm1, %ymm4, %ymm1
vcmpleps %ymm1, %ymm3, %ymm1
vandps %ymm0, %ymm1, %ymm5
vtestps %ymm10, %ymm5
je 0x1deff6
vandps %ymm5, %ymm10, %ymm0
vmovaps %ymm6, 0x3a0(%rsp)
vmovaps %ymm8, 0x3c0(%rsp)
vmovaps %ymm3, 0x3e0(%rsp)
vmovaps %ymm4, 0x400(%rsp)
vmovaps %ymm0, 0x440(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps 0x3c0(%rsp), %ymm1
vsubps %ymm1, %ymm4, %ymm2
vmovaps 0x340(%rsp), %ymm3
vblendvps %ymm3, %ymm2, %ymm0, %ymm2
vmovaps %ymm2, 0x3a0(%rsp)
vsubps %ymm0, %ymm4, %ymm0
vblendvps %ymm3, %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x320(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm1
vmovaps %ymm1, 0x4c0(%rsp)
vmulps %ymm3, %ymm15, %ymm1
vmovaps %ymm1, 0x4e0(%rsp)
vrcpps %ymm4, %ymm1
vmulps 0x1e0(%rsp), %ymm3, %ymm3
vmovaps %ymm3, 0x500(%rsp)
vmulps %ymm1, %ymm4, %ymm3
vbroadcastss 0x1d0d5f1(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vmulps 0x3e0(%rsp), %ymm1, %ymm3
vmovaps %ymm3, 0x4a0(%rsp)
vmulps %ymm1, %ymm2, %ymm2
vmovaps %ymm2, 0x460(%rsp)
vmulps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovmskps %ymm0, %edx
movq %rax, %r13
bsfq %rdx, %rbx
movl 0x380(%rsp,%rbx,4), %eax
movq 0x78(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r12
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r12)
je 0x1df1ac
movq 0x20(%rsp), %rcx
movq 0x10(%rcx), %r15
cmpq $0x0, 0x10(%r15)
jne 0x1df1c7
cmpq $0x0, 0x48(%r12)
jne 0x1df1c7
xorl %eax, %eax
jmp 0x1df1b2
btcq %rbx, %rdx
movb $0x1, %al
testb %al, %al
je 0x1df3dc
testq %rdx, %rdx
movq %r13, %rax
jne 0x1df168
jmp 0x1deff6
movq %rdx, 0x80(%rsp)
vmovss 0x460(%rsp,%rbx,4), %xmm0
vmovss 0x480(%rsp,%rbx,4), %xmm1
movq 0x20(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0x360(%rsp,%rbx,4), %edx
vmovss 0x4c0(%rsp,%rbx,4), %xmm2
vmovss 0x4e0(%rsp,%rbx,4), %xmm3
vmovss 0x500(%rsp,%rbx,4), %xmm4
vmovss %xmm2, 0xe0(%rsp)
vmovss %xmm3, 0xe4(%rsp)
vmovss %xmm4, 0xe8(%rsp)
vmovss %xmm0, 0xec(%rsp)
vmovss %xmm1, 0xf0(%rsp)
movl %edx, 0xf4(%rsp)
movl %eax, 0xf8(%rsp)
movl (%rcx), %eax
movl %eax, 0xfc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x100(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x140(%rsp)
vmovss 0x4a0(%rsp,%rbx,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x34(%rsp)
leaq 0x34(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x118(%rsp)
movq %rcx, 0x120(%rsp)
movq %rsi, 0x128(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
je 0x1df2e4
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1df319
movq 0x10(%r15), %rax
testq %rax, %rax
je 0x1df315
testb $0x2, (%r15)
jne 0x1df2fb
testb $0x40, 0x3e(%r12)
je 0x1df308
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1df319
xorl %eax, %eax
jmp 0x1df342
movq 0x28(%rsp), %rax
vmovss 0x140(%rsp), %xmm0
vmovss %xmm0, 0x20(%rax)
movq 0x80(%rsp), %rax
btcq %rbx, %rax
movq %rax, 0x80(%rsp)
movb $0x1, %al
movq 0x28(%rsp), %rsi
movq 0x80(%rsp), %rdx
jmp 0x1df1b2
movq 0x70(%rsp), %rdi
vmovaps 0x1d0(%rsp), %xmm6
vmovaps 0x1c0(%rsp), %xmm7
vmovaps 0x1b0(%rsp), %xmm8
movq 0x60(%rsp), %r8
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x190(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm11
movq 0x58(%rsp), %r9
movq 0x50(%rsp), %r10
movq 0x48(%rsp), %r11
movq 0x40(%rsp), %rbx
movq 0x38(%rsp), %r15
vmovaps 0x170(%rsp), %xmm12
vmovaps 0x160(%rsp), %xmm13
leaq 0x520(%rsp), %r12
movq 0x68(%rsp), %r13
xorl %ecx, %ecx
cmpl $0x3, %ecx
jne 0x1de787
jmp 0x1de5d5
testb $0x1, 0x1f(%rsp)
vxorps %xmm5, %xmm5, %xmm5
movq 0x70(%rsp), %rdi
vmovaps 0x1d0(%rsp), %xmm6
vmovaps 0x1c0(%rsp), %xmm7
vmovaps 0x1b0(%rsp), %xmm8
movq 0x60(%rsp), %r8
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x190(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm11
movq 0x58(%rsp), %r9
movq 0x50(%rsp), %r10
movq 0x48(%rsp), %r11
movq 0x40(%rsp), %rbx
movq 0x38(%rsp), %r15
vmovaps 0x170(%rsp), %xmm12
vmovaps 0x160(%rsp), %xmm13
leaq 0x520(%rsp), %r12
movq 0x68(%rsp), %r13
movl $0x0, %ecx
je 0x1df3ce
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rcx
jmp 0x1df3ce
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::QuadMiMBIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xe00, %rsp # imm = 0xE00
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1e068b
vmovss 0x20(%rsi), %xmm0
vxorps %xmm6, %xmm6, %xmm6
vucomiss %xmm0, %xmm6
ja 0x1e068b
leaq 0x648(%rsp), %r8
movq 0x70(%rax), %rax
movq %rax, -0x8(%r8)
vmovaps 0x10(%rsi), %xmm2
vmaxss 0xc(%rsi), %xmm6, %xmm1
vbroadcastss 0x1d419f0(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d11b07(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1d0d225(%rip), %xmm4 # 0x1eec714
vdivps %xmm2, %xmm4, %xmm2
vbroadcastss 0x1d41a64(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d40a05(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d409fc(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm8
vbroadcastss 0x8(%rsi), %xmm9
xorl %r9d, %r9d
vucomiss %xmm6, %xmm3
setb %r9b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vmovaps %xmm4, 0x3f0(%rsp)
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm12 # xmm12 = xmm3[2,2,2,2]
vshufps $0x0, %xmm2, %xmm2, %xmm13 # xmm13 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm14 # xmm14 = xmm2[1,1,1,1]
shll $0x4, %r9d
xorl %r10d, %r10d
vucomiss %xmm6, %xmm4
setb %r10b
shll $0x4, %r10d
orq $0x20, %r10
xorl %r11d, %r11d
vucomiss %xmm6, %xmm5
vshufps $0xaa, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[2,2,2,2]
setb %r11b
shll $0x4, %r11d
orq $0x40, %r11
movq %r9, %rbx
xorq $0x10, %rbx
movq %r10, %r15
xorq $0x10, %r15
movq %r11, %rax
xorq $0x10, %rax
movq %rax, 0x158(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm15 # xmm15 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x3e0(%rsp)
leaq 0x1f709b7(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm1
vbroadcastss 0x1d418e4(%rip), %ymm6 # 0x1f20ec4
vbroadcastss 0x1d0d12b(%rip), %ymm10 # 0x1eec714
vbroadcastss 0x1d113da(%rip), %ymm0 # 0x1ef09cc
vmovaps %ymm1, 0x420(%rsp)
vblendvps %ymm1, %ymm0, %ymm10, %ymm0
vmovaps %ymm0, 0x400(%rsp)
movq %rdx, 0x18(%rsp)
movq %rsi, 0x30(%rsp)
vmovaps %xmm7, 0xf0(%rsp)
vmovaps %xmm8, 0xe0(%rsp)
vmovaps %xmm9, 0xd0(%rsp)
movq %r9, 0x60(%rsp)
vmovaps %xmm11, 0xc0(%rsp)
vmovaps %xmm12, 0xb0(%rsp)
vmovaps %xmm13, 0xa0(%rsp)
vmovaps %xmm14, 0x90(%rsp)
movq %r10, 0x58(%rsp)
movq %r11, 0x50(%rsp)
vmovaps %xmm5, 0x80(%rsp)
movq %rbx, 0x48(%rsp)
movq %r15, 0x40(%rsp)
vmovaps %xmm15, 0x70(%rsp)
leaq 0x640(%rsp), %rax
cmpq %rax, %r8
je 0x1e068b
movq -0x8(%r8), %r14
addq $-0x8, %r8
testb $0x8, %r14b
jne 0x1df788
movq %r14, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r9), %xmm0, %xmm1
vaddps 0x20(%rax,%r9), %xmm1, %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps 0x3f0(%rsp), %xmm1, %xmm1
vmaxps %xmm1, %xmm15, %xmm1
vmulps 0x80(%rax,%r10), %xmm0, %xmm2
vaddps 0x20(%rax,%r10), %xmm2, %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm2
vmulps 0x80(%rax,%r11), %xmm0, %xmm3
vaddps 0x20(%rax,%r11), %xmm3, %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm3, %xmm12, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%rbx), %xmm0, %xmm2
vaddps 0x20(%rax,%rbx), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm13, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vmovaps 0x3e0(%rsp), %xmm4
vminps %xmm2, %xmm4, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
movq 0x158(%rsp), %rcx
vmulps 0x80(%rax,%rcx), %xmm0, %xmm4
vaddps 0x20(%rax,%rcx), %xmm4, %xmm4
vmulps %xmm3, %xmm14, %xmm3
vsubps %xmm9, %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r14d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1df7d2
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r13d
testb $0x8, %r14b
jne 0x1df7ce
testq %r13, %r13
je 0x1df7f7
andq $-0x10, %r14
bsfq %r13, %rax
leaq -0x1(%r13), %rdi
xorl %r12d, %r12d
movq (%r14,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdi
jne 0x1df7fd
movq %rax, %r14
testl %r12d, %r12d
je 0x1df694
jmp 0x1df840
pushq $0x6
jmp 0x1df7f9
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1df77f
pushq $0x4
popq %r12
jmp 0x1df7c3
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%r14,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x1df83b
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x1df80c
movq %rcx, %r14
jmp 0x1df7c3
cmpl $0x6, %r12d
jne 0x1e0681
movl %r14d, %eax
andl $0xf, %eax
xorl %r12d, %r12d
addq $-0x8, %rax
setne %cl
je 0x1e0681
movq %r8, 0x68(%rsp)
andq $-0x10, %r14
movq (%rdx), %r8
xorl %edi, %edi
movq %r13, 0x28(%rsp)
movq %rax, 0x138(%rsp)
movq %r8, 0x20(%rsp)
movb %cl, 0x17(%rsp)
movq %rdi, 0x140(%rsp)
imulq $0x60, %rdi, %rax
vmovss 0x1c(%rsi), %xmm0
movl 0x40(%r14,%rax), %ecx
movq 0x1e8(%r8), %rdx
movq (%rdx,%rcx,8), %rsi
vmovss 0x28(%rsi), %xmm1
vmovss 0x2c(%rsi), %xmm2
vmovss 0x30(%rsi), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x1d110fa(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm1, %xmm2, %xmm3
vcvttss2si %xmm3, %ecx
movslq %ecx, %rcx
movq 0xe0(%rsi), %r15
imulq $0x38, %rcx, %r9
movq (%r15,%r9), %rsi
movl (%r14,%rax), %r11d
movl 0x4(%r14,%rax), %r8d
movq %r8, 0x260(%rsp)
vmovups (%rsi,%r11,4), %xmm2
movl 0x10(%r14,%rax), %edi
movq %rdi, 0x340(%rsp)
vmovups (%rsi,%rdi,4), %xmm1
movl 0x20(%r14,%rax), %edi
movq %rdi, 0x1c0(%rsp)
vmovups (%rsi,%rdi,4), %xmm7
movl 0x30(%r14,%rax), %edi
movq %rdi, 0x100(%rsp)
vmovups (%rsi,%rdi,4), %xmm8
movl 0x44(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %rsi
movq %rsi, 0x150(%rsp)
movq (%rsi,%r9), %rsi
vmovups (%rsi,%r8,4), %xmm4
movl 0x14(%r14,%rax), %edi
movq %rdi, 0x2e0(%rsp)
vmovups (%rsi,%rdi,4), %xmm5
movl 0x24(%r14,%rax), %edi
movq %rdi, 0x320(%rsp)
vmovups (%rsi,%rdi,4), %xmm10
movl 0x34(%r14,%rax), %edi
movq %rdi, 0x360(%rsp)
vmovups (%rsi,%rdi,4), %xmm9
movl 0x48(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %r8
movq (%r8,%r9), %rsi
movl 0x8(%r14,%rax), %edi
vmovups (%rsi,%rdi,4), %xmm13
movl 0x18(%r14,%rax), %r10d
movq %r10, 0x148(%rsp)
vmovups (%rsi,%r10,4), %xmm6
movl 0x28(%r14,%rax), %r10d
movq %r10, 0x1e0(%rsp)
vmovups (%rsi,%r10,4), %xmm11
movl 0x38(%r14,%rax), %r10d
movq %r10, 0x2c0(%rsp)
vmovups (%rsi,%r10,4), %xmm12
movl 0x4c(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rdx
vsubss %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x300(%rsp)
movq 0xe0(%rdx), %rsi
movq (%rsi,%r9), %rbx
movl 0xc(%r14,%rax), %edx
vmovups (%rbx,%rdx,4), %xmm0
movl 0x1c(%r14,%rax), %r12d
vmovups (%rbx,%r12,4), %xmm3
vunpcklps %xmm13, %xmm2, %xmm14 # xmm14 = xmm2[0],xmm13[0],xmm2[1],xmm13[1]
vunpckhps %xmm13, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm13[2],xmm2[3],xmm13[3]
vunpcklps %xmm0, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
vunpckhps %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
movl 0x2c(%r14,%rax), %r9d
vmovups (%rbx,%r9,4), %xmm4
vunpcklps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vmovaps %xmm0, 0x280(%rsp)
vunpcklps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
vmovaps %xmm0, 0x3d0(%rsp)
vunpckhps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
vmovaps %xmm0, 0x220(%rsp)
vunpcklps %xmm6, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpckhps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
vunpcklps %xmm3, %xmm5, %xmm2 # xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
vunpckhps %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
movl 0x3c(%r14,%rax), %r10d
vmovups (%rbx,%r10,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x2a0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x200(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x240(%rsp)
vunpcklps %xmm11, %xmm7, %xmm0 # xmm0 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
vunpckhps %xmm11, %xmm7, %xmm1 # xmm1 = xmm7[2],xmm11[2],xmm7[3],xmm11[3]
vunpcklps %xmm4, %xmm10, %xmm2 # xmm2 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
vunpckhps %xmm4, %xmm10, %xmm3 # xmm3 = xmm10[2],xmm4[2],xmm10[3],xmm4[3]
incl %ecx
movslq %ecx, %rcx
imulq $0x38, %rcx, %rbx
movq (%r15,%rbx), %rcx
vmovups (%rcx,%r11,4), %xmm4
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x3c0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x380(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x390(%rsp)
vunpcklps %xmm12, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
vunpckhps %xmm12, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
vunpcklps %xmm5, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
vunpckhps %xmm5, %xmm9, %xmm3 # xmm3 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
movq (%r8,%rbx), %r8
vmovups (%r8,%rdi,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x3b0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x3a0(%rsp)
movq 0x150(%rsp), %rdi
movq (%rdi,%rbx), %rdi
vunpcklps %xmm5, %xmm4, %xmm0 # xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0x260(%rsp), %r11
vmovups (%rdi,%r11,4), %xmm2
movq (%rsi,%rbx), %r11
vmovups (%r11,%rdx,4), %xmm3
vunpcklps %xmm3, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vunpcklps %xmm2, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpcklps %xmm4, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
vunpckhps %xmm4, %xmm0, %xmm7 # xmm7 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
movq 0x340(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0x148(%rsp), %rdx
vmovups (%r8,%rdx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x2e0(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%r11,%r12,4), %xmm2
vunpcklps %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpckhps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vunpcklps %xmm1, %xmm0, %xmm14 # xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vunpckhps %xmm4, %xmm3, %xmm8 # xmm8 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
movq 0x1c0(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0x1e0(%rsp), %rdx
vmovups (%r8,%rdx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x320(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%r11,%r9,4), %xmm3
vunpcklps %xmm3, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vunpcklps %xmm1, %xmm0, %xmm15 # xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm9 # xmm9 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0x100(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0x2c0(%rsp), %rcx
vmovups (%r8,%rcx,4), %xmm1
movq 0x30(%rsp), %rsi
vunpcklps %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x360(%rsp), %rcx
vmovups (%rdi,%rcx,4), %xmm1
vmovups (%r11,%r10,4), %xmm4
vunpcklps %xmm4, %xmm1, %xmm11 # xmm11 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpckhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
vunpcklps %xmm1, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm11, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3]
vmovaps 0x300(%rsp), %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[0,0,0,0]
vmovss 0x1d0caa4(%rip), %xmm1 # 0x1eec714
vsubss %xmm11, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm10
vmulps 0x3d0(%rsp), %xmm1, %xmm11
vaddps %xmm10, %xmm11, %xmm11
vmulps %xmm7, %xmm0, %xmm7
vmulps 0x220(%rsp), %xmm1, %xmm10
vaddps %xmm7, %xmm10, %xmm10
vmulps %xmm0, %xmm13, %xmm7
vmulps 0x280(%rsp), %xmm1, %xmm13
vaddps %xmm7, %xmm13, %xmm13
vmulps %xmm2, %xmm0, %xmm2
vmulps 0x200(%rsp), %xmm1, %xmm7
vaddps %xmm2, %xmm7, %xmm7
vmulps %xmm0, %xmm8, %xmm2
vmulps 0x240(%rsp), %xmm1, %xmm8
vaddps %xmm2, %xmm8, %xmm2
vmulps %xmm0, %xmm14, %xmm8
vmulps 0x2a0(%rsp), %xmm1, %xmm14
vaddps %xmm8, %xmm14, %xmm14
vmulps %xmm3, %xmm0, %xmm3
vmulps 0x380(%rsp), %xmm1, %xmm8
vaddps %xmm3, %xmm8, %xmm3
vmulps %xmm0, %xmm9, %xmm8
vmulps 0x390(%rsp), %xmm1, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vmulps %xmm0, %xmm15, %xmm9
vmulps 0x3c0(%rsp), %xmm1, %xmm15
vaddps %xmm9, %xmm15, %xmm9
vmulps %xmm4, %xmm0, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm0
vmulps %xmm1, %xmm12, %xmm6
vbroadcastf128 0x40(%r14,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vaddps %xmm4, %xmm6, %xmm4
vmovaps %ymm12, 0x460(%rsp)
vbroadcastf128 0x50(%r14,%rax), %ymm6 # ymm6 = mem[0,1,0,1]
vmulps 0x3a0(%rsp), %xmm1, %xmm12
vaddps %xmm5, %xmm12, %xmm5
vmulps 0x3b0(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmovaps %ymm6, 0x440(%rsp)
vinsertf128 $0x1, %xmm3, %ymm11, %ymm1
vinsertf128 $0x1, %xmm8, %ymm10, %ymm3
vinsertf128 $0x1, %xmm9, %ymm13, %ymm6
vinsertf128 $0x1, %xmm7, %ymm7, %ymm7
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm14, %ymm14, %ymm14
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vinsertf128 $0x1, %xmm5, %ymm5, %ymm5
vinsertf128 $0x1, %xmm0, %ymm0, %ymm9
vbroadcastss (%rsi), %ymm10
vbroadcastss 0x4(%rsi), %ymm11
vbroadcastss 0x8(%rsi), %ymm12
vsubps %ymm10, %ymm1, %ymm1
vsubps %ymm11, %ymm3, %ymm3
vsubps %ymm12, %ymm6, %ymm15
vsubps %ymm10, %ymm7, %ymm13
vsubps %ymm11, %ymm2, %ymm8
vsubps %ymm12, %ymm14, %ymm14
vsubps %ymm10, %ymm4, %ymm2
vsubps %ymm11, %ymm5, %ymm4
vmovaps %ymm4, 0x1c0(%rsp)
vsubps %ymm12, %ymm9, %ymm6
vmovaps %ymm6, 0x100(%rsp)
vsubps %ymm1, %ymm2, %ymm11
vsubps %ymm3, %ymm4, %ymm12
vsubps %ymm15, %ymm6, %ymm0
vaddps %ymm3, %ymm4, %ymm4
vaddps %ymm6, %ymm15, %ymm5
vmulps %ymm4, %ymm0, %ymm7
vmulps %ymm5, %ymm12, %ymm9
vsubps %ymm7, %ymm9, %ymm9
vaddps %ymm1, %ymm2, %ymm7
vmulps %ymm5, %ymm11, %ymm5
vmovaps %ymm0, 0x2a0(%rsp)
vmulps %ymm7, %ymm0, %ymm10
vsubps %ymm5, %ymm10, %ymm5
vmovaps %ymm12, 0x2e0(%rsp)
vmulps %ymm7, %ymm12, %ymm7
vmovaps %ymm11, 0x300(%rsp)
vmulps %ymm4, %ymm11, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vbroadcastss 0x14(%rsi), %ymm11
vbroadcastss 0x18(%rsi), %ymm6
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm11, %ymm5
vbroadcastss 0x10(%rsi), %ymm0
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm0, %ymm9, %ymm5
vaddps %ymm4, %ymm5, %ymm10
vsubps %ymm8, %ymm3, %ymm9
vsubps %ymm14, %ymm15, %ymm12
vmovaps %ymm3, 0x340(%rsp)
vaddps %ymm3, %ymm8, %ymm4
vmovaps %ymm15, 0x320(%rsp)
vaddps %ymm14, %ymm15, %ymm5
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm5, %ymm9, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vsubps %ymm13, %ymm1, %ymm15
vmulps %ymm5, %ymm15, %ymm5
vmovaps %ymm1, 0x360(%rsp)
vaddps %ymm1, %ymm13, %ymm7
vmovaps %ymm12, 0x280(%rsp)
vmulps %ymm7, %ymm12, %ymm12
vsubps %ymm5, %ymm12, %ymm5
vmovaps %ymm9, %ymm12
vmulps %ymm7, %ymm9, %ymm7
vmulps %ymm4, %ymm15, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm11, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm0, %ymm3
vaddps %ymm4, %ymm3, %ymm4
vmovaps %ymm13, %ymm3
vsubps %ymm2, %ymm13, %ymm13
vaddps %ymm2, %ymm3, %ymm2
vmovaps 0x1c0(%rsp), %ymm1
vsubps %ymm1, %ymm8, %ymm9
vaddps %ymm1, %ymm8, %ymm1
vmovaps 0x100(%rsp), %ymm3
vsubps %ymm3, %ymm14, %ymm5
vaddps %ymm3, %ymm14, %ymm3
vmulps %ymm1, %ymm5, %ymm7
vmulps %ymm3, %ymm9, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm3, %ymm13, %ymm3
vmulps %ymm2, %ymm5, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm9, %ymm2
vmulps %ymm1, %ymm13, %ymm1
vsubps %ymm2, %ymm1, %ymm1
vmovaps %ymm6, 0x2c0(%rsp)
vmulps %ymm1, %ymm6, %ymm1
vmovaps %ymm11, 0x100(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm0, 0x1c0(%rsp)
vmulps %ymm7, %ymm0, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm4, %ymm10, %ymm2
vaddps %ymm2, %ymm1, %ymm0
vminps %ymm4, %ymm10, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d40f43(%rip), %ymm3 # 0x1f20ec4
vmovaps %ymm0, 0x200(%rsp)
vandps %ymm3, %ymm0, %ymm0
vbroadcastss 0x1d40f35(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm0, 0x220(%rsp)
vmulps %ymm3, %ymm0, %ymm3
vbroadcastss 0x1d40f13(%rip), %ymm7 # 0x1f20ec0
vxorps %ymm7, %ymm3, %ymm7
vcmpnltps %ymm7, %ymm2, %ymm2
vmovaps %ymm10, 0x260(%rsp)
vmovaps %ymm4, 0x240(%rsp)
vmaxps %ymm4, %ymm10, %ymm7
vmaxps %ymm1, %ymm7, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm0
vtestps %ymm0, %ymm0
je 0x1e05a6
vmovaps %ymm0, 0x1e0(%rsp)
vmovaps 0x2a0(%rsp), %ymm0
vmovaps %ymm12, %ymm11
vmulps %ymm0, %ymm12, %ymm2
vmovaps 0x2e0(%rsp), %ymm1
vmovaps %ymm9, %ymm8
vmovaps 0x280(%rsp), %ymm9
vmulps %ymm1, %ymm9, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm8, %ymm9, %ymm7
vmulps %ymm5, %ymm12, %ymm12
vsubps %ymm7, %ymm12, %ymm12
vbroadcastss 0x1d40e90(%rip), %ymm4 # 0x1f20ec4
vandps %ymm4, %ymm2, %ymm2
vandps %ymm4, %ymm7, %ymm7
vcmpltps %ymm7, %ymm2, %ymm2
vblendvps %ymm2, %ymm3, %ymm12, %ymm14
vmulps %ymm5, %ymm15, %ymm2
vmulps %ymm0, %ymm15, %ymm3
vmovaps 0x300(%rsp), %ymm10
vmulps %ymm9, %ymm10, %ymm6
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm13, %ymm9, %ymm7
vsubps %ymm2, %ymm7, %ymm7
vandps %ymm4, %ymm6, %ymm6
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm6, %ymm2
vblendvps %ymm2, %ymm3, %ymm7, %ymm9
vmulps %ymm13, %ymm11, %ymm2
vmulps %ymm11, %ymm10, %ymm3
vmulps %ymm1, %ymm15, %ymm5
vmulps %ymm8, %ymm15, %ymm0
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm4, %ymm5, %ymm5
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm5, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm0
vmovdqa 0x1e0(%rsp), %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmulps 0x2c0(%rsp), %ymm0, %ymm2
vmulps 0x100(%rsp), %ymm9, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x1c0(%rsp), %ymm14, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm2, %ymm3
vmulps 0x320(%rsp), %ymm0, %ymm2
vmulps 0x340(%rsp), %ymm9, %ymm5
vaddps %ymm2, %ymm5, %ymm2
vmulps 0x360(%rsp), %ymm14, %ymm5
vrcpps %ymm3, %ymm6
vaddps %ymm2, %ymm5, %ymm2
vaddps %ymm2, %ymm2, %ymm2
vmulps %ymm6, %ymm3, %ymm5
vbroadcastss 0x1d0c5f7(%rip), %ymm4 # 0x1eec714
vsubps %ymm5, %ymm4, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%rsi), %ymm6
vmulps %ymm5, %ymm2, %ymm2
vcmpleps %ymm2, %ymm6, %ymm5
vbroadcastss 0x20(%rsi), %ymm6
vcmpleps %ymm6, %ymm2, %ymm6
vandps %ymm5, %ymm6, %ymm5
vcmpneqps 0x1d40db0(%rip), %ymm3, %ymm3 # 0x1f20f00
vandps %ymm3, %ymm5, %ymm3
vextractf128 $0x1, %ymm3, %xmm5
vpackssdw %xmm5, %xmm3, %xmm3
vpand %xmm1, %xmm3, %xmm1
vpmovsxwd %xmm1, %xmm3
vpshufd $0xee, %xmm1, %xmm5 # xmm5 = xmm1[2,3,2,3]
vpmovsxwd %xmm5, %xmm5
vinsertf128 $0x1, %xmm5, %ymm3, %ymm3
vtestps %ymm3, %ymm3
je 0x1e05a6
vmovaps 0x260(%rsp), %ymm4
vmovaps %ymm4, 0x480(%rsp)
vmovaps 0x240(%rsp), %ymm5
vmovaps %ymm5, 0x4a0(%rsp)
vmovaps 0x200(%rsp), %ymm7
vmovaps %ymm7, 0x4c0(%rsp)
vmovaps %ymm14, 0x4e0(%rsp)
vmovaps %ymm9, 0x500(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps %ymm3, 0x540(%rsp)
vmovaps %ymm2, 0x5a0(%rsp)
vmovaps 0x420(%rsp), %ymm6
vmovaps %ymm6, 0x620(%rsp)
vrcpps %ymm7, %ymm2
vmulps %ymm2, %ymm7, %ymm3
vbroadcastss 0x1d0c50c(%rip), %ymm10 # 0x1eec714
vsubps %ymm3, %ymm10, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1d10dcb(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x220(%rsp), %ymm7
vcmpnltps %ymm3, %ymm7, %ymm3
vandps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm4, %ymm3
vminps %ymm10, %ymm3, %ymm3
vmulps %ymm2, %ymm5, %ymm2
vminps %ymm10, %ymm2, %ymm2
vsubps %ymm3, %ymm10, %ymm4
vsubps %ymm2, %ymm10, %ymm5
vblendvps %ymm6, %ymm4, %ymm2, %ymm2
vblendvps %ymm6, %ymm5, %ymm3, %ymm3
vmovaps %ymm2, 0x580(%rsp)
vmovaps %ymm3, 0x560(%rsp)
vmovaps 0x400(%rsp), %ymm4
vmulps %ymm4, %ymm14, %ymm2
vmulps %ymm4, %ymm9, %ymm3
vmulps %ymm0, %ymm4, %ymm0
vmovaps %ymm2, 0x5c0(%rsp)
vmovaps %ymm3, 0x5e0(%rsp)
vmovaps %ymm0, 0x600(%rsp)
vpsllw $0xf, %xmm1, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
movq 0x18(%rsp), %rdx
vmovaps 0xf0(%rsp), %xmm7
vmovaps 0xe0(%rsp), %xmm8
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm12
vmovaps 0xa0(%rsp), %xmm13
vmovaps 0x90(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm5
vmovaps 0x70(%rsp), %xmm15
vbroadcastss 0x1d40bc0(%rip), %ymm6 # 0x1f20ec4
movq 0x20(%rsp), %r8
movb 0x17(%rsp), %dil
bsfq %r15, %r12
movl 0x460(%rsp,%r12,4), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r13)
je 0x1e0344
movq 0x10(%rdx), %rbx
cmpq $0x0, 0x10(%rbx)
jne 0x1e0364
cmpq $0x0, 0x48(%r13)
jne 0x1e0364
xorl %eax, %eax
jmp 0x1e034a
btcq %r12, %r15
movb $0x1, %al
movq 0x28(%rsp), %r13
xorl %r12d, %r12d
testb %al, %al
je 0x1e0652
testq %r15, %r15
jne 0x1e030e
jmp 0x1e0610
vmovss 0x560(%rsp,%r12,4), %xmm0
vmovd 0x580(%rsp,%r12,4), %xmm1
movq 0x8(%rdx), %rcx
movl 0x440(%rsp,%r12,4), %edi
vmovss 0x5c0(%rsp,%r12,4), %xmm2
vmovss 0x5e0(%rsp,%r12,4), %xmm3
vmovss 0x600(%rsp,%r12,4), %xmm4
vmovss %xmm2, 0x160(%rsp)
vmovss %xmm3, 0x164(%rsp)
vmovss %xmm4, 0x168(%rsp)
vmovss %xmm0, 0x16c(%rsp)
vmovd %xmm1, 0x170(%rsp)
movl %edi, 0x174(%rsp)
movl %eax, 0x178(%rsp)
movl (%rcx), %eax
movl %eax, 0x17c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x180(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x100(%rsp)
vmovd 0x5a0(%rsp,%r12,4), %xmm0
vmovd %xmm0, 0x20(%rsi)
orl $-0x1, 0x3c(%rsp)
leaq 0x3c(%rsp), %rax
movq %rax, 0x190(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0x198(%rsp)
movq %rcx, 0x1a0(%rsp)
movq %rsi, 0x1a8(%rsp)
leaq 0x160(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movl $0x1, 0x1b8(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
je 0x1e04da
leaq 0x190(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x20(%rsp), %r8
vmovaps 0x70(%rsp), %xmm15
vmovaps 0x80(%rsp), %xmm5
vmovaps 0x90(%rsp), %xmm14
vmovaps 0xa0(%rsp), %xmm13
vmovaps 0xb0(%rsp), %xmm12
vmovaps 0xc0(%rsp), %xmm11
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xe0(%rsp), %xmm8
vmovaps 0xf0(%rsp), %xmm7
movq 0x30(%rsp), %rsi
movq 0x18(%rsp), %rdx
movq 0x190(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1e056e
movq 0x10(%rbx), %rax
testq %rax, %rax
je 0x1e056a
testb $0x2, (%rbx)
jne 0x1e04f3
testb $0x40, 0x3e(%r13)
je 0x1e055d
leaq 0x190(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x20(%rsp), %r8
vmovaps 0x70(%rsp), %xmm15
vmovaps 0x80(%rsp), %xmm5
vmovaps 0x90(%rsp), %xmm14
vmovaps 0xa0(%rsp), %xmm13
vmovaps 0xb0(%rsp), %xmm12
vmovaps 0xc0(%rsp), %xmm11
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xe0(%rsp), %xmm8
vmovaps 0xf0(%rsp), %xmm7
movq 0x30(%rsp), %rsi
movq 0x18(%rsp), %rdx
movq 0x190(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1e056e
xorl %eax, %eax
jmp 0x1e0582
vmovd 0x100(%rsp), %xmm0
vmovd %xmm0, 0x20(%rsi)
btcq %r12, %r15
movb $0x1, %al
vbroadcastss 0x1d40939(%rip), %ymm6 # 0x1f20ec4
vbroadcastss 0x1d0c180(%rip), %ymm10 # 0x1eec714
movq 0x28(%rsp), %r13
xorl %r12d, %r12d
movb 0x17(%rsp), %dil
jmp 0x1e0352
movq 0x18(%rsp), %rdx
vmovaps 0xf0(%rsp), %xmm7
vmovaps 0xe0(%rsp), %xmm8
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm12
vmovaps 0xa0(%rsp), %xmm13
vmovaps 0x90(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm5
vmovaps 0x70(%rsp), %xmm15
vbroadcastss 0x1d408c2(%rip), %ymm6 # 0x1f20ec4
vbroadcastss 0x1d0c109(%rip), %ymm10 # 0x1eec714
movq 0x20(%rsp), %r8
movq 0x140(%rsp), %rdi
incq %rdi
movq 0x138(%rsp), %rax
cmpq %rax, %rdi
setb %cl
jne 0x1df880
movq 0x68(%rsp), %r8
movq 0x60(%rsp), %r9
movq 0x58(%rsp), %r10
movq 0x50(%rsp), %r11
movq 0x48(%rsp), %rbx
movq 0x40(%rsp), %r15
xorl %r12d, %r12d
jmp 0x1e0681
testb $0x1, %dil
movq 0x68(%rsp), %r8
movq 0x60(%rsp), %r9
movq 0x58(%rsp), %r10
movq 0x50(%rsp), %r11
movq 0x48(%rsp), %rbx
movq 0x40(%rsp), %r15
je 0x1e0681
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r12
cmpl $0x3, %r12d
jne 0x1df67b
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, true, embree::avx::SubdivPatch1Intersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/* Occlusion (shadow-ray) query against a BVH: returns as soon as ANY hit is
 * found, marking the ray occluded by setting ray.tfar = neg_inf. Uses an
 * explicit node stack and a depth-first "any hit" traversal order; no hit
 * information other than the occlusion flag is produced. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (tfar < 0 is the occluded marker) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer (one past the root entry)
NodeRef* stackEnd = stack+stackSize; // overflow bound passed to the traverser
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers; tnear/tfar are clamped to >= 0 */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop — the `pop` label is the target of the goto below when all
 * children of an inner node are missed */
while (true) pop:
{
/* pop next node; empty stack means traversal is done (no occlusion found) */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop — descends until a leaf is reached */
while (true)
{
/* intersect node: mask gets one bit per hit child, tNear the entry distances */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* intersect() returning false means `cur` is a leaf; undo the node-count
 * statistic that was optimistically incremented above */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children (any-hit order: no sorting needed) */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
/* test the leaf's primitives; any hit occludes the ray and ends traversal */
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack (set by intersectors that build geometry on demand) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xcc0, %rsp # imm = 0xCC0
movq %rdx, 0x90(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1e06da
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1e06c8
leaq 0x508(%rsp), %r11
movq 0x70(%rax), %rax
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d407bc(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d108d3(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1d0bff1(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d40830(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
movq %rax, -0x8(%r11)
vbroadcastss 0x1d3f7cd(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d3f7c4(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm3, %xmm3
vbroadcastss (%rsi), %xmm11
vbroadcastss 0x4(%rsi), %xmm12
vbroadcastss 0x8(%rsi), %xmm13
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
vshufps $0x0, %xmm4, %xmm4, %xmm14 # xmm14 = xmm4[0,0,0,0]
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm15 # xmm15 = xmm4[1,1,1,1]
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vshufps $0x0, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x4, %r9d
xorl %r8d, %r8d
vucomiss %xmm2, %xmm5
setb %r8b
shll $0x4, %r8d
orq $0x20, %r8
xorl %r12d, %r12d
vucomiss %xmm2, %xmm6
setb %r12b
shll $0x4, %r12d
orq $0x40, %r12
movq %r9, %r13
xorq $0x10, %r13
movq %r8, %r15
xorq $0x10, %r15
movq %r12, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[0,0,0,0]
leaq 0x1f6f799(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x420(%rsp)
movq $0x0, 0x28(%rsp)
vmovaps %xmm11, 0x100(%rsp)
vmovaps %xmm12, 0xf0(%rsp)
vmovaps %xmm13, 0xe0(%rsp)
vmovaps %xmm14, 0xd0(%rsp)
vmovaps %xmm15, 0xc0(%rsp)
vmovaps %xmm7, 0x80(%rsp)
vmovaps %xmm8, 0x70(%rsp)
vmovaps %xmm3, 0x40(%rsp)
movq %r8, 0x18(%rsp)
vmovaps %xmm5, 0x30(%rsp)
vmovaps %xmm6, 0x60(%rsp)
movq %r9, 0x20(%rsp)
vmovaps %xmm4, 0x50(%rsp)
leaq 0x500(%rsp), %rax
cmpq %rax, %r11
je 0x1e06c8
movq -0x8(%r11), %r10
addq $-0x8, %r11
testb $0x8, %r10b
jne 0x1e0904
vmovaps 0x20(%r10,%r9), %xmm0
vsubps %xmm11, %xmm0, %xmm0
vmulps %xmm0, %xmm14, %xmm0
vmovaps 0x20(%r10,%r8), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r10,%r12), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vmaxps %xmm5, %xmm1, %xmm1
vmaxps %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r10,%r13), %xmm1
vsubps %xmm11, %xmm1, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vmovaps 0x20(%r10,%r15), %xmm2
vsubps %xmm12, %xmm2, %xmm2
vmulps %xmm2, %xmm8, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r10,%rbx), %xmm2
vsubps %xmm13, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vminps %xmm6, %xmm2, %xmm2
vminps %xmm2, %xmm1, %xmm1
vcmpleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %r14d
testb $0x8, %r10b
jne 0x1e093a
testq %r14, %r14
je 0x1e093e
andq $-0x10, %r10
bsfq %r14, %rcx
leaq -0x1(%r14), %rdi
xorl %eax, %eax
movq (%r10,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %r14, %rdi
jne 0x1e0943
movq %rcx, %r10
testl %eax, %eax
je 0x1e0879
jmp 0x1e097f
pushq $0x6
jmp 0x1e0940
pushq $0x4
popq %rax
jmp 0x1e0930
movq %rcx, (%r11)
addq $0x8, %r11
bsfq %rdi, %r8
leaq -0x1(%rdi), %rcx
movq (%r10,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
andq %rdi, %rcx
je 0x1e0975
movq %r8, (%r11)
addq $0x8, %r11
bsfq %rcx, %r8
leaq -0x1(%rcx), %rdi
jmp 0x1e0952
movq %r8, %r10
movq 0x18(%rsp), %r8
jmp 0x1e0930
cmpl $0x6, %eax
jne 0x1e152f
movl %r10d, %eax
andl $0xf, %eax
cmpl $0x8, %eax
jne 0x1e153d
movq 0x28(%rsp), %r8
movl 0xc(%r8), %eax
movl 0x10(%r8), %ecx
movl 0x24(%r8), %edi
addq %r8, %rdi
shrq $0x4, %r10
leaq (%rdi,%r10,4), %r9
vmovups 0x2c(%r9,%rax,4), %xmm0
vmovaps %xmm0, %xmm1
cmpq $0x3, %rcx
jb 0x1e09d2
leaq (%rdi,%r10,4), %r8
addq $0x2c, %r8
vmovups (%r8,%rax,8), %xmm1
vmovups 0x2c(%rdi,%r10,4), %xmm2
cmpq $0x2, %rax
je 0x1e1553
movq 0x28(%rsp), %rdi
movl 0x14(%rdi), %r8d
leaq (%r9,%r8,4), %rdi
addq $0x2c, %rdi
vmovups (%rdi,%rax,4), %xmm3
vmovaps %xmm3, %xmm5
cmpl $0x3, %ecx
jb 0x1e0a07
vmovups (%rdi,%rax,8), %xmm5
vmovups (%rdi), %xmm8
cmpl $0x2, %eax
je 0x1e1567
leaq (%r9,%r8,8), %rdi
addq $0x2c, %rdi
vmovups (%rdi,%rax,4), %xmm6
vmovaps %xmm6, %xmm7
cmpl $0x3, %ecx
jb 0x1e0a2f
vmovups (%rdi,%rax,8), %xmm7
vinsertf128 $0x1, %xmm0, %ymm2, %ymm2
vinsertf128 $0x1, %xmm1, %ymm0, %ymm0
vunpcklps %ymm0, %ymm2, %ymm1 # ymm1 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[4],ymm0[4],ymm2[5],ymm0[5]
vshufps $0xa5, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm0, %ymm0, %ymm14 # ymm14 = ymm0[0,1,1,2,4,5,5,6]
vinsertf128 $0x1, %xmm3, %ymm8, %ymm8
vinsertf128 $0x1, %xmm5, %ymm3, %ymm5
vunpcklps %ymm5, %ymm8, %ymm2 # ymm2 = ymm8[0],ymm5[0],ymm8[1],ymm5[1],ymm8[4],ymm5[4],ymm8[5],ymm5[5]
vshufps $0xa5, %ymm8, %ymm8, %ymm11 # ymm11 = ymm8[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm5, %ymm5, %ymm5 # ymm5 = ymm5[0,1,1,2,4,5,5,6]
vmovups (%rdi), %xmm8
cmpl $0x2, %eax
je 0x1e157c
vinsertf128 $0x1, %xmm6, %ymm8, %ymm8
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vunpcklps %ymm6, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm6[0],ymm8[1],ymm6[1],ymm8[4],ymm6[4],ymm8[5],ymm6[5]
vshufps $0xa5, %ymm8, %ymm8, %ymm13 # ymm13 = ymm8[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm6, %ymm6, %ymm9 # ymm9 = ymm6[0,1,1,2,4,5,5,6]
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm10
vbroadcastss 0x8(%rsi), %ymm12
vsubps %ymm6, %ymm1, %ymm8
vsubps %ymm10, %ymm2, %ymm0
vsubps %ymm12, %ymm7, %ymm2
vsubps %ymm6, %ymm4, %ymm1
vmovaps %ymm1, 0xa0(%rsp)
vsubps %ymm10, %ymm11, %ymm3
vsubps %ymm12, %ymm13, %ymm11
vsubps %ymm6, %ymm14, %ymm4
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm12, %ymm9, %ymm5
vsubps %ymm8, %ymm4, %ymm14
vsubps %ymm0, %ymm6, %ymm15
vsubps %ymm2, %ymm5, %ymm13
vaddps %ymm0, %ymm6, %ymm7
vaddps %ymm2, %ymm5, %ymm9
vmulps %ymm7, %ymm13, %ymm10
vmulps %ymm9, %ymm15, %ymm12
vsubps %ymm10, %ymm12, %ymm12
vaddps %ymm4, %ymm8, %ymm10
vmulps %ymm9, %ymm14, %ymm9
vmovaps %ymm13, 0x280(%rsp)
vmulps %ymm10, %ymm13, %ymm13
vsubps %ymm9, %ymm13, %ymm9
vmovaps %ymm15, 0x1e0(%rsp)
vmulps %ymm10, %ymm15, %ymm10
vmovaps %ymm14, 0x200(%rsp)
vmulps %ymm7, %ymm14, %ymm7
vsubps %ymm10, %ymm7, %ymm13
vbroadcastss 0x14(%rsi), %ymm10
vbroadcastss 0x18(%rsi), %ymm15
vmulps %ymm13, %ymm15, %ymm13
vmulps %ymm9, %ymm10, %ymm14
vbroadcastss 0x10(%rsi), %ymm1
vmovaps %ymm1, 0x140(%rsp)
vaddps %ymm13, %ymm14, %ymm13
vmulps %ymm1, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm9
vsubps %ymm3, %ymm0, %ymm12
vsubps %ymm11, %ymm2, %ymm1
vmovaps %ymm0, 0x2a0(%rsp)
vaddps %ymm3, %ymm0, %ymm14
vmovaps %ymm2, 0x220(%rsp)
vaddps %ymm2, %ymm11, %ymm13
vmulps %ymm1, %ymm14, %ymm0
vmulps %ymm13, %ymm12, %ymm2
vsubps %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vmovaps 0xa0(%rsp), %ymm7
vsubps %ymm7, %ymm8, %ymm0
vmulps %ymm0, %ymm13, %ymm2
vmovaps %ymm8, 0x240(%rsp)
vaddps %ymm7, %ymm8, %ymm8
vmovaps %ymm7, %ymm13
vmovaps %ymm1, 0x4e0(%rsp)
vmulps %ymm1, %ymm8, %ymm7
vsubps %ymm2, %ymm7, %ymm1
vmovaps %ymm12, 0x260(%rsp)
vmulps %ymm8, %ymm12, %ymm7
vmovaps %ymm0, 0x4c0(%rsp)
vmulps %ymm0, %ymm14, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm7, %ymm15, %ymm7
vmulps %ymm1, %ymm10, %ymm1
vaddps %ymm7, %ymm1, %ymm1
vmovaps 0x140(%rsp), %ymm14
vmulps 0x120(%rsp), %ymm14, %ymm2
vaddps %ymm1, %ymm2, %ymm7
vmovaps %ymm7, 0x480(%rsp)
vsubps %ymm4, %ymm13, %ymm1
vaddps %ymm4, %ymm13, %ymm0
vmovaps %ymm1, %ymm13
vsubps %ymm6, %ymm3, %ymm8
vaddps %ymm6, %ymm3, %ymm1
vsubps %ymm5, %ymm11, %ymm12
vaddps %ymm5, %ymm11, %ymm2
vmulps %ymm1, %ymm12, %ymm3
vmulps %ymm2, %ymm8, %ymm5
vsubps %ymm3, %ymm5, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vmulps %ymm0, %ymm12, %ymm5
vsubps %ymm2, %ymm5, %ymm2
vmulps %ymm0, %ymm8, %ymm0
vmulps %ymm1, %ymm13, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm15, 0x120(%rsp)
vmulps %ymm0, %ymm15, %ymm0
vmovaps %ymm10, 0xa0(%rsp)
vmulps %ymm2, %ymm10, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps %ymm3, %ymm14, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vaddps %ymm7, %ymm9, %ymm1
vaddps %ymm1, %ymm0, %ymm4
vbroadcastss 0x1d40235(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm4, %ymm1
vbroadcastss 0x1d40230(%rip), %ymm2 # 0x1f20ecc
vmulps %ymm2, %ymm1, %ymm1
vminps %ymm7, %ymm9, %ymm2
vminps %ymm0, %ymm2, %ymm2
vbroadcastss 0x1d4020f(%rip), %ymm3 # 0x1f20ec0
vxorps %ymm3, %ymm1, %ymm3
vcmpnltps %ymm3, %ymm2, %ymm2
vmovaps %ymm9, 0x4a0(%rsp)
vmaxps %ymm7, %ymm9, %ymm3
vmaxps %ymm0, %ymm3, %ymm0
vcmpleps %ymm1, %ymm0, %ymm0
vorps %ymm0, %ymm2, %ymm0
imulq $0xc, %r8, %rdi
addq %r9, %rdi
addq $0x2c, %rdi
movq %rdi, 0x198(%rsp)
movq %rax, 0x1a0(%rsp)
movq %rcx, 0x1a8(%rsp)
movq 0x28(%rsp), %rax
movl 0x18(%rax), %edx
movl 0x1c(%rax), %ecx
leaq 0x198(%rsp), %rax
movq %rax, 0x320(%rsp)
vmovaps 0x420(%rsp), %ymm7
vtestps %ymm7, %ymm0
je 0x1e1591
vmovaps %ymm4, 0x440(%rsp)
vmovaps 0x280(%rsp), %ymm5
vmovaps 0x260(%rsp), %ymm9
vmovaps %ymm0, 0x460(%rsp)
vmulps %ymm5, %ymm9, %ymm0
vmovaps %ymm8, %ymm15
vmovaps 0x1e0(%rsp), %ymm8
vmovaps 0x4e0(%rsp), %ymm4
vmulps %ymm4, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm1
vmulps %ymm4, %ymm15, %ymm2
vmulps %ymm12, %ymm9, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vandps %ymm6, %ymm0, %ymm0
vandps %ymm6, %ymm2, %ymm2
vcmpltps %ymm2, %ymm0, %ymm0
vblendvps %ymm0, %ymm1, %ymm3, %ymm3
vmovaps 0x4c0(%rsp), %ymm11
vmulps %ymm12, %ymm11, %ymm0
vmulps %ymm5, %ymm11, %ymm1
vmovaps 0x200(%rsp), %ymm5
vmulps %ymm4, %ymm5, %ymm2
vsubps %ymm2, %ymm1, %ymm1
vmulps %ymm4, %ymm13, %ymm4
vsubps %ymm0, %ymm4, %ymm4
vandps %ymm6, %ymm2, %ymm2
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm0, %ymm2, %ymm0
vblendvps %ymm0, %ymm1, %ymm4, %ymm4
vmulps %ymm13, %ymm9, %ymm0
vmulps %ymm5, %ymm9, %ymm1
vmulps %ymm11, %ymm8, %ymm2
vmulps %ymm15, %ymm11, %ymm5
vsubps %ymm2, %ymm1, %ymm1
vsubps %ymm0, %ymm5, %ymm5
vandps %ymm6, %ymm2, %ymm2
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm0, %ymm2, %ymm0
vblendvps %ymm0, %ymm1, %ymm5, %ymm0
vmulps 0x120(%rsp), %ymm0, %ymm1
vmulps 0xa0(%rsp), %ymm4, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm3, %ymm14, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm1, %ymm1, %ymm2
vmulps 0x220(%rsp), %ymm0, %ymm1
vmulps 0x2a0(%rsp), %ymm4, %ymm5
vaddps %ymm1, %ymm5, %ymm1
vmulps 0x240(%rsp), %ymm3, %ymm5
vrcpps %ymm2, %ymm6
vaddps %ymm1, %ymm5, %ymm1
vaddps %ymm1, %ymm1, %ymm1
vmulps %ymm6, %ymm2, %ymm5
vbroadcastss 0x1d0b8c1(%rip), %ymm8 # 0x1eec714
vsubps %ymm5, %ymm8, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%rsi), %ymm6
vmulps %ymm5, %ymm1, %ymm1
vcmpleps %ymm1, %ymm6, %ymm5
vbroadcastss 0x20(%rsi), %ymm6
vcmpleps %ymm6, %ymm1, %ymm6
vandps %ymm6, %ymm5, %ymm5
vcmpneqps 0x1d4007a(%rip), %ymm2, %ymm2 # 0x1f20f00
vandps %ymm5, %ymm2, %ymm2
vpslld $0x1f, %xmm2, %xmm5
vpsrad $0x1f, %xmm5, %xmm5
vextractf128 $0x1, %ymm2, %xmm2
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vinsertf128 $0x1, %xmm2, %ymm5, %ymm2
vandps 0x460(%rsp), %ymm7, %ymm5
vtestps %ymm5, %ymm2
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r8
je 0x1e15f0
vandps %ymm5, %ymm2, %ymm2
vmovaps 0x4a0(%rsp), %ymm5
vmovaps %ymm5, 0x2c0(%rsp)
vmovaps 0x480(%rsp), %ymm5
vmovaps %ymm5, 0x2e0(%rsp)
vmovaps 0x440(%rsp), %ymm5
vmovaps %ymm5, 0x300(%rsp)
leaq 0x198(%rsp), %rax
movq %rax, 0x320(%rsp)
vmovaps %ymm2, 0x340(%rsp)
vmovaps %ymm1, 0x3a0(%rsp)
vmovaps %ymm3, 0x3c0(%rsp)
vmovaps %ymm4, 0x3e0(%rsp)
vmovaps %ymm0, 0x400(%rsp)
movq 0x90(%rsp), %rax
movq (%rax), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdx,8), %rdi
movl 0x24(%rsi), %eax
testl %eax, 0x34(%rdi)
vmovaps 0x100(%rsp), %xmm11
vmovaps 0xf0(%rsp), %xmm12
vmovaps 0xe0(%rsp), %xmm13
vmovaps 0xd0(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0x80(%rsp), %xmm7
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm6
je 0x1e14ee
movq 0x90(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x50(%rsp), %xmm4
vmovaps 0x40(%rsp), %xmm3
vmovaps 0x30(%rsp), %xmm5
jne 0x1e0fde
movb $0x1, %r10b
movl %r10d, 0xa0(%rsp)
cmpq $0x0, 0x48(%rdi)
je 0x1e150b
movq %rax, 0x220(%rsp)
movq %rdi, 0x120(%rsp)
movl %ecx, 0x280(%rsp)
vmovaps 0x300(%rsp), %ymm0
vbroadcastss 0x1d3febd(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm0, %ymm1
vbroadcastss 0x1d0ffd4(%rip), %ymm2 # 0x1ef0fe8
vcmpnltps %ymm2, %ymm1, %ymm1
vrcpps %ymm0, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vbroadcastss 0x1d0b6ea(%rip), %ymm3 # 0x1eec714
vsubps %ymm0, %ymm3, %ymm0
vmulps %ymm0, %ymm2, %ymm0
vaddps %ymm0, %ymm2, %ymm0
vandps %ymm0, %ymm1, %ymm1
vmulps 0x2c0(%rsp), %ymm1, %ymm0
vminps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vmovaps %ymm0, 0x360(%rsp)
vmulps 0x2e0(%rsp), %ymm1, %ymm1
vminps %ymm3, %ymm1, %ymm1
vmovaps %ymm1, 0x380(%rsp)
movq 0x320(%rsp), %rdi
movq (%rdi), %rax
movq 0x8(%rdi), %rcx
vmovdqu (%rax,%rcx,4), %xmm2
cmpq $0x3, 0x10(%rdi)
vmovdqa %xmm2, %xmm3
jb 0x1e1093
vmovdqu (%rax,%rcx,8), %xmm3
vmovdqu (%rax), %xmm4
cmpq $0x2, %rcx
je 0x1e1671
movq 0x120(%rsp), %rdi
vinsertf128 $0x1, %xmm2, %ymm4, %ymm5
vinsertf128 $0x1, %xmm3, %ymm2, %ymm6
vunpcklps %ymm6, %ymm5, %ymm5 # ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
vpshufd $0xa5, %xmm4, %xmm4 # xmm4 = xmm4[1,1,2,2]
vpshufd $0xa5, %xmm2, %xmm6 # xmm6 = xmm2[1,1,2,2]
vinsertf128 $0x1, %xmm6, %ymm4, %ymm7
vpshufd $0x94, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,2]
vpshufd $0x94, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,2]
vinsertf128 $0x1, %xmm3, %ymm2, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vpsrld $0x10, %xmm5, %xmm9
vextractf128 $0x1, %ymm5, %xmm10
vpsrld $0x10, %xmm10, %xmm10
vbroadcastss 0x1d3fe68(%rip), %ymm8 # 0x1f20f64
vandps %ymm5, %ymm8, %ymm5
vcvtdq2ps %ymm5, %ymm5
vbroadcastss 0x1d3d3d3(%rip), %ymm0 # 0x1f1e4e0
vmulps %ymm0, %ymm5, %ymm5
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vcvtdq2ps %ymm9, %ymm9
vmulps %ymm0, %ymm9, %ymm9
vpsrld $0x10, %xmm4, %xmm4
vpsrld $0x10, %xmm6, %xmm6
vandps %ymm7, %ymm8, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm0, %ymm7, %ymm7
vinsertf128 $0x1, %xmm6, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpsrld $0x10, %xmm2, %xmm2
vpsrld $0x10, %xmm3, %xmm3
vandps 0x140(%rsp), %ymm8, %ymm6
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm0, %ymm6, %ymm6
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm0, %ymm2, %ymm2
vmovaps 0xa0(%rsp), %ymm0
vmulps %ymm7, %ymm0, %ymm3
vmulps %ymm4, %ymm0, %ymm4
vmulps %ymm6, %ymm1, %ymm6
vaddps %ymm3, %ymm6, %ymm3
vmulps %ymm2, %ymm1, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1d0b57d(%rip), %ymm4 # 0x1eec714
vsubps %ymm0, %ymm4, %ymm0
vsubps %ymm1, %ymm0, %ymm0
vmulps %ymm5, %ymm0, %ymm1
vaddps %ymm1, %ymm3, %ymm1
vmulps %ymm0, %ymm9, %ymm0
vaddps %ymm0, %ymm2, %ymm0
vmovaps %ymm1, 0x360(%rsp)
vmovaps %ymm0, 0x380(%rsp)
vmovaps 0x340(%rsp), %ymm0
vmovmskps %ymm0, %eax
bsfq %rax, %r10
movq %rax, 0x240(%rsp)
testl %eax, %eax
setne %al
movl %eax, 0xa0(%rsp)
je 0x1e1628
movq 0x90(%rsp), %rax
movq 0x8(%rax), %rax
movq %rax, 0x260(%rsp)
movq %r11, 0x200(%rsp)
vmovaps 0x80(%rsp), %xmm7
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x60(%rsp), %xmm6
movq %rdx, 0x1e0(%rsp)
movq %rsi, 0x118(%rsp)
vmovss 0x360(%rsp,%r10,4), %xmm0
vmovss 0x380(%rsp,%r10,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x2a0(%rsp)
vmovss 0x3a0(%rsp,%r10,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
vmovss 0x3c0(%rsp,%r10,4), %xmm2
vmovss 0x3e0(%rsp,%r10,4), %xmm3
vmovss 0x400(%rsp,%r10,4), %xmm4
vmovss %xmm2, 0x160(%rsp)
vmovss %xmm3, 0x164(%rsp)
vmovss %xmm4, 0x168(%rsp)
vmovss %xmm0, 0x16c(%rsp)
vmovss %xmm1, 0x170(%rsp)
movl 0x280(%rsp), %eax
movl %eax, 0x174(%rsp)
movl %edx, 0x178(%rsp)
movq 0x260(%rsp), %rcx
movl (%rcx), %eax
movl %eax, 0x17c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x180(%rsp)
orl $-0x1, 0x9c(%rsp)
leaq 0x9c(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movq 0x18(%rdi), %rax
movq %rax, 0x1b8(%rsp)
movq %rcx, 0x1c0(%rsp)
movq %rsi, 0x1c8(%rsp)
leaq 0x160(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x1, 0x1d8(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
movq %r10, 0x140(%rsp)
je 0x1e13db
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x140(%rsp), %r10
movq 0x120(%rsp), %rdi
movq 0x1e0(%rsp), %rdx
vmovaps 0x60(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
movq 0x18(%rsp), %r8
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm7
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0xd0(%rsp), %xmm14
movq 0x20(%rsp), %r9
vmovaps 0xe0(%rsp), %xmm13
vmovaps 0xf0(%rsp), %xmm12
vmovaps 0x100(%rsp), %xmm11
movq 0x200(%rsp), %r11
movq 0x118(%rsp), %rsi
movq 0x1b0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1e14a7
movq 0x220(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1e1660
movq 0x220(%rsp), %rcx
testb $0x2, (%rcx)
movq 0x120(%rsp), %rdi
jne 0x1e140f
testb $0x40, 0x3e(%rdi)
je 0x1e1496
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x140(%rsp), %r10
movq 0x120(%rsp), %rdi
movq 0x1e0(%rsp), %rdx
vmovaps 0x60(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
movq 0x18(%rsp), %r8
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm7
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0xd0(%rsp), %xmm14
movq 0x20(%rsp), %r9
vmovaps 0xe0(%rsp), %xmm13
vmovaps 0xf0(%rsp), %xmm12
vmovaps 0x100(%rsp), %xmm11
movq 0x200(%rsp), %r11
movq 0x118(%rsp), %rsi
movq 0x1b0(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x1e1660
vmovss 0x2a0(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movq 0x240(%rsp), %rax
btcq %r10, %rax
bsfq %rax, %r10
movq %rax, 0x240(%rsp)
testq %rax, %rax
setne %al
movl %eax, 0xa0(%rsp)
vmovaps 0x50(%rsp), %xmm4
vmovaps 0x40(%rsp), %xmm3
jne 0x1e1233
jmp 0x1e150b
movl $0x0, 0xa0(%rsp)
vmovaps 0x50(%rsp), %xmm4
vmovaps 0x40(%rsp), %xmm3
vmovaps 0x30(%rsp), %xmm5
xorl %ecx, %ecx
movl 0xa0(%rsp), %eax
testb $0x1, %al
je 0x1e1524
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rax
jmp 0x1e152f
xorl %eax, %eax
testq %rcx, %rcx
jne 0x1e1654
cmpl $0x3, %eax
jne 0x1e0860
jmp 0x1e06c8
andq $-0x10, %r10
movl 0x2c(%r10), %eax
movq 0x30(%r10,%rax), %rcx
xorl %eax, %eax
movq %r10, 0x28(%rsp)
jmp 0x1e1514
vshufps $0x54, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
vshufps $0x54, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
vshufps $0x54, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
jmp 0x1e09e3
vshufps $0x54, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
vshufps $0x54, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
vshufps $0x54, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
jmp 0x1e0a14
vshufps $0x54, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
vshufps $0x54, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
vshufps $0x54, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,1,1,1]
jmp 0x1e0a71
movl $0x0, 0xa0(%rsp)
vmovaps 0x100(%rsp), %xmm11
vmovaps 0xf0(%rsp), %xmm12
vmovaps 0xe0(%rsp), %xmm13
movq 0x20(%rsp), %r9
vmovaps 0xd0(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm4
vmovaps 0x80(%rsp), %xmm7
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm3
movq 0x18(%rsp), %r8
jmp 0x1e1643
movl $0x0, 0xa0(%rsp)
vmovaps 0x100(%rsp), %xmm11
vmovaps 0xf0(%rsp), %xmm12
vmovaps 0xe0(%rsp), %xmm13
vmovaps 0xd0(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm4
vmovaps 0x80(%rsp), %xmm7
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm3
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x60(%rsp), %xmm6
jmp 0x1e150b
movq %rcx, (%r11)
addq $0x8, %r11
jmp 0x1e152f
vmovaps 0x50(%rsp), %xmm4
vmovaps 0x40(%rsp), %xmm3
jmp 0x1e150b
vpshufd $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
vpshufd $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
vpshufd $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
jmp 0x1e10a1
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, true, embree::avx::SubdivPatch1MBIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xd20, %rsp # imm = 0xD20
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1e16ba
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x1e16a8
leaq 0x568(%rsp), %r11
movq 0x70(%rax), %rax
movq %rax, -0x8(%r11)
vmovaps 0x10(%rsi), %xmm2
vxorps %xmm6, %xmm6, %xmm6
vmaxss 0xc(%rsi), %xmm6, %xmm1
vbroadcastss 0x1d3f7d4(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d0f8eb(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1d0b009(%rip), %xmm4 # 0x1eec714
vdivps %xmm2, %xmm4, %xmm2
vbroadcastss 0x1d3f848(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d3e7e9(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d3e7e0(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss (%rsi), %xmm11
vbroadcastss 0x4(%rsi), %xmm12
xorl %r8d, %r8d
vucomiss %xmm6, %xmm3
vbroadcastss 0x8(%rsi), %xmm13
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm14 # xmm14 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm15 # xmm15 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[2,2,2,2]
vshufps $0x0, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm10 # xmm10 = xmm2[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm6, %xmm4
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r12d, %r12d
vucomiss %xmm6, %xmm5
setb %r12b
shll $0x4, %r12d
orq $0x40, %r12
movq %r8, %r13
xorq $0x10, %r13
movq %r9, %r15
xorq $0x10, %r15
movq %r12, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[0,0,0,0]
leaq 0x1f6e7b5(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x440(%rsp)
movq $0x0, 0x38(%rsp)
movq %rdx, 0x158(%rsp)
movq %rsi, 0x28(%rsp)
vmovaps %xmm11, 0x140(%rsp)
vmovaps %xmm12, 0x130(%rsp)
vmovaps %xmm13, 0x120(%rsp)
vmovaps %xmm14, 0x110(%rsp)
vmovaps %xmm7, 0xc0(%rsp)
vmovaps %xmm8, 0xb0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovaps %xmm10, 0x90(%rsp)
vmovaps %xmm5, 0x70(%rsp)
vmovaps %xmm6, 0x80(%rsp)
movq %r8, 0x20(%rsp)
vmovaps %xmm15, 0x100(%rsp)
movq %r9, 0x18(%rsp)
leaq 0x560(%rsp), %rax
cmpq %rax, %r11
je 0x1e16a8
movq -0x8(%r11), %r10
addq $-0x8, %r11
testb $0x8, %r10b
jne 0x1e1958
movq %r10, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm11, %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vmaxps %xmm1, %xmm5, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm12, %xmm2, %xmm2
vmulps %xmm2, %xmm15, %xmm2
vmulps 0x80(%rax,%r12), %xmm0, %xmm3
vaddps 0x20(%rax,%r12), %xmm3, %xmm3
vsubps %xmm13, %xmm3, %xmm3
vmulps %xmm3, %xmm7, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r13), %xmm0, %xmm2
vaddps 0x20(%rax,%r13), %xmm2, %xmm2
vsubps %xmm11, %xmm2, %xmm2
vmulps %xmm2, %xmm8, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vminps %xmm2, %xmm6, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm12, %xmm3, %xmm3
vmulps 0x80(%rax,%rbx), %xmm0, %xmm4
vaddps 0x20(%rax,%rbx), %xmm4, %xmm4
vmulps %xmm3, %xmm9, %xmm3
vsubps %xmm13, %xmm4, %xmm4
vmulps %xmm4, %xmm10, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r10d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1e19a0
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r14d
testb $0x8, %r10b
jne 0x1e199c
testq %r14, %r14
je 0x1e19c5
andq $-0x10, %r10
bsfq %r14, %rcx
leaq -0x1(%r14), %rdi
xorl %eax, %eax
movq (%r10,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r14, %rdi
jne 0x1e19ca
movq %rcx, %r10
testl %eax, %eax
je 0x1e1876
jmp 0x1e1a0d
pushq $0x6
jmp 0x1e19c7
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1e194f
pushq $0x4
popq %rax
jmp 0x1e1992
movq %rcx, (%r11)
addq $0x8, %r11
bsfq %rdi, %rdx
leaq -0x1(%rdi), %rcx
movq (%r10,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rdi, %rcx
je 0x1e1a08
movq %rdx, (%r11)
addq $0x8, %r11
bsfq %rcx, %rdx
leaq -0x1(%rcx), %rdi
jmp 0x1e19d9
movq %rdx, %r10
jmp 0x1e1992
cmpl $0x6, %eax
jne 0x1e27be
movl %r10d, %eax
andl $0xf, %eax
cmpl $0x8, %eax
jne 0x1e27cc
movq 0x38(%rsp), %rsi
movl 0xc(%rsi), %eax
movl 0x10(%rsi), %ecx
movslq 0xd8(%rsp), %rdx
movl 0x24(%rsi), %r8d
movl 0x28(%rsi), %edi
imulq %rdi, %rdx
addq %rsi, %r8
addq %rdx, %r8
shrq $0x4, %r10
leaq (%r8,%r10,4), %rdx
leaq (%r8,%r10,4), %r9
addq $0x2c, %r9
vmovups 0x2c(%rdx,%rax,4), %xmm0
vmovaps %xmm0, %xmm2
cmpq $0x3, %rcx
jb 0x1e1a6f
vmovups (%r9,%rax,8), %xmm2
vmovups 0x2c(%r8,%r10,4), %xmm12
cmpq $0x2, %rax
je 0x1e282f
movq 0x38(%rsp), %rsi
movl 0x14(%rsi), %esi
leaq (%rdx,%rsi,4), %r8
addq $0x2c, %r8
vmovups (%r8,%rax,4), %xmm4
vmovaps %xmm4, %xmm3
cmpl $0x3, %ecx
jb 0x1e1aa5
vmovups (%r8,%rax,8), %xmm3
vmovups (%r8), %xmm1
cmpl $0x2, %eax
je 0x1e2844
vmovaps %xmm3, 0xe0(%rsp)
vmovaps %ymm1, 0x160(%rsp)
movq %rsi, 0x40(%rsp)
leaq (%rdx,%rsi,8), %r10
addq $0x2c, %r10
vmovups (%r10,%rax,4), %xmm6
vmovaps %xmm6, %xmm15
cmpl $0x3, %ecx
jb 0x1e1ae7
vmovups (%r10,%rax,8), %xmm15
vmovups (%r10), %xmm5
cmpl $0x2, %eax
movq %r11, 0x30(%rsp)
je 0x1e286a
shrl $0x2, %edi
shll $0x2, %edi
leaq (%r9,%rdi), %r11
vmovups (%r11,%rax,4), %xmm9
vmovaps %xmm9, %xmm10
cmpl $0x3, %ecx
jb 0x1e1b1a
vmovups (%r11,%rax,8), %xmm10
vmovups (%r9,%rdi), %xmm3
cmpl $0x2, %eax
je 0x1e287f
movq 0x30(%rsp), %r11
movq 0x18(%rsp), %r9
addq %rdi, %r8
vmovups (%r8,%rax,4), %xmm13
vmovaps %xmm13, %xmm14
cmpl $0x3, %ecx
jb 0x1e1b4c
vmovups (%r8,%rax,8), %xmm14
vmovups (%r8), %xmm1
vmovaps %ymm5, %ymm7
cmpl $0x2, %eax
je 0x1e2895
addq %rdi, %r10
vmovups (%r10), %xmm8
vmovups (%r10,%rax,4), %xmm5
vmovaps %xmm5, %xmm11
cmpl $0x3, %ecx
jb 0x1e1b7b
vmovups (%r10,%rax,8), %xmm11
vinsertf128 $0x1, %xmm0, %ymm12, %ymm12
vinsertf128 $0x1, %xmm2, %ymm0, %ymm0
vunpcklps %ymm0, %ymm12, %ymm2 # ymm2 = ymm12[0],ymm0[0],ymm12[1],ymm0[1],ymm12[4],ymm0[4],ymm12[5],ymm0[5]
vmovaps %ymm2, 0x1c0(%rsp)
vshufps $0xa5, %ymm12, %ymm12, %ymm2 # ymm2 = ymm12[1,1,2,2,5,5,6,6]
vmovaps %ymm2, 0x280(%rsp)
vshufps $0x94, %ymm0, %ymm0, %ymm12 # ymm12 = ymm0[0,1,1,2,4,5,5,6]
vmovaps 0x160(%rsp), %ymm0
vinsertf128 $0x1, %xmm4, %ymm0, %ymm2
vinsertf128 $0x1, 0xe0(%rsp), %ymm4, %ymm4
vunpcklps %ymm4, %ymm2, %ymm0 # ymm0 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[4],ymm4[4],ymm2[5],ymm4[5]
vshufps $0xa5, %ymm2, %ymm2, %ymm2 # ymm2 = ymm2[1,1,2,2,5,5,6,6]
vmovaps %ymm2, 0x260(%rsp)
vshufps $0x94, %ymm4, %ymm4, %ymm2 # ymm2 = ymm4[0,1,1,2,4,5,5,6]
vmovaps %ymm2, 0x160(%rsp)
vinsertf128 $0x1, %xmm6, %ymm7, %ymm2
vinsertf128 $0x1, %xmm15, %ymm6, %ymm6
vunpcklps %ymm6, %ymm2, %ymm7 # ymm7 = ymm2[0],ymm6[0],ymm2[1],ymm6[1],ymm2[4],ymm6[4],ymm2[5],ymm6[5]
vshufps $0xa5, %ymm2, %ymm2, %ymm4 # ymm4 = ymm2[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm6, %ymm6, %ymm2 # ymm2 = ymm6[0,1,1,2,4,5,5,6]
vmovaps %ymm2, 0xe0(%rsp)
vinsertf128 $0x1, %xmm9, %ymm3, %ymm6
vinsertf128 $0x1, %xmm10, %ymm9, %ymm10
vunpcklps %ymm10, %ymm6, %ymm2 # ymm2 = ymm6[0],ymm10[0],ymm6[1],ymm10[1],ymm6[4],ymm10[4],ymm6[5],ymm10[5]
vshufps $0xa5, %ymm6, %ymm6, %ymm9 # ymm9 = ymm6[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm10, %ymm10, %ymm6 # ymm6 = ymm10[0,1,1,2,4,5,5,6]
vinsertf128 $0x1, %xmm13, %ymm1, %ymm1
vinsertf128 $0x1, %xmm14, %ymm13, %ymm14
vunpcklps %ymm14, %ymm1, %ymm13 # ymm13 = ymm1[0],ymm14[0],ymm1[1],ymm14[1],ymm1[4],ymm14[4],ymm1[5],ymm14[5]
vshufps $0xa5, %ymm1, %ymm1, %ymm10 # ymm10 = ymm1[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm14, %ymm14, %ymm1 # ymm1 = ymm14[0,1,1,2,4,5,5,6]
vmovaps %ymm12, %ymm15
cmpl $0x2, %eax
je 0x1e28ab
movq 0x158(%rsp), %rdi
movq 0x28(%rsp), %rsi
movq 0x40(%rsp), %r8
vinsertf128 $0x1, %xmm5, %ymm8, %ymm14
vinsertf128 $0x1, %xmm11, %ymm5, %ymm11
vmovaps 0x2d0(%rsp), %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[0,0,0,0]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm12
vmovss 0x1d0aa8e(%rip), %xmm5 # 0x1eec714
vsubss %xmm8, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm8
vmulps %ymm2, %ymm12, %ymm2
vmulps 0x1c0(%rsp), %ymm8, %ymm5
vaddps %ymm2, %ymm5, %ymm2
vmovaps %ymm2, 0x40(%rsp)
vmulps %ymm13, %ymm12, %ymm5
vmulps %ymm0, %ymm8, %ymm0
vaddps %ymm5, %ymm0, %ymm2
vunpcklps %ymm11, %ymm14, %ymm5 # ymm5 = ymm14[0],ymm11[0],ymm14[1],ymm11[1],ymm14[4],ymm11[4],ymm14[5],ymm11[5]
vmulps %ymm5, %ymm12, %ymm5
vmulps %ymm7, %ymm8, %ymm7
vaddps %ymm5, %ymm7, %ymm5
vmulps %ymm9, %ymm12, %ymm7
vmulps 0x280(%rsp), %ymm8, %ymm9
vaddps %ymm7, %ymm9, %ymm7
vshufps $0xa5, %ymm14, %ymm14, %ymm9 # ymm9 = ymm14[1,1,2,2,5,5,6,6]
vshufps $0x94, %ymm11, %ymm11, %ymm11 # ymm11 = ymm11[0,1,1,2,4,5,5,6]
vmulps %ymm10, %ymm12, %ymm10
vmulps %ymm9, %ymm12, %ymm9
vmulps 0x260(%rsp), %ymm8, %ymm3
vaddps %ymm3, %ymm10, %ymm0
vmulps %ymm4, %ymm8, %ymm4
vaddps %ymm4, %ymm9, %ymm9
vmulps %ymm6, %ymm12, %ymm4
vmulps %ymm1, %ymm12, %ymm1
vmulps %ymm11, %ymm12, %ymm6
vmulps %ymm15, %ymm8, %ymm10
vaddps %ymm4, %ymm10, %ymm10
vmulps 0x160(%rsp), %ymm8, %ymm4
vaddps %ymm1, %ymm4, %ymm12
vmulps 0xe0(%rsp), %ymm8, %ymm1
vaddps %ymm6, %ymm1, %ymm15
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm13
vbroadcastss 0x8(%rsi), %ymm14
vmovaps 0x40(%rsp), %ymm1
vsubps %ymm6, %ymm1, %ymm8
vsubps %ymm13, %ymm2, %ymm2
vsubps %ymm14, %ymm5, %ymm11
vsubps %ymm6, %ymm7, %ymm1
vmovaps %ymm1, 0x40(%rsp)
vsubps %ymm13, %ymm0, %ymm3
vsubps %ymm14, %ymm9, %ymm0
vsubps %ymm6, %ymm10, %ymm4
vsubps %ymm13, %ymm12, %ymm6
vsubps %ymm14, %ymm15, %ymm5
vsubps %ymm8, %ymm4, %ymm14
vsubps %ymm2, %ymm6, %ymm15
vsubps %ymm11, %ymm5, %ymm13
vaddps %ymm2, %ymm6, %ymm7
vaddps %ymm5, %ymm11, %ymm9
vmulps %ymm7, %ymm13, %ymm10
vmulps %ymm9, %ymm15, %ymm12
vsubps %ymm10, %ymm12, %ymm12
vaddps %ymm4, %ymm8, %ymm10
vmulps %ymm9, %ymm14, %ymm9
vmovaps %ymm13, 0x2a0(%rsp)
vmulps %ymm10, %ymm13, %ymm13
vsubps %ymm9, %ymm13, %ymm9
vmovaps %ymm15, 0x180(%rsp)
vmulps %ymm10, %ymm15, %ymm10
vmovaps %ymm14, 0x1a0(%rsp)
vmulps %ymm7, %ymm14, %ymm7
vsubps %ymm10, %ymm7, %ymm13
vbroadcastss 0x14(%rsi), %ymm10
vbroadcastss 0x18(%rsi), %ymm15
vmulps %ymm13, %ymm15, %ymm13
vmulps %ymm9, %ymm10, %ymm14
vbroadcastss 0x10(%rsi), %ymm1
vmovaps %ymm1, 0xe0(%rsp)
vaddps %ymm13, %ymm14, %ymm13
vmulps %ymm1, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm9
vsubps %ymm3, %ymm2, %ymm12
vsubps %ymm0, %ymm11, %ymm1
vmovaps %ymm2, 0x260(%rsp)
vaddps %ymm3, %ymm2, %ymm14
vmovaps %ymm11, 0x1c0(%rsp)
vaddps %ymm0, %ymm11, %ymm13
vmovaps %ymm0, %ymm11
vmulps %ymm1, %ymm14, %ymm0
vmulps %ymm13, %ymm12, %ymm2
vsubps %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x160(%rsp)
vmovaps 0x40(%rsp), %ymm7
vsubps %ymm7, %ymm8, %ymm0
vmulps %ymm0, %ymm13, %ymm2
vmovaps %ymm8, 0x280(%rsp)
vaddps %ymm7, %ymm8, %ymm8
vmovaps %ymm7, %ymm13
vmovaps %ymm1, 0x500(%rsp)
vmulps %ymm1, %ymm8, %ymm7
vsubps %ymm2, %ymm7, %ymm1
vmovaps %ymm12, 0x520(%rsp)
vmulps %ymm8, %ymm12, %ymm7
vmovaps %ymm0, 0x4e0(%rsp)
vmulps %ymm0, %ymm14, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm7, %ymm15, %ymm7
vmulps %ymm1, %ymm10, %ymm1
vaddps %ymm7, %ymm1, %ymm1
vmovaps 0xe0(%rsp), %ymm14
vmulps 0x160(%rsp), %ymm14, %ymm2
vaddps %ymm1, %ymm2, %ymm7
vmovaps %ymm7, 0x4a0(%rsp)
vsubps %ymm4, %ymm13, %ymm1
vaddps %ymm4, %ymm13, %ymm0
vmovaps %ymm1, %ymm13
vsubps %ymm6, %ymm3, %ymm8
vaddps %ymm6, %ymm3, %ymm1
vsubps %ymm5, %ymm11, %ymm12
vaddps %ymm5, %ymm11, %ymm2
vmulps %ymm1, %ymm12, %ymm3
vmulps %ymm2, %ymm8, %ymm5
vsubps %ymm3, %ymm5, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vmulps %ymm0, %ymm12, %ymm5
vsubps %ymm2, %ymm5, %ymm2
vmulps %ymm0, %ymm8, %ymm0
vmulps %ymm1, %ymm13, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm15, 0x540(%rsp)
vmulps %ymm0, %ymm15, %ymm0
vmovaps %ymm10, 0x40(%rsp)
vmulps %ymm2, %ymm10, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps %ymm3, %ymm14, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vaddps %ymm7, %ymm9, %ymm1
vaddps %ymm1, %ymm0, %ymm4
vbroadcastss 0x1d3ef88(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm4, %ymm1
vbroadcastss 0x1d3ef83(%rip), %ymm2 # 0x1f20ecc
vmulps %ymm2, %ymm1, %ymm1
vminps %ymm7, %ymm9, %ymm2
vminps %ymm0, %ymm2, %ymm2
vbroadcastss 0x1d3ef62(%rip), %ymm3 # 0x1f20ec0
vxorps %ymm3, %ymm1, %ymm3
vcmpnltps %ymm3, %ymm2, %ymm2
vmovaps %ymm9, 0x4c0(%rsp)
vmaxps %ymm7, %ymm9, %ymm3
vmaxps %ymm0, %ymm3, %ymm0
vcmpleps %ymm1, %ymm0, %ymm0
vorps %ymm0, %ymm2, %ymm0
imulq $0xc, %r8, %r8
addq %r8, %rdx
addq $0x2c, %rdx
movq %rdx, 0x218(%rsp)
movq %rax, 0x220(%rsp)
movq %rcx, 0x228(%rsp)
movq 0x38(%rsp), %rax
movl 0x18(%rax), %ecx
movq %rcx, 0x160(%rsp)
movl 0x1c(%rax), %ecx
leaq 0x218(%rsp), %rax
movq %rax, 0x340(%rsp)
vmovaps 0x440(%rsp), %ymm7
vtestps %ymm7, %ymm0
je 0x1e28c1
vmovaps %ymm4, 0x460(%rsp)
vmovaps 0x2a0(%rsp), %ymm5
vmovaps 0x520(%rsp), %ymm9
vmovaps %ymm0, 0x480(%rsp)
vmulps %ymm5, %ymm9, %ymm0
vmovaps %ymm8, %ymm15
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x500(%rsp), %ymm4
vmulps %ymm4, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm1
vmulps %ymm4, %ymm15, %ymm2
vmulps %ymm12, %ymm9, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vandps %ymm6, %ymm0, %ymm0
vandps %ymm6, %ymm2, %ymm2
vcmpltps %ymm2, %ymm0, %ymm0
vblendvps %ymm0, %ymm1, %ymm3, %ymm3
vmovaps 0x4e0(%rsp), %ymm11
vmulps %ymm12, %ymm11, %ymm0
vmulps %ymm5, %ymm11, %ymm1
vmovaps 0x1a0(%rsp), %ymm5
vmulps %ymm4, %ymm5, %ymm2
vsubps %ymm2, %ymm1, %ymm1
vmulps %ymm4, %ymm13, %ymm4
vsubps %ymm0, %ymm4, %ymm4
vandps %ymm6, %ymm2, %ymm2
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm0, %ymm2, %ymm0
vblendvps %ymm0, %ymm1, %ymm4, %ymm4
vmulps %ymm13, %ymm9, %ymm0
vmulps %ymm5, %ymm9, %ymm1
vmulps %ymm11, %ymm8, %ymm2
vmulps %ymm15, %ymm11, %ymm5
vsubps %ymm2, %ymm1, %ymm1
vsubps %ymm0, %ymm5, %ymm5
vandps %ymm6, %ymm2, %ymm2
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm0, %ymm2, %ymm0
vblendvps %ymm0, %ymm1, %ymm5, %ymm0
vmulps 0x540(%rsp), %ymm0, %ymm1
vmulps 0x40(%rsp), %ymm4, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm3, %ymm14, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm1, %ymm1, %ymm2
vmulps 0x1c0(%rsp), %ymm0, %ymm1
vmulps 0x260(%rsp), %ymm4, %ymm5
vaddps %ymm1, %ymm5, %ymm1
vmulps 0x280(%rsp), %ymm3, %ymm5
vrcpps %ymm2, %ymm6
vaddps %ymm1, %ymm5, %ymm1
vaddps %ymm1, %ymm1, %ymm1
vmulps %ymm6, %ymm2, %ymm5
vbroadcastss 0x1d0a60f(%rip), %ymm8 # 0x1eec714
vsubps %ymm5, %ymm8, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%rsi), %ymm6
vmulps %ymm5, %ymm1, %ymm1
vcmpleps %ymm1, %ymm6, %ymm5
vbroadcastss 0x20(%rsi), %ymm6
vcmpleps %ymm6, %ymm1, %ymm6
vandps %ymm6, %ymm5, %ymm5
vcmpneqps 0x1d3edc8(%rip), %ymm2, %ymm2 # 0x1f20f00
vandps %ymm5, %ymm2, %ymm2
vpslld $0x1f, %xmm2, %xmm5
vpsrad $0x1f, %xmm5, %xmm5
vextractf128 $0x1, %ymm2, %xmm2
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vinsertf128 $0x1, %xmm2, %ymm5, %ymm2
vandps 0x480(%rsp), %ymm7, %ymm5
vtestps %ymm5, %ymm2
movq 0x20(%rsp), %r8
je 0x1e28e2
vandps %ymm5, %ymm2, %ymm2
vmovaps 0x4c0(%rsp), %ymm5
vmovaps %ymm5, 0x2e0(%rsp)
vmovaps 0x4a0(%rsp), %ymm5
vmovaps %ymm5, 0x300(%rsp)
vmovaps 0x460(%rsp), %ymm5
vmovaps %ymm5, 0x320(%rsp)
leaq 0x218(%rsp), %rax
movq %rax, 0x340(%rsp)
vmovaps %ymm2, 0x360(%rsp)
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm3, 0x3e0(%rsp)
vmovaps %ymm4, 0x400(%rsp)
vmovaps %ymm0, 0x420(%rsp)
movq (%rdi), %rax
movq 0x1e8(%rax), %rax
movq 0x160(%rsp), %rdx
movq (%rax,%rdx,8), %rdx
movl 0x24(%rsi), %eax
testl %eax, 0x34(%rdx)
vmovaps 0x140(%rsp), %xmm11
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x120(%rsp), %xmm13
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x100(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm7
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x90(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm6
je 0x1e278f
movq 0x10(%rdi), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x70(%rsp), %xmm5
jne 0x1e228c
movb $0x1, %r10b
movl %r10d, 0x40(%rsp)
cmpq $0x0, 0x48(%rdx)
je 0x1e279d
movq %rax, 0x1c0(%rsp)
movq %rdx, 0x180(%rsp)
movl %ecx, 0x1a0(%rsp)
vmovaps 0x320(%rsp), %ymm0
vbroadcastss 0x1d3ec0f(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm0, %ymm1
vbroadcastss 0x1d0ed26(%rip), %ymm2 # 0x1ef0fe8
vcmpnltps %ymm2, %ymm1, %ymm1
vrcpps %ymm0, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vbroadcastss 0x1d0a43c(%rip), %ymm3 # 0x1eec714
vsubps %ymm0, %ymm3, %ymm0
vmulps %ymm0, %ymm2, %ymm0
vaddps %ymm0, %ymm2, %ymm0
vandps %ymm0, %ymm1, %ymm1
vmulps 0x2e0(%rsp), %ymm1, %ymm0
vminps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x40(%rsp)
vmovaps %ymm0, 0x380(%rsp)
vmulps 0x300(%rsp), %ymm1, %ymm1
vminps %ymm3, %ymm1, %ymm1
vmovaps %ymm1, 0x3a0(%rsp)
movq 0x340(%rsp), %rdx
movq (%rdx), %rax
movq 0x8(%rdx), %rcx
vmovdqu (%rax,%rcx,4), %xmm2
cmpq $0x3, 0x10(%rdx)
vmovdqa %xmm2, %xmm3
jb 0x1e233e
vmovdqu (%rax,%rcx,8), %xmm3
vmovdqu (%rax), %xmm4
cmpq $0x2, %rcx
je 0x1e295b
movl 0x1a0(%rsp), %ecx
movq 0x180(%rsp), %rdx
vinsertf128 $0x1, %xmm2, %ymm4, %ymm5
vinsertf128 $0x1, %xmm3, %ymm2, %ymm6
vunpcklps %ymm6, %ymm5, %ymm5 # ymm5 = ymm5[0],ymm6[0],ymm5[1],ymm6[1],ymm5[4],ymm6[4],ymm5[5],ymm6[5]
vpshufd $0xa5, %xmm4, %xmm4 # xmm4 = xmm4[1,1,2,2]
vpshufd $0xa5, %xmm2, %xmm6 # xmm6 = xmm2[1,1,2,2]
vinsertf128 $0x1, %xmm6, %ymm4, %ymm7
vpshufd $0x94, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,2]
vpshufd $0x94, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,2]
vinsertf128 $0x1, %xmm3, %ymm2, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vpsrld $0x10, %xmm5, %xmm9
vextractf128 $0x1, %ymm5, %xmm10
vpsrld $0x10, %xmm10, %xmm10
vbroadcastss 0x1d3ebb6(%rip), %ymm8 # 0x1f20f64
vandps %ymm5, %ymm8, %ymm5
vcvtdq2ps %ymm5, %ymm5
vbroadcastss 0x1d3c121(%rip), %ymm0 # 0x1f1e4e0
vmulps %ymm0, %ymm5, %ymm5
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vcvtdq2ps %ymm9, %ymm9
vmulps %ymm0, %ymm9, %ymm9
vpsrld $0x10, %xmm4, %xmm4
vpsrld $0x10, %xmm6, %xmm6
vandps %ymm7, %ymm8, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm0, %ymm7, %ymm7
vinsertf128 $0x1, %xmm6, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpsrld $0x10, %xmm2, %xmm2
vpsrld $0x10, %xmm3, %xmm3
vandps 0xe0(%rsp), %ymm8, %ymm6
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm0, %ymm6, %ymm6
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm0, %ymm2, %ymm2
vmovaps 0x40(%rsp), %ymm0
vmulps %ymm7, %ymm0, %ymm3
vmulps %ymm4, %ymm0, %ymm4
vmulps %ymm6, %ymm1, %ymm6
vaddps %ymm3, %ymm6, %ymm3
vmulps %ymm2, %ymm1, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1d0a2ce(%rip), %ymm4 # 0x1eec714
vsubps %ymm0, %ymm4, %ymm0
vsubps %ymm1, %ymm0, %ymm0
vmulps %ymm5, %ymm0, %ymm1
vaddps %ymm1, %ymm3, %ymm1
vmulps %ymm0, %ymm9, %ymm0
vaddps %ymm0, %ymm2, %ymm0
vmovaps %ymm1, 0x380(%rsp)
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x360(%rsp), %ymm0
vmovmskps %ymm0, %eax
bsfq %rax, %r10
movq %rax, 0xe0(%rsp)
testl %eax, %eax
setne %al
movl %eax, 0x40(%rsp)
je 0x1e2917
movq 0x8(%rdi), %rax
movq %rax, 0x2a0(%rsp)
vmovaps 0xc0(%rsp), %xmm7
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x90(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm6
movq %r10, %rdi
vmovss 0x380(%rsp,%rdi,4), %xmm0
vmovss 0x3a0(%rsp,%rdi,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x260(%rsp)
vmovss 0x3c0(%rsp,%rdi,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
vmovss 0x3e0(%rsp,%rdi,4), %xmm2
vmovss 0x400(%rsp,%rdi,4), %xmm3
movq %rdi, 0x280(%rsp)
vmovss 0x420(%rsp,%rdi,4), %xmm4
vmovss %xmm2, 0x1e0(%rsp)
vmovss %xmm3, 0x1e4(%rsp)
vmovss %xmm4, 0x1e8(%rsp)
vmovss %xmm0, 0x1ec(%rsp)
vmovss %xmm1, 0x1f0(%rsp)
movl %ecx, 0x1f4(%rsp)
movq 0x160(%rsp), %rax
movl %eax, 0x1f8(%rsp)
movq 0x2a0(%rsp), %rdi
movl (%rdi), %eax
movl %eax, 0x1fc(%rsp)
movl 0x4(%rdi), %eax
movl %eax, 0x200(%rsp)
orl $-0x1, 0xdc(%rsp)
leaq 0xdc(%rsp), %rax
movq %rax, 0x230(%rsp)
movq 0x18(%rdx), %rax
movq %rax, 0x238(%rsp)
movq %rdi, 0x240(%rsp)
movq %rsi, 0x248(%rsp)
leaq 0x1e0(%rsp), %rax
movq %rax, 0x250(%rsp)
movl $0x1, 0x258(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x1e2686
leaq 0x230(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x180(%rsp), %rdx
movl 0x1a0(%rsp), %ecx
vmovaps 0x80(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm5
movq 0x18(%rsp), %r9
vmovaps 0x90(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0xc0(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm15
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x120(%rsp), %xmm13
movq 0x20(%rsp), %r8
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x140(%rsp), %xmm11
movq 0x30(%rsp), %r11
movq 0x28(%rsp), %rsi
movq 0x230(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1e274f
movq 0x1c0(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x1e279d
movq 0x1c0(%rsp), %rdi
testb $0x2, (%rdi)
jne 0x1e26b2
testb $0x40, 0x3e(%rdx)
je 0x1e2742
leaq 0x230(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x180(%rsp), %rdx
movl 0x1a0(%rsp), %ecx
vmovaps 0x80(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm5
movq 0x18(%rsp), %r9
vmovaps 0x90(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0xc0(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm15
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x120(%rsp), %xmm13
movq 0x20(%rsp), %r8
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x140(%rsp), %xmm11
movq 0x30(%rsp), %r11
movq 0x28(%rsp), %rsi
movq 0x230(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x1e279d
vmovss 0x260(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movq 0xe0(%rsp), %rax
movq 0x280(%rsp), %rdi
btcq %rdi, %rax
bsfq %rax, %rdi
movq %rax, 0xe0(%rsp)
testq %rax, %rax
setne %al
movl %eax, 0x40(%rsp)
jne 0x1e24da
jmp 0x1e279d
movl $0x0, 0x40(%rsp)
vmovaps 0x70(%rsp), %xmm5
xorl %ecx, %ecx
movl 0x40(%rsp), %eax
testb $0x1, %al
je 0x1e27b3
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rax
jmp 0x1e27be
xorl %eax, %eax
testq %rcx, %rcx
jne 0x1e294f
cmpl $0x3, %eax
jne 0x1e185d
jmp 0x1e16a8
andq $-0x10, %r10
movq (%r10), %rdx
movl 0x8(%rdx), %eax
decl %eax
vcvtsi2ss %rax, %xmm13, %xmm0
vmulss 0x1c(%rsi), %xmm0, %xmm1
movl 0x2c(%rdx), %eax
vroundss $0x9, %xmm1, %xmm1, %xmm2
vaddss 0x1d0e1d9(%rip), %xmm0, %xmm0 # 0x1ef09cc
vminss %xmm0, %xmm2, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm0, %xmm2, %xmm0
vsubss %xmm0, %xmm1, %xmm1
vmovaps %xmm1, 0x2d0(%rsp)
vcvttss2si %xmm0, %ecx
movl %ecx, 0xd8(%rsp)
movslq %ecx, %rcx
leaq (%rax,%rcx,8), %rax
movq %rdx, 0x38(%rsp)
movq 0x30(%rdx,%rax), %rcx
xorl %eax, %eax
jmp 0x1e27a3
vshufps $0x54, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,1,1,1]
vshufps $0x54, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
vshufps $0x54, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
jmp 0x1e1a80
vshufps $0x54, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
vmovaps %ymm1, 0x160(%rsp)
vshufps $0x54, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
vshufps $0x54, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
vmovaps %xmm3, 0xe0(%rsp)
jmp 0x1e1ac5
vshufps $0x54, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
vshufps $0x54, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,1,1,1]
vshufps $0x54, %xmm15, %xmm15, %xmm15 # xmm15 = xmm15[0,1,1,1]
jmp 0x1e1afa
vshufps $0x54, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
vshufps $0x54, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,1,1,1]
vshufps $0x54, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,1,1,1]
jmp 0x1e1b29
vshufps $0x54, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,1,1,1]
vshufps $0x54, %xmm13, %xmm13, %xmm13 # xmm13 = xmm13[0,1,1,1]
vshufps $0x54, %xmm14, %xmm14, %xmm14 # xmm14 = xmm14[0,1,1,1]
jmp 0x1e1b5e
vshufps $0x54, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,1,1,1]
vshufps $0x54, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,1,1,1]
vshufps $0x54, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,1,1,1]
jmp 0x1e1c4b
movl $0x0, 0x40(%rsp)
vmovaps 0x140(%rsp), %xmm11
vmovaps 0x130(%rsp), %xmm12
movq 0x20(%rsp), %r8
jmp 0x1e28fc
movl $0x0, 0x40(%rsp)
vmovaps 0x140(%rsp), %xmm11
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x120(%rsp), %xmm13
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x100(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm7
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x90(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm6
jmp 0x1e279d
movq %rcx, (%r11)
addq $0x8, %r11
jmp 0x1e27be
vpshufd $0x54, %xmm4, %xmm4 # xmm4 = xmm4[0,1,1,1]
vpshufd $0x54, %xmm2, %xmm2 # xmm2 = xmm2[0,1,1,1]
vpshufd $0x54, %xmm3, %xmm3 # xmm3 = xmm3[0,1,1,1]
jmp 0x1e234c
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::ObjectIntersector1<false>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x8c8, %rsp # imm = 0x8C8
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1e29a2
addq $0x8c8, %rsp # imm = 0x8C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x1e2990
leaq 0x128(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%rsi), %xmm5, %xmm1
vmovaps 0x10(%rsi), %xmm2
vbroadcastss 0x1d3e4f0(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d0e607(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d09d17(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[0,0,0,0]
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm10 # xmm10 = xmm2[1,1,1,1]
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
vshufps $0xaa, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[2,2,2,2]
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %r15
xorq $0x10, %r15
movq %r10, %rcx
xorq $0x10, %rcx
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0x120(%rsp), %rbp
vmovaps %xmm6, 0x110(%rsp)
vmovaps %xmm7, 0x100(%rsp)
vmovaps %xmm8, 0xf0(%rsp)
movq %r8, 0x30(%rsp)
vmovaps %xmm9, 0xe0(%rsp)
vmovaps %xmm10, 0xd0(%rsp)
movq %r9, 0x28(%rsp)
vmovaps %xmm3, 0xc0(%rsp)
movq %r10, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %r15, 0x10(%rsp)
movq %rcx, 0x8(%rsp)
vmovaps %xmm4, 0xb0(%rsp)
vmovaps %xmm5, 0xa0(%rsp)
cmpq %rbp, %rdi
je 0x1e2990
movq -0x8(%rdi), %rbx
addq $-0x8, %rdi
testb $0x8, %bl
jne 0x1e2b95
vmovaps 0x20(%rbx,%r8), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%rbx,%r9), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r10), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbx,%r11), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%rbx,%r15), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbx,%rcx), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r12d
testb $0x8, %bl
jne 0x1e2bcd
testq %r12, %r12
je 0x1e2bd1
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %r14d, %r14d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r12, %rdx
jne 0x1e2bd7
movq %rax, %rbx
testl %r14d, %r14d
je 0x1e2b05
jmp 0x1e2c12
pushq $0x6
jmp 0x1e2bd3
pushq $0x4
popq %r14
jmp 0x1e2bc2
movq %rcx, %r13
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1e2c0a
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e2be9
movq %rcx, %rbx
movq %r13, %rcx
jmp 0x1e2bc2
cmpl $0x6, %r14d
jne 0x1e2d7e
movl %ebx, %eax
andl $0xf, %eax
xorl %r14d, %r14d
addq $-0x8, %rax
movq %rax, 0x50(%rsp)
setne %bpl
je 0x1e2d76
movq %rdi, 0x38(%rsp)
andq $-0x10, %rbx
movq 0x40(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x48(%rsp)
xorl %r15d, %r15d
movl (%rbx,%r15,8), %ecx
movq 0x48(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rax
movl 0x34(%rax), %edx
testl %edx, 0x24(%rsi)
je 0x1e2d41
movl 0x4(%rbx,%r15,8), %edx
orl $-0x1, 0x4(%rsp)
leaq 0x4(%rsp), %rdi
movq %rdi, 0x58(%rsp)
movq 0x18(%rax), %rdi
movq %rdi, 0x60(%rsp)
movq 0x40(%rsp), %rdi
movq 0x8(%rdi), %r8
movq %r8, 0x70(%rsp)
movq %rsi, %r13
movq %rsi, 0x78(%rsp)
movl $0x1, 0x80(%rsp)
movl %ecx, 0x84(%rsp)
movl %edx, 0x68(%rsp)
movq %rax, 0x88(%rsp)
andq $0x0, 0x90(%rsp)
movq 0x10(%rdi), %rcx
movq %rcx, 0x98(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x1e2ce3
movq 0x68(%rax), %rcx
leaq 0x58(%rsp), %rdi
callq *%rcx
movq %r13, %rsi
vxorps %xmm0, %xmm0, %xmm0
vucomiss 0x20(%r13), %xmm0
vmovaps 0x110(%rsp), %xmm6
vmovaps 0x100(%rsp), %xmm7
vmovaps 0xf0(%rsp), %xmm8
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0xd0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm3
vmovaps 0xb0(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm5
ja 0x1e2d8d
incq %r15
cmpq 0x50(%rsp), %r15
setb %bpl
jne 0x1e2c50
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r8
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %r15
movq 0x8(%rsp), %rcx
leaq 0x120(%rsp), %rbp
cmpl $0x3, %r14d
jne 0x1e2af4
jmp 0x1e2990
testb $0x1, %bpl
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r8
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %r15
movq 0x8(%rsp), %rcx
leaq 0x120(%rsp), %rbp
je 0x1e2d7e
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x1e2d7e
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::ObjectIntersector1<true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x8c8, %rsp # imm = 0x8C8
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x1e2dfe
addq $0x8c8, %rsp # imm = 0x8C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x1e2dec
leaq 0x128(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%rsi), %xmm5, %xmm1
vmovaps 0x10(%rsi), %xmm2
vbroadcastss 0x1d3e094(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d0e1ab(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d098bb(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[0,0,0,0]
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm10 # xmm10 = xmm2[1,1,1,1]
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
vshufps $0xaa, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[2,2,2,2]
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %r15
xorq $0x10, %r15
movq %r10, %rbp
xorq $0x10, %rbp
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,0,0,0]
leaq 0x120(%rsp), %r13
vmovaps %xmm6, 0x110(%rsp)
vmovaps %xmm7, 0x100(%rsp)
vmovaps %xmm8, 0xf0(%rsp)
movq %r8, 0x30(%rsp)
vmovaps %xmm9, 0xe0(%rsp)
vmovaps %xmm10, 0xd0(%rsp)
movq %r9, 0x28(%rsp)
vmovaps %xmm11, 0xc0(%rsp)
movq %r10, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %r15, 0x10(%rsp)
movq %rbp, 0x8(%rsp)
vmovaps %xmm5, 0xb0(%rsp)
vmovaps %xmm12, 0xa0(%rsp)
cmpq %r13, %rdi
je 0x1e2dec
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x1e303f
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmaxps %xmm1, %xmm5, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmulps 0x80(%rax,%r10), %xmm0, %xmm3
vaddps 0x20(%rax,%r10), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r11), %xmm0, %xmm2
vaddps 0x20(%rax,%r11), %xmm2, %xmm2
vsubps %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vminps %xmm2, %xmm12, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmulps 0x80(%rax,%rbp), %xmm0, %xmm4
vaddps 0x20(%rax,%rbp), %xmm4, %xmm4
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1e3089
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %ebx
testb $0x8, %r12b
jne 0x1e3085
testq %rbx, %rbx
je 0x1e30ae
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdx
jne 0x1e30b4
movq %rax, %r12
testl %r14d, %r14d
je 0x1e2f61
jmp 0x1e30f7
pushq $0x6
jmp 0x1e30b0
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1e3036
pushq $0x4
popq %r14
jmp 0x1e307a
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x1e30f2
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e30c3
movq %rcx, %r12
jmp 0x1e307a
cmpl $0x6, %r14d
jne 0x1e3263
movl %r12d, %eax
andl $0xf, %eax
xorl %r14d, %r14d
addq $-0x8, %rax
movq %rax, 0x50(%rsp)
setne %r13b
je 0x1e325b
movq %rdi, 0x38(%rsp)
andq $-0x10, %r12
movq 0x40(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x48(%rsp)
xorl %ebp, %ebp
movl (%r12,%rbp,8), %ecx
movq 0x48(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rax
movl 0x34(%rax), %edx
testl %edx, 0x24(%rsi)
je 0x1e3226
movl 0x4(%r12,%rbp,8), %edx
orl $-0x1, 0x4(%rsp)
leaq 0x4(%rsp), %rdi
movq %rdi, 0x58(%rsp)
movq 0x18(%rax), %rdi
movq %rdi, 0x60(%rsp)
movq 0x40(%rsp), %rdi
movq 0x8(%rdi), %r8
movq %r8, 0x70(%rsp)
movq %rsi, %r15
movq %rsi, 0x78(%rsp)
movl $0x1, 0x80(%rsp)
movl %ecx, 0x84(%rsp)
movl %edx, 0x68(%rsp)
movq %rax, 0x88(%rsp)
andq $0x0, 0x90(%rsp)
movq 0x10(%rdi), %rcx
movq %rcx, 0x98(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x1e31c8
movq 0x68(%rax), %rcx
leaq 0x58(%rsp), %rdi
callq *%rcx
movq %r15, %rsi
vxorps %xmm0, %xmm0, %xmm0
vucomiss 0x20(%r15), %xmm0
vmovaps 0x110(%rsp), %xmm6
vmovaps 0x100(%rsp), %xmm7
vmovaps 0xf0(%rsp), %xmm8
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0xd0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm12
ja 0x1e3272
incq %rbp
cmpq 0x50(%rsp), %rbp
setb %r13b
jne 0x1e3135
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r8
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %r15
movq 0x8(%rsp), %rbp
leaq 0x120(%rsp), %r13
cmpl $0x3, %r14d
jne 0x1e2f50
jmp 0x1e2dec
testb $0x1, %r13b
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r8
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %r15
movq 0x8(%rsp), %rbp
leaq 0x120(%rsp), %r13
je 0x1e3263
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x1e3263
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::InstanceIntersector1>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x878, %rsp # imm = 0x878
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1e36b8
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1e36b8
leaq 0xd8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d3dbba(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d0dcd1(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d093e1(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rcx
xorq $0x10, %rcx
movq %r10, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0xd0(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovaps %xmm6, 0xc0(%rsp)
vmovaps %xmm7, 0xb0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
movq %r8, 0x30(%rsp)
vmovaps %xmm9, 0x90(%rsp)
vmovaps %xmm10, 0x80(%rsp)
vmovaps %xmm3, 0x70(%rsp)
movq %r9, 0x28(%rsp)
movq %r10, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %rcx, 0x10(%rsp)
vmovaps %xmm4, 0x60(%rsp)
vmovaps %xmm5, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x1e36b8
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x1e34c4
vmovaps 0x20(%r12,%r8), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%r12,%r9), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r12,%r10), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r12,%r11), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%r12,%rcx), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r12,%rbx), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r13d
testb $0x8, %r12b
jne 0x1e34fc
testq %r13, %r13
je 0x1e3500
andq $-0x10, %r12
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r13, %rdx
jne 0x1e3506
movq %rax, %r12
testl %r14d, %r14d
je 0x1e3432
jmp 0x1e3541
pushq $0x6
jmp 0x1e3502
pushq $0x4
popq %r14
jmp 0x1e34f1
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1e3539
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e3518
movq %rcx, %r12
movq %r15, %rcx
jmp 0x1e34f1
cmpl $0x6, %r14d
jne 0x1e36ae
movl %r12d, %ebp
andl $0xf, %ebp
xorl %r14d, %r14d
movq %rbp, %rax
addq $-0x8, %rax
setne %r15b
je 0x1e36a6
movq %rax, 0x48(%rsp)
movq %rdi, 0x38(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
callq 0x3f50dc
testb %al, %al
jne 0x1e35c8
addq $0x10, %r12
addq $-0x9, %rbp
xorl %r15d, %r15d
cmpq %r15, %rbp
je 0x1e3644
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
callq 0x3f50dc
addq $0x10, %r12
incq %r15
testb %al, %al
je 0x1e3594
cmpq 0x48(%rsp), %r15
setb %r15b
testb %r15b, %r15b
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x30(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovaps 0x60(%rsp), %xmm4
vmovaps 0x50(%rsp), %xmm5
leaq 0xd0(%rsp), %rbp
je 0x1e36ae
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x1e36ae
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x30(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovaps 0x60(%rsp), %xmm4
vmovaps 0x50(%rsp), %xmm5
leaq 0xd0(%rsp), %rbp
cmpl $0x3, %r14d
jne 0x1e3421
addq $0x878, %rsp # imm = 0x878
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::InstanceIntersector1MB>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x878, %rsp # imm = 0x878
movq %rdx, 0x38(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1e3b4f
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1e3b4f
leaq 0xd8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d3d7a0(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d0d8b7(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d08fc7(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %r15
xorq $0x10, %r15
movq %r10, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,0,0,0]
leaq 0xd0(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovaps %xmm6, 0xc0(%rsp)
vmovaps %xmm7, 0xb0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
movq %r8, 0x28(%rsp)
vmovaps %xmm9, 0x90(%rsp)
vmovaps %xmm10, 0x80(%rsp)
vmovaps %xmm11, 0x70(%rsp)
movq %r9, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %r11, 0x10(%rsp)
vmovaps %xmm5, 0x60(%rsp)
vmovaps %xmm12, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x1e3b4f
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x1e3925
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmaxps %xmm1, %xmm5, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmulps 0x80(%rax,%r10), %xmm0, %xmm3
vaddps 0x20(%rax,%r10), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r11), %xmm0, %xmm2
vaddps 0x20(%rax,%r11), %xmm2, %xmm2
vsubps %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vminps %xmm2, %xmm12, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmulps 0x80(%rax,%rbx), %xmm0, %xmm4
vaddps 0x20(%rax,%rbx), %xmm4, %xmm4
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1e396f
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r14d
testb $0x8, %r12b
jne 0x1e396b
testq %r14, %r14
je 0x1e3994
andq $-0x10, %r12
bsfq %r14, %rax
leaq -0x1(%r14), %rdx
xorl %r13d, %r13d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r14, %rdx
jne 0x1e399a
movq %rax, %r12
testl %r13d, %r13d
je 0x1e3847
jmp 0x1e39dd
pushq $0x6
jmp 0x1e3996
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1e391c
pushq $0x4
popq %r13
jmp 0x1e3960
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x1e39d8
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e39a9
movq %rcx, %r12
jmp 0x1e3960
cmpl $0x6, %r13d
jne 0x1e3b45
movl %r12d, %eax
andl $0xf, %eax
xorl %r13d, %r13d
movq %rax, 0x40(%rsp)
addq $-0x8, %rax
setne %bpl
je 0x1e3b3d
movq %rax, 0x48(%rsp)
movq %rdi, 0x30(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
callq 0x3f5b0c
testb %al, %al
jne 0x1e3a69
addq $0x10, %r12
addq $-0x9, 0x40(%rsp)
xorl %ebp, %ebp
cmpq %rbp, 0x40(%rsp)
je 0x1e3ae0
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
callq 0x3f5b0c
addq $0x10, %r12
incq %rbp
testb %al, %al
je 0x1e3a33
cmpq 0x48(%rsp), %rbp
setb %bpl
testb %bpl, %bpl
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x28(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm11
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x50(%rsp), %xmm12
leaq 0xd0(%rsp), %rbp
je 0x1e3b45
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r13
jmp 0x1e3b45
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x28(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm11
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x50(%rsp), %xmm12
leaq 0xd0(%rsp), %rbp
cmpl $0x3, %r13d
jne 0x1e3836
addq $0x878, %rsp # imm = 0x878
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1, false, embree::avx::ArrayIntersector1<embree::avx::InstanceArrayIntersector1>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x878, %rsp # imm = 0x878
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1e3f6a
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1e3f6a
leaq 0xd8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d3d308(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d0d41f(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d08b2f(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rcx
xorq $0x10, %rcx
movq %r10, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,0,0,0]
leaq 0xd0(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovaps %xmm6, 0xc0(%rsp)
vmovaps %xmm7, 0xb0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
movq %r8, 0x30(%rsp)
vmovaps %xmm9, 0x90(%rsp)
vmovaps %xmm10, 0x80(%rsp)
vmovaps %xmm3, 0x70(%rsp)
movq %r9, 0x28(%rsp)
movq %r10, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %rcx, 0x10(%rsp)
vmovaps %xmm4, 0x60(%rsp)
vmovaps %xmm5, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x1e3f6a
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x1e3d76
vmovaps 0x20(%r12,%r8), %xmm0
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%r12,%r9), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r12,%r10), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r12,%r11), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%r12,%rcx), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r12,%rbx), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vpminsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r13d
testb $0x8, %r12b
jne 0x1e3dae
testq %r13, %r13
je 0x1e3db2
andq $-0x10, %r12
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r13, %rdx
jne 0x1e3db8
movq %rax, %r12
testl %r14d, %r14d
je 0x1e3ce4
jmp 0x1e3df3
pushq $0x6
jmp 0x1e3db4
pushq $0x4
popq %r14
jmp 0x1e3da3
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x1e3deb
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e3dca
movq %rcx, %r12
movq %r15, %rcx
jmp 0x1e3da3
cmpl $0x6, %r14d
jne 0x1e3f60
movl %r12d, %ebp
andl $0xf, %ebp
xorl %r14d, %r14d
movq %rbp, %rax
addq $-0x8, %rax
setne %r15b
je 0x1e3f58
movq %rax, 0x48(%rsp)
movq %rdi, 0x38(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
callq 0x4070ac
testb %al, %al
jne 0x1e3e7a
addq $0x8, %r12
addq $-0x9, %rbp
xorl %r15d, %r15d
cmpq %r15, %rbp
je 0x1e3ef6
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
callq 0x4070ac
addq $0x8, %r12
incq %r15
testb %al, %al
je 0x1e3e46
cmpq 0x48(%rsp), %r15
setb %r15b
testb %r15b, %r15b
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x30(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovaps 0x60(%rsp), %xmm4
vmovaps 0x50(%rsp), %xmm5
leaq 0xd0(%rsp), %rbp
je 0x1e3f60
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x1e3f60
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x30(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovaps 0x60(%rsp), %xmm4
vmovaps 0x50(%rsp), %xmm5
leaq 0xd0(%rsp), %rbp
cmpl $0x3, %r14d
jne 0x1e3cd3
addq $0x878, %rsp # imm = 0x878
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::InstanceArrayIntersector1MB>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x878, %rsp # imm = 0x878
movq %rdx, 0x38(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1e4401
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1e4401
leaq 0xd8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d3ceee(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d0d005(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d08715(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %xmm6
vbroadcastss 0x4(%rsi), %xmm7
vbroadcastss 0x8(%rsi), %xmm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm9 # xmm9 = xmm3[0,0,0,0]
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,1,1,1]
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[2,2,2,2]
shll $0x4, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x4, %r9d
orq $0x20, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x4, %r10d
orq $0x40, %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %r15
xorq $0x10, %r15
movq %r10, %rbx
xorq $0x10, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,0,0,0]
leaq 0xd0(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovaps %xmm6, 0xc0(%rsp)
vmovaps %xmm7, 0xb0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
movq %r8, 0x28(%rsp)
vmovaps %xmm9, 0x90(%rsp)
vmovaps %xmm10, 0x80(%rsp)
vmovaps %xmm11, 0x70(%rsp)
movq %r9, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %r11, 0x10(%rsp)
vmovaps %xmm5, 0x60(%rsp)
vmovaps %xmm12, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x1e4401
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x1e41d7
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x80(%rax,%r8), %xmm0, %xmm1
vaddps 0x20(%rax,%r8), %xmm1, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmaxps %xmm1, %xmm5, %xmm1
vmulps 0x80(%rax,%r9), %xmm0, %xmm2
vaddps 0x20(%rax,%r9), %xmm2, %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmulps 0x80(%rax,%r10), %xmm0, %xmm3
vaddps 0x20(%rax,%r10), %xmm3, %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmulps 0x80(%rax,%r11), %xmm0, %xmm2
vaddps 0x20(%rax,%r11), %xmm2, %xmm2
vsubps %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm9, %xmm2
vmulps 0x80(%rax,%r15), %xmm0, %xmm3
vminps %xmm2, %xmm12, %xmm2
vaddps 0x20(%rax,%r15), %xmm3, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmulps 0x80(%rax,%rbx), %xmm0, %xmm4
vaddps 0x20(%rax,%rbx), %xmm4, %xmm4
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vminps %xmm4, %xmm3, %xmm3
vminps %xmm3, %xmm2, %xmm2
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x1e4221
vcmpleps %xmm2, %xmm1, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %r14d
testb $0x8, %r12b
jne 0x1e421d
testq %r14, %r14
je 0x1e4246
andq $-0x10, %r12
bsfq %r14, %rax
leaq -0x1(%r14), %rdx
xorl %r13d, %r13d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r14, %rdx
jne 0x1e424c
movq %rax, %r12
testl %r13d, %r13d
je 0x1e40f9
jmp 0x1e428f
pushq $0x6
jmp 0x1e4248
vcmpleps %xmm2, %xmm1, %xmm1
vmovaps 0xe0(%rax), %xmm2
vcmpleps %xmm0, %xmm2, %xmm2
vcmpltps 0xf0(%rax), %xmm0, %xmm0
vandps %xmm0, %xmm2, %xmm0
vandps %xmm1, %xmm0, %xmm0
jmp 0x1e41ce
pushq $0x4
popq %r13
jmp 0x1e4212
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x1e428a
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e425b
movq %rcx, %r12
jmp 0x1e4212
cmpl $0x6, %r13d
jne 0x1e43f7
movl %r12d, %eax
andl $0xf, %eax
xorl %r13d, %r13d
movq %rax, 0x40(%rsp)
addq $-0x8, %rax
setne %bpl
je 0x1e43ef
movq %rax, 0x48(%rsp)
movq %rdi, 0x30(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
callq 0x4090d0
testb %al, %al
jne 0x1e431b
addq $0x8, %r12
addq $-0x9, 0x40(%rsp)
xorl %ebp, %ebp
cmpq %rbp, 0x40(%rsp)
je 0x1e4392
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
callq 0x4090d0
addq $0x8, %r12
incq %rbp
testb %al, %al
je 0x1e42e5
cmpq 0x48(%rsp), %rbp
setb %bpl
testb %bpl, %bpl
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x28(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm11
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x50(%rsp), %xmm12
leaq 0xd0(%rsp), %rbp
je 0x1e43f7
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r13
jmp 0x1e43f7
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdi
vmovaps 0xc0(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
movq 0x28(%rsp), %r8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm11
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovaps 0x60(%rsp), %xmm5
vmovaps 0x50(%rsp), %xmm12
leaq 0xd0(%rsp), %rbp
cmpl $0x3, %r13d
jne 0x1e40e8
addq $0x878, %rsp # imm = 0x878
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<4, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xac8, %rsp # imm = 0xAC8
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x1e507c
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x1e507c
movq %rdx, %r8
leaq 0x328(%rsp), %r10
movq 0x70(%rax), %rax
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d3ca5c(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d0cb73(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
movq %rax, -0x8(%r10)
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1d0827f(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %xmm7
vbroadcastss 0x4(%rsi), %xmm4
vmovaps %xmm4, 0x240(%rsp)
xorl %r11d, %r11d
vucomiss %xmm2, %xmm3
vbroadcastss 0x8(%rsi), %xmm4
vmovaps %xmm4, 0x230(%rsp)
setb %r11b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vmovaps %xmm4, 0x220(%rsp)
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vmovaps %xmm5, 0x210(%rsp)
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmovaps %xmm3, 0x200(%rsp)
xorl %eax, %eax
vucomiss %xmm2, %xmm4
setb %al
xorl %ecx, %ecx
vucomiss %xmm2, %xmm5
setb %cl
shll $0x2, %r11d
movq %r11, %rdi
xorq $0x4, %rdi
leal 0x8(,%rax,4), %r9d
movq %r9, %rbx
xorq $0x4, %rbx
leal 0x10(,%rcx,4), %r14d
movq %r14, %r12
xorq $0x4, %r12
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps %xmm1, 0x1f0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x1e0(%rsp)
leaq 0x1f6ba20(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm7, 0x90(%rsp)
movq %rdi, 0x88(%rsp)
movq %r9, 0x80(%rsp)
movq %rbx, 0x78(%rsp)
movq %r14, 0x70(%rsp)
movq %r12, 0x30(%rsp)
leaq 0x320(%rsp), %rax
cmpq %rax, %r10
je 0x1e507c
movq -0x8(%r10), %r13
addq $-0x8, %r10
testb $0x8, %r13b
jne 0x1e473b
movq %r13, %rax
andq $-0x10, %rax
leaq 0x20(%rax), %rcx
testq %rax, %rax
cmoveq %rax, %rcx
vmovq (%rcx), %xmm0
vmovq 0x4(%rcx), %xmm1
vpminub %xmm1, %xmm0, %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vpxor %xmm1, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %eax
vbroadcastss 0x18(%rcx), %xmm0
vbroadcastss 0x24(%rcx), %xmm1
vmovq (%rcx,%r11), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vcvtdq2ps %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm0, %xmm2
vmovq (%rcx,%rdi), %xmm3
vpmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vcvtdq2ps %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1c(%rcx), %xmm1
vbroadcastss 0x28(%rcx), %xmm3
vmovq (%rcx,%r9), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm1, %xmm4
vmovq (%rcx,%rbx), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vbroadcastss 0x2c(%rcx), %xmm3
vmovq (%rcx,%r14), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm3, %xmm5
vmovq (%rcx,%r12), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x20(%rcx), %xmm6
vaddps %xmm5, %xmm6, %xmm5
vaddps %xmm3, %xmm6, %xmm3
vsubps %xmm7, %xmm2, %xmm2
vmovaps 0x220(%rsp), %xmm9
vmulps %xmm2, %xmm9, %xmm2
vmovaps 0x240(%rsp), %xmm6
vsubps %xmm6, %xmm4, %xmm4
vmovaps 0x210(%rsp), %xmm10
vmulps %xmm4, %xmm10, %xmm4
vpmaxsd %xmm4, %xmm2, %xmm2
vmovaps 0x230(%rsp), %xmm8
vsubps %xmm8, %xmm5, %xmm4
vmovaps 0x200(%rsp), %xmm5
vmulps %xmm4, %xmm5, %xmm4
vpmaxsd 0x1f0(%rsp), %xmm4, %xmm4
vpmaxsd %xmm4, %xmm2, %xmm2
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpminsd %xmm1, %xmm0, %xmm0
vsubps %xmm8, %xmm3, %xmm1
vmulps %xmm1, %xmm5, %xmm1
vpminsd 0x1e0(%rsp), %xmm1, %xmm1
vpminsd %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm0, %xmm2, %xmm0
vmovmskps %xmm0, %ecx
notb %cl
andb %al, %cl
movzbl %cl, %ebp
testb $0x8, %r13b
jne 0x1e477b
testq %rbp, %rbp
je 0x1e477f
andq $-0x10, %r13
bsfq %rbp, %rax
leaq -0x1(%rbp), %rdx
xorl %r15d, %r15d
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
andq %rbp, %rdx
jne 0x1e4785
movq %rax, %r13
testl %r15d, %r15d
je 0x1e45b2
jmp 0x1e47c2
pushq $0x6
jmp 0x1e4781
pushq $0x4
popq %r15
jmp 0x1e4770
movq %rax, (%r10)
addq $0x8, %r10
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
andq %rdx, %rax
je 0x1e47bd
movq %rcx, (%r10)
addq $0x8, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x1e4794
movq %rcx, %r13
jmp 0x1e4770
cmpl $0x6, %r15d
jne 0x1e5072
movl %r13d, %eax
andl $0xf, %eax
xorl %r15d, %r15d
addq $-0x8, %rax
setne %cl
je 0x1e5072
andq $-0x10, %r13
movq (%r8), %rdi
xorl %edx, %edx
movq %rax, 0xa0(%rsp)
movq %rdi, 0x28(%rsp)
movb %cl, 0xe(%rsp)
movq %rdx, 0xa8(%rsp)
imulq $0x50, %rdx, %rax
movl 0x30(%r13,%rax), %edx
movq 0x228(%rdi), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%r13,%rax), %r9d
movl 0x4(%r13,%rax), %edi
vmovups (%rdx,%r9,4), %xmm2
movl 0x10(%r13,%rax), %r9d
vmovups (%rdx,%r9,4), %xmm1
movl 0x20(%r13,%rax), %r9d
vmovups (%rdx,%r9,4), %xmm0
movl 0x34(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm5
movl 0x14(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm4
movl 0x24(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm3
movl 0x38(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm6
movl 0x18(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm7
movl 0x28(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm8
movq 0x28(%rsp), %rdi
movl 0x3c(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm9
movl 0x1c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm10
movl 0x2c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm11
movb 0xe(%rsp), %dl
vunpcklps %xmm6, %xmm2, %xmm12 # xmm12 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm9, %xmm5, %xmm6 # xmm6 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vmovaps %xmm2, 0x10(%rsp)
vunpcklps %xmm6, %xmm12, %xmm5 # xmm5 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
vunpckhps %xmm6, %xmm12, %xmm6 # xmm6 = xmm12[2],xmm6[2],xmm12[3],xmm6[3]
vunpcklps %xmm7, %xmm1, %xmm9 # xmm9 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
vunpckhps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
vunpcklps %xmm10, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm4, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpcklps %xmm7, %xmm9, %xmm10 # xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
vunpckhps %xmm7, %xmm9, %xmm15 # xmm15 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
vunpcklps %xmm8, %xmm0, %xmm9 # xmm9 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm11, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vunpcklps %xmm3, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
vunpckhps %xmm8, %xmm9, %xmm8 # xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
vmovaps 0x30(%r13,%rax), %xmm0
vmovaps %xmm0, 0x260(%rsp)
vmovaps 0x40(%r13,%rax), %xmm0
vmovaps %xmm0, 0x250(%rsp)
vbroadcastss (%rsi), %xmm9
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm9, %xmm5, %xmm7
vsubps %xmm13, %xmm6, %xmm3
vmovaps 0x10(%rsp), %xmm0
vsubps %xmm14, %xmm0, %xmm1
vsubps %xmm9, %xmm10, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vsubps %xmm13, %xmm15, %xmm0
vmovaps %xmm0, 0x40(%rsp)
vsubps %xmm14, %xmm2, %xmm10
vsubps %xmm9, %xmm12, %xmm12
vsubps %xmm13, %xmm8, %xmm11
vsubps %xmm14, %xmm4, %xmm2
vmovaps %xmm2, 0x50(%rsp)
vsubps %xmm7, %xmm12, %xmm14
vsubps %xmm3, %xmm11, %xmm15
vsubps %xmm1, %xmm2, %xmm0
vaddps %xmm3, %xmm11, %xmm4
vaddps %xmm1, %xmm2, %xmm5
vmulps %xmm4, %xmm0, %xmm8
vmulps %xmm5, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm7, %xmm12, %xmm8
vmulps %xmm5, %xmm14, %xmm5
vmovaps %xmm0, 0x170(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm5, %xmm9, %xmm5
vmovaps %xmm15, 0x180(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x190(%rsp)
vmulps %xmm4, %xmm14, %xmm4
vsubps %xmm8, %xmm4, %xmm4
vbroadcastss 0x18(%rsi), %xmm6
vmulps %xmm4, %xmm6, %xmm4
vbroadcastss 0x14(%rsi), %xmm15
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vbroadcastss 0x10(%rsi), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
vmulps %xmm0, %xmm13, %xmm5
vaddps %xmm4, %xmm5, %xmm9
vmovaps 0x40(%rsp), %xmm2
vsubps %xmm2, %xmm3, %xmm0
vmovaps %xmm10, %xmm8
vsubps %xmm10, %xmm1, %xmm10
vmovaps %xmm3, 0x1b0(%rsp)
vaddps %xmm2, %xmm3, %xmm4
vmovaps %xmm1, 0x1a0(%rsp)
vaddps %xmm1, %xmm8, %xmm5
vmovaps %xmm8, %xmm1
vmulps %xmm4, %xmm10, %xmm14
vmulps %xmm5, %xmm0, %xmm3
vsubps %xmm14, %xmm3, %xmm3
vmovaps 0x10(%rsp), %xmm14
vsubps %xmm14, %xmm7, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vmovaps %xmm7, 0x1c0(%rsp)
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm7, %xmm10, %xmm8
vsubps %xmm5, %xmm8, %xmm5
vmovaps %xmm0, 0x160(%rsp)
vmulps %xmm7, %xmm0, %xmm7
vmovaps %xmm13, 0x150(%rsp)
vmulps %xmm4, %xmm13, %xmm4
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmovaps 0x1d0(%rsp), %xmm13
vmulps %xmm3, %xmm13, %xmm3
vaddps %xmm4, %xmm3, %xmm0
vsubps %xmm12, %xmm14, %xmm5
vaddps %xmm12, %xmm14, %xmm3
vsubps %xmm11, %xmm2, %xmm8
vaddps %xmm2, %xmm11, %xmm2
vmovaps %xmm0, %xmm12
vmovaps 0x50(%rsp), %xmm0
vsubps %xmm0, %xmm1, %xmm4
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm2, %xmm4, %xmm1
vmulps %xmm0, %xmm8, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vmulps %xmm3, %xmm4, %xmm7
vsubps %xmm0, %xmm7, %xmm0
vmovaps 0x90(%rsp), %xmm7
vmulps %xmm3, %xmm8, %xmm3
vmulps %xmm2, %xmm5, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm6, 0x10(%rsp)
vmulps %xmm2, %xmm6, %xmm2
vmovaps %xmm15, 0x50(%rsp)
vmulps %xmm0, %xmm15, %xmm0
vmovaps %xmm13, %xmm15
vaddps %xmm2, %xmm0, %xmm0
vmulps %xmm1, %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm12, %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm13
vminps %xmm12, %xmm9, %xmm1
vminps %xmm0, %xmm1, %xmm1
vbroadcastss 0x1d3c37a(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm13, %xmm3
vbroadcastss 0x1d3c375(%rip), %xmm2 # 0x1f20ecc
vmovaps %xmm3, 0x140(%rsp)
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d3c353(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm2, %xmm3
vcmpnltps %xmm3, %xmm1, %xmm1
vmovaps %xmm9, 0x40(%rsp)
vmovaps %xmm12, 0x130(%rsp)
vmaxps %xmm12, %xmm9, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm2, %xmm0, %xmm0
vorps %xmm0, %xmm1, %xmm0
movb $0x0, 0xf(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x2a0(%rsp)
vtestps 0x110(%rsp), %xmm0
je 0x1e500d
vmovaps 0x170(%rsp), %xmm6
vmovaps 0x160(%rsp), %xmm12
vmovaps %xmm0, 0x120(%rsp)
vmulps %xmm6, %xmm12, %xmm0
vmovaps 0x180(%rsp), %xmm9
vmulps %xmm10, %xmm9, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm8, %xmm10, %xmm2
vmulps %xmm4, %xmm12, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d3c2c4(%rip), %xmm11 # 0x1f20ec4
vandps %xmm0, %xmm11, %xmm0
vandps %xmm2, %xmm11, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm0
vmovaps 0x150(%rsp), %xmm14
vmulps %xmm4, %xmm14, %xmm1
vmulps %xmm6, %xmm14, %xmm2
vmovaps 0x190(%rsp), %xmm7
vmulps %xmm7, %xmm10, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm5, %xmm10, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm3, %xmm11, %xmm3
vandps %xmm1, %xmm11, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmulps %xmm5, %xmm12, %xmm2
vmulps %xmm7, %xmm12, %xmm3
vmulps %xmm14, %xmm9, %xmm5
vmulps %xmm8, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm5, %xmm11, %xmm5
vandps %xmm2, %xmm11, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps 0x10(%rsp), %xmm2, %xmm3
vmulps 0x50(%rsp), %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vmulps %xmm0, %xmm15, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x1a0(%rsp), %xmm2, %xmm3
vmulps 0x1b0(%rsp), %xmm1, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps 0x1c0(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1d07a43(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovaps 0x90(%rsp), %xmm7
vcmpneqps 0x1d06d03(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x120(%rsp), %xmm4
vandps 0x110(%rsp), %xmm4, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x1e500d
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x40(%rsp), %xmm6
vmovaps %xmm6, 0x270(%rsp)
vmovaps 0x130(%rsp), %xmm11
vmovaps %xmm11, 0x280(%rsp)
vmovaps %xmm13, 0x290(%rsp)
movq %rax, 0x2a0(%rsp)
vmovaps %xmm5, 0x2b0(%rsp)
vmovaps %xmm3, 0x2e0(%rsp)
vmovaps %xmm0, 0x2f0(%rsp)
vmovaps %xmm1, 0x300(%rsp)
vmovaps %xmm2, 0x310(%rsp)
vrcpps %xmm13, %xmm0
vmulps %xmm0, %xmm13, %xmm1
vbroadcastss 0x1d07967(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d0c226(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x140(%rsp), %xmm3
vcmpnltps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x2c0(%rsp)
vmulps %xmm0, %xmm11, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x2d0(%rsp)
vmovmskps %xmm5, %r14d
bsfq %r14, %r15
movl 0x260(%rsp,%r15,4), %eax
movq 0x1e8(%rdi), %rcx
movq (%rcx,%rax,8), %rbx
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rbx)
je 0x1e4e35
movq 0x10(%r8), %r12
cmpq $0x0, 0x10(%r12)
jne 0x1e4e50
cmpq $0x0, 0x48(%rbx)
jne 0x1e4e50
xorl %eax, %eax
movq 0x30(%rsp), %r12
jmp 0x1e4e3b
btcq %r15, %r14
movb $0x1, %al
xorl %r15d, %r15d
testb %al, %al
je 0x1e5048
testq %r14, %r14
jne 0x1e4dfa
jmp 0x1e500d
vmovss 0x2c0(%rsp,%r15,4), %xmm0
vmovss 0x2d0(%rsp,%r15,4), %xmm1
movq 0x8(%r8), %rcx
movl 0x250(%rsp,%r15,4), %edx
vmovss 0x2f0(%rsp,%r15,4), %xmm2
vmovss 0x300(%rsp,%r15,4), %xmm3
vmovss 0x310(%rsp,%r15,4), %xmm4
vmovss %xmm2, 0xb0(%rsp)
vmovss %xmm3, 0xb4(%rsp)
vmovss %xmm4, 0xb8(%rsp)
vmovss %xmm0, 0xbc(%rsp)
vmovss %xmm1, 0xc0(%rsp)
movl %edx, 0xc4(%rsp)
movl %eax, 0xc8(%rsp)
movl (%rcx), %eax
movl %eax, 0xcc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xd0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x10(%rsp)
vmovss 0x2e0(%rsp,%r15,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x3c(%rsp)
leaq 0x3c(%rsp), %rax
movq %rax, 0xe0(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0xe8(%rsp)
movq %rcx, 0xf0(%rsp)
movq %rsi, 0xf8(%rsp)
leaq 0xb0(%rsp), %rax
movq %rax, 0x100(%rsp)
movl $0x1, 0x108(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %r10, 0x68(%rsp)
movq %r11, 0x60(%rsp)
je 0x1e4f95
leaq 0xe0(%rsp), %rdi
movq %r8, 0x50(%rsp)
movq %rsi, 0x40(%rsp)
callq *%rax
movq 0x28(%rsp), %rdi
movq 0x60(%rsp), %r11
vmovaps 0x90(%rsp), %xmm7
movq 0x68(%rsp), %r10
movq 0x40(%rsp), %rsi
movq 0x50(%rsp), %r8
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1e4feb
movq 0x10(%r12), %rax
testq %rax, %rax
je 0x1e4fe7
testb $0x2, (%r12)
jne 0x1e4fac
testb $0x40, 0x3e(%rbx)
je 0x1e4fda
leaq 0xe0(%rsp), %rdi
movq %r8, %rbx
movq %rsi, %r12
callq *%rax
movq 0x28(%rsp), %rdi
movq 0x60(%rsp), %r11
vmovaps 0x90(%rsp), %xmm7
movq 0x68(%rsp), %r10
movq %r12, %rsi
movq %rbx, %r8
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1e4feb
xorl %eax, %eax
jmp 0x1e4ffc
vmovss 0x10(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %r15, %r14
movb $0x1, %al
movq 0x30(%rsp), %r12
xorl %r15d, %r15d
movb 0xe(%rsp), %dl
jmp 0x1e4e3e
movq 0xa8(%rsp), %rdx
incq %rdx
movq 0xa0(%rsp), %rax
cmpq %rax, %rdx
setb %cl
jne 0x1e47f8
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %rbx
movq 0x70(%rsp), %r14
jmp 0x1e5072
testb $0x1, %dl
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %rbx
movq 0x70(%rsp), %r14
je 0x1e5072
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r15
cmpl $0x3, %r15d
jne 0x1e4599
addq $0xac8, %rsp # imm = 0xAC8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHBuilderHairMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::VirtualRecalculatePrimRef, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::OBBNodeMB_t<embree::NodeRefPtr<8>, 8>::Create, embree::OBBNodeMB_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::BVHNHairMBlurBuilderSAH<8, embree::CurveNiMB<8>, embree::LineMi<8>, embree::PointMi<8>>::build()::'lambda'(embree::SetMB const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::splitByGeometry(embree::SetMB const&, embree::SetMB&, embree::SetMB&)
|
void splitByGeometry(const SetMB& set, SetMB& lset, SetMB& rset)
{
assert(set.size() > 1);
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfoMB linfo(empty);
PrimInfoMB rinfo(empty);
unsigned int geomID = (*set.prims)[begin].geomID();
size_t center = serial_partitioning(set.prims->data(),begin,end,linfo,rinfo,
[&] ( const PrimRefMB& prim ) { return prim.geomID() == geomID; },
[ ] ( PrimInfoMB& a, const PrimRefMB& ref ) { a.add_primref(ref); });
new (&lset) SetMB(linfo,set.prims,range<size_t>(begin,center),set.time_range);
new (&rset) SetMB(rinfo,set.prims,range<size_t>(center,end ),set.time_range);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %rdi
movq 0x60(%rsi), %r9
movq 0x68(%rsi), %r8
movq 0x90(%rsi), %rax
movq 0x20(%rax), %rdx
imulq $0x50, %r9, %r10
leaq (%rdx,%r10), %rax
movl 0xc(%rdx,%r10), %ebx
imulq $0x50, %r8, %r12
addq %rdx, %r12
vpxor %xmm8, %xmm8, %xmm8
vmovss 0x1cfe779(%rip), %xmm12 # 0x1eec714
vmovsd 0x1cfe74d(%rip), %xmm0 # 0x1eec6f0
vbroadcastss 0x1cfda74(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0x1cfebcf(%rip), %xmm1 # 0x1eecb84
xorl %r10d, %r10d
movabsq $0x1, %r14
vmovaps %xmm1, 0x20(%rsp)
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, %xmm10
vmovaps %xmm2, %xmm11
vmovaps %xmm1, %xmm9
vmovaps %xmm2, %xmm5
vmovaps %xmm1, (%rsp)
vmovaps %xmm2, -0x10(%rsp)
vmovaps %xmm1, %xmm14
vmovaps %xmm1, -0x20(%rsp)
xorl %r11d, %r11d
vmovaps %xmm2, %xmm15
vmovaps %xmm2, -0x30(%rsp)
vmovaps %xmm0, %xmm1
vmovaps %xmm12, -0x50(%rsp)
vxorps %xmm2, %xmm2, %xmm2
vmovaps %xmm2, -0x40(%rsp)
vmovaps %xmm1, -0x80(%rsp)
vmovaps %xmm9, 0x30(%rsp)
leaq -0x50(%r12), %r15
cmpq %r15, %rax
ja 0x1ee109
cmpl %ebx, 0xc(%rax)
jne 0x1ee109
vmovaps (%rax), %xmm2
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm6
vmovaps 0x30(%rax), %xmm4
vmovsd 0x40(%rax), %xmm9
vcmpltps %xmm9, %xmm12, %xmm7
vmovdqa %xmm8, %xmm1
vinsertps $0x50, %xmm9, %xmm12, %xmm8 # xmm8 = xmm12[0],xmm9[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm12[1],xmm9[2,3]
vblendvps %xmm7, %xmm8, %xmm12, %xmm12
vbroadcastss 0x1cfeb0c(%rip), %xmm13 # 0x1eecb80
vmulps %xmm6, %xmm13, %xmm7
movl 0x3c(%rax), %r13d
cmpq %r13, %r11
setb %bpl
vmovd %ebp, %xmm8
vpshufd $0x50, %xmm8, %xmm8 # xmm8 = xmm8[0,0,1,1]
vpslld $0x1f, %xmm8, %xmm8
vblendvps %xmm8, %xmm9, %xmm0, %xmm0
vmulps %xmm2, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovdqa %xmm1, %xmm8
vminps %xmm2, %xmm11, %xmm11
vmaxps %xmm3, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm1
vminps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmovaps %xmm1, 0x20(%rsp)
movl 0x2c(%rax), %ebp
vpmovsxbq 0x1d35e48(%rip), %xmm1 # 0x1f23f32
vpinsrq $0x1, %rbp, %xmm1, %xmm2
vminps %xmm7, %xmm15, %xmm15
cmovbeq %r13, %r11
vmaxps %xmm7, %xmm14, %xmm14
vpaddq %xmm2, %xmm8, %xmm8
addq $0x50, %rax
jmp 0x1ee025
cmpq %r15, %rax
seta %bpl
ja 0x1ee5fd
vmovdqa %xmm8, -0x70(%rsp)
vmovaps %xmm0, -0x60(%rsp)
cmpl %ebx, -0x44(%r12)
vmovaps 0x30(%rsp), %xmm9
vmovaps -0x80(%rsp), %xmm1
je 0x1ee25e
movq %r10, %r12
vmovaps (%r15), %xmm2
vmovaps 0x10(%r15), %xmm3
vmovaps 0x20(%r15), %xmm4
vmovaps 0x30(%r15), %xmm6
vmovsd 0x40(%r15), %xmm7
vmovaps -0x50(%rsp), %xmm13
vcmpltps %xmm7, %xmm13, %xmm8
vmovaps %xmm5, %xmm0
vmovaps %xmm9, %xmm5
vinsertps $0x50, %xmm7, %xmm13, %xmm9 # xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
vblendps $0x2, %xmm13, %xmm7, %xmm13 # xmm13 = xmm7[0],xmm13[1],xmm7[2,3]
vblendvps %xmm8, %xmm9, %xmm13, %xmm8
vmovaps %xmm8, -0x50(%rsp)
vbroadcastss 0x1cfe9f3(%rip), %xmm13 # 0x1eecb80
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm2, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
movl 0x3c(%r15), %r10d
cmpq %r10, %r12
setb %bpl
vmovd %ebp, %xmm9
vpshufd $0x50, %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1]
vpslld $0x1f, %xmm9, %xmm9
vblendvps %xmm9, %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm13, %xmm7
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm5, %xmm9
vmovaps %xmm0, %xmm5
vaddps %xmm7, %xmm8, %xmm7
vmovaps -0x30(%rsp), %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps -0x10(%rsp), %xmm0
vminps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vminps %xmm7, %xmm5, %xmm5
vmaxps %xmm7, %xmm9, %xmm9
movl 0x2c(%r15), %r13d
vpmovsxbq 0x1d35d0c(%rip), %xmm0 # 0x1f23f32
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
cmovaq %r12, %r10
leaq -0x50(%r15), %r12
cmpq %r12, %rax
seta %bpl
ja 0x1ee4c7
cmpl %ebx, -0x44(%r15)
movq %r12, %r15
jne 0x1ee139
testb %bpl, %bpl
jne 0x1ee4c7
vmovaps (%r15), %xmm7
vmovaps %xmm7, -0x80(%rsp)
vmovaps 0x10(%r15), %xmm2
vmovaps 0x20(%r15), %xmm3
vmovaps 0x30(%r15), %xmm4
vbroadcastss 0x1cfe8f3(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm3, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm0, %xmm4, %xmm7
vmulps %xmm0, %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmaxps %xmm2, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm0
vminps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmaxps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vminps %xmm6, %xmm15, %xmm15
vmaxps %xmm6, %xmm14, %xmm14
vmovsd 0x40(%r15), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vinsertps $0x50, %xmm2, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm2[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm12[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm12
movl 0x2c(%r15), %r12d
vmovq %r14, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vpinsrq $0x1, %r12, %xmm0, %xmm3
vmovdqa -0x70(%rsp), %xmm8
vpaddq %xmm3, %xmm8, %xmm8
movl 0x3c(%r15), %r12d
cmpq %r12, %r11
cmovbeq %r12, %r11
setb %bpl
vmovd %ebp, %xmm3
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpslld $0x1f, %xmm3, %xmm3
vmovaps -0x60(%rsp), %xmm0
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x60(%rsp)
vmovaps (%rax), %xmm13
vmovaps 0x10(%rax), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x20(%rax), %xmm7
vmovaps %xmm7, -0x70(%rsp)
vmovaps 0x30(%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x40(%rax), %xmm2
vmovaps -0x50(%rsp), %xmm0
vcmpltps %xmm2, %xmm0, %xmm3
vinsertps $0x50, %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[1],xmm0[2,3]
vblendps $0x2, %xmm0, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm0[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x1cfe7ea(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm7, %xmm3
vmulps %xmm0, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
movl 0x3c(%rax), %r12d
cmpq %r12, %r10
setb %bpl
vmovd %ebp, %xmm4
vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1]
vpslld $0x1f, %xmm4, %xmm4
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm2
vmovaps 0x50(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vminps %xmm2, %xmm5, %xmm5
vmaxps %xmm2, %xmm9, %xmm9
movl 0x2c(%rax), %r13d
vmovdqa 0x30(%rsp), %xmm0
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovaps -0x80(%rsp), %xmm3
vminps %xmm3, %xmm11, %xmm11
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
vmovaps %xmm3, (%rax)
vmovaps 0x10(%r15), %xmm2
vmovaps %xmm2, 0x10(%rax)
vmovaps 0x20(%r15), %xmm2
vmovaps %xmm2, 0x20(%rax)
vmovaps 0x30(%r15), %xmm2
vmovaps %xmm2, 0x30(%rax)
vmovss 0x40(%r15), %xmm2
vmovsd 0x40(%rax), %xmm3
vmovss %xmm2, 0x40(%rax)
vmovss 0x44(%r15), %xmm2
vmovss %xmm2, 0x44(%rax)
vmovaps -0x30(%rsp), %xmm0
vminps %xmm13, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps %xmm13, (%r15)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm7, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps %xmm7, 0x10(%r15)
vmovaps -0x10(%rsp), %xmm0
vmovaps -0x70(%rsp), %xmm2
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps %xmm2, 0x20(%r15)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps -0x60(%rsp), %xmm0
cmovbeq %r12, %r10
vmovaps %xmm6, 0x30(%r15)
vmovsd %xmm3, 0x40(%r15)
addq $0x50, %rax
movq %r15, %r12
jmp 0x1ee014
vmovaps %xmm1, -0x80(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovdqa -0x70(%rsp), %xmm8
subq %rdx, %rax
movabsq $0x50, %rbx
cqto
idivq %rbx
vmovsd 0x88(%rsi), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vblendps $0x2, %xmm12, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm12[1],xmm2[2,3]
vinsertps $0x50, %xmm2, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm2[1],xmm12[2,3]
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq 0x90(%rsi), %rdx
vmovaps %xmm11, (%rdi)
vmovaps %xmm10, 0x10(%rdi)
vmovaps 0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm15, 0x40(%rdi)
vmovaps %xmm14, 0x50(%rdi)
vmovdqu %xmm8, 0x68(%rdi)
movq %r11, 0x78(%rdi)
vmovlhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovaps %xmm0, 0x80(%rdi)
movq %rdx, 0x90(%rdi)
movq %r9, 0x60(%rdi)
movq %rax, 0x68(%rdi)
vmovsd 0x88(%rsi), %xmm0
vmovaps -0x50(%rsp), %xmm3
vcmpltps %xmm0, %xmm3, %xmm1
vblendps $0x2, %xmm3, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm3[1],xmm0[2,3]
vinsertps $0x50, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[0],xmm0[1],xmm3[2,3]
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
movq 0x90(%rsi), %rdx
vmovaps -0x30(%rsp), %xmm1
vmovaps %xmm1, (%rcx)
vmovaps -0x20(%rsp), %xmm1
vmovaps %xmm1, 0x10(%rcx)
vmovaps -0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rcx)
vmovaps (%rsp), %xmm1
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm5, 0x40(%rcx)
vmovaps %xmm9, 0x50(%rcx)
vmovaps -0x40(%rsp), %xmm1
vmovups %xmm1, 0x68(%rcx)
vmovaps -0x80(%rsp), %xmm1
vmovlhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm0[0]
movq %r10, 0x78(%rcx)
vmovaps %xmm0, 0x80(%rcx)
movq %rdx, 0x90(%rcx)
movq %rax, 0x60(%rcx)
movq %r8, 0x68(%rcx)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps 0x30(%rsp), %xmm9
jmp 0x1ee4d9
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur_hair.h
|
embree::avx::BVHBuilderHairMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::VirtualRecalculatePrimRef, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::OBBNodeMB_t<embree::NodeRefPtr<4>, 4>::Create, embree::OBBNodeMB_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNHairMBlurBuilderSAH<4, embree::CurveNiMB<4>, embree::LineMi<4>, embree::PointMi<4>>::build()::'lambda'(embree::SetMB const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderHairMSMBlur::BuildRecord&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1ee7dd
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cfe0f0(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d334b7(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d334aa(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d32885(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1d0299c(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d10a03(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d3284b(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1ee651
retq
|
/embree[P]embree/kernels/bvh/../builders/../bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderHairMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::VirtualRecalculatePrimRef, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::OBBNodeMB_t<embree::NodeRefPtr<4>, 4>::Create, embree::OBBNodeMB_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNHairMBlurBuilderSAH<4, embree::CurveNiMB<8>, embree::LineMi<8>, embree::PointMi<8>>::build()::'lambda'(embree::SetMB const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderHairMSMBlur::BuildRecord&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1ee9d5
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cfdef8(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d332bf(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d332b2(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d3268d(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1d027a4(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d1080b(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d32653(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1ee849
retq
|
/embree[P]embree/kernels/bvh/../builders/../bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderHairMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::VirtualRecalculatePrimRef, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::OBBNodeMB_t<embree::NodeRefPtr<8>, 8>::Create, embree::OBBNodeMB_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::BVHNHairMBlurBuilderSAH<8, embree::CurveNiMB<8>, embree::LineMi<8>, embree::PointMi<8>>::build()::'lambda'(embree::SetMB const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderHairMSMBlur::BuildRecord&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1eebb7
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cfdd22(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d330e9(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d330dc(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d324b7(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1d025ce(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d10635(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d3247d(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x80(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm10, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0xa0(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0xe0(%rsi,%rax,4)
vmovss %xmm1, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x140(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x180(%rsi,%rax,4)
vmovss %xmm9, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0x1a0(%rsi,%rax,4)
vmovss %xmm8, 0x1c0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1eea1f
retq
|
/embree[P]embree/kernels/bvh/../builders/../bvh/bvh_node_aabb_mb4d.h
|
embree::avx::SpatialBinInfo<16ul, embree::PrimRef>::best(embree::avx::SpatialBinMapping<16ul> const&, unsigned long) const
|
SpatialBinSplit<BINS> best(const SpatialBinMapping<BINS>& mapping, const size_t blocks_shift) const
{
/* sweep from right to left and compute parallel prefix of merged bounds */
vfloat4 rAreas[BINS];
vuint4 rCounts[BINS];
vuint4 count = 0; BBox3fa bx = empty; BBox3fa by = empty; BBox3fa bz = empty;
for (size_t i=BINS-1; i>0; i--)
{
count += numEnd[i];
rCounts[i] = count;
bx.extend(bounds[i][0]); rAreas[i][0] = halfArea(bx);
by.extend(bounds[i][1]); rAreas[i][1] = halfArea(by);
bz.extend(bounds[i][2]); rAreas[i][2] = halfArea(bz);
rAreas[i][3] = 0.0f;
}
/* sweep from left to right and compute SAH */
vuint4 blocks_add = (1 << blocks_shift)-1;
vuint4 ii = 1; vfloat4 vbestSAH = pos_inf; vuint4 vbestPos = 0; vuint4 vbestlCount = 0; vuint4 vbestrCount = 0;
count = 0; bx = empty; by = empty; bz = empty;
for (size_t i=1; i<BINS; i++, ii+=1)
{
count += numBegin[i-1];
bx.extend(bounds[i-1][0]); float Ax = halfArea(bx);
by.extend(bounds[i-1][1]); float Ay = halfArea(by);
bz.extend(bounds[i-1][2]); float Az = halfArea(bz);
const vfloat4 lArea = vfloat4(Ax,Ay,Az,Az);
const vfloat4 rArea = rAreas[i];
const vuint4 lCount = (count +blocks_add) >> (unsigned int)(blocks_shift);
const vuint4 rCount = (rCounts[i]+blocks_add) >> (unsigned int)(blocks_shift);
const vfloat4 sah = madd(lArea,vfloat4(lCount),rArea*vfloat4(rCount));
// const vfloat4 sah = madd(lArea,vfloat4(vint4(lCount)),rArea*vfloat4(vint4(rCount)));
const vbool4 mask = sah < vbestSAH;
vbestPos = select(mask,ii ,vbestPos);
vbestSAH = select(mask,sah,vbestSAH);
vbestlCount = select(mask,count,vbestlCount);
vbestrCount = select(mask,rCounts[i],vbestrCount);
}
/* find best dimension */
float bestSAH = inf;
int bestDim = -1;
int bestPos = 0;
unsigned int bestlCount = 0;
unsigned int bestrCount = 0;
for (int dim=0; dim<3; dim++)
{
/* ignore zero sized dimensions */
if (unlikely(mapping.invalid(dim)))
continue;
/* test if this is a better dimension */
if (vbestSAH[dim] < bestSAH && vbestPos[dim] != 0) {
bestDim = dim;
bestPos = vbestPos[dim];
bestSAH = vbestSAH[dim];
bestlCount = vbestlCount[dim];
bestrCount = vbestrCount[dim];
}
}
assert(bestSAH >= 0.0f);
/* return invalid split if no split found */
if (bestDim == -1)
return SpatialBinSplit<BINS>(inf,-1,0,mapping);
/* return best found split */
return SpatialBinSplit<BINS>(bestSAH,bestDim,bestPos,bestlCount,bestrCount,1.0f,mapping);
}
|
subq $0x218, %rsp # imm = 0x218
movq %rdi, %rax
leaq 0x5f0(%rsi), %rdi
vbroadcastss 0x1cfc94a(%rip), %xmm0 # 0x1eeba20
vbroadcastss 0x1cfdaa5(%rip), %xmm1 # 0x1eecb84
vpxor %xmm2, %xmm2, %xmm2
movl $0x7f0, %r8d # imm = 0x7F0
vmovaps %xmm1, %xmm3
vmovaps %xmm0, %xmm4
vmovaps %xmm1, %xmm5
vmovaps %xmm0, %xmm6
vpaddd (%rsi,%r8), %xmm2, %xmm2
vmovdqa %xmm2, -0x5f0(%rsp,%r8)
vminps -0x50(%rdi), %xmm0, %xmm0
vmaxps -0x40(%rdi), %xmm1, %xmm1
vsubps %xmm0, %xmm1, %xmm7
vminps -0x30(%rdi), %xmm4, %xmm4
vmaxps -0x20(%rdi), %xmm3, %xmm3
vsubps %xmm4, %xmm3, %xmm8
vinsertps $0x4c, %xmm7, %xmm8, %xmm9 # xmm9 = xmm7[1],xmm8[1],zero,zero
vshufpd $0x1, %xmm7, %xmm7, %xmm10 # xmm10 = xmm7[1,0]
vinsertps $0x9c, %xmm8, %xmm10, %xmm10 # xmm10 = xmm10[0],xmm8[2],zero,zero
vaddps %xmm10, %xmm9, %xmm11
vmulps %xmm10, %xmm9, %xmm9
vunpcklps %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
vmulps %xmm7, %xmm11, %xmm7
vaddps %xmm7, %xmm9, %xmm7
vmovlps %xmm7, -0x6f0(%rsp,%r8)
vminps -0x10(%rdi), %xmm6, %xmm6
vmaxps (%rdi), %xmm5, %xmm5
vsubps %xmm6, %xmm5, %xmm7
vmovshdup %xmm7, %xmm8 # xmm8 = xmm7[1,1,3,3]
vshufpd $0x1, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[1,0]
vaddss %xmm9, %xmm8, %xmm10
vmulss %xmm9, %xmm8, %xmm8
vmulss %xmm7, %xmm10, %xmm7
vaddss %xmm7, %xmm8, %xmm7
vmovss %xmm7, -0x6e8(%rsp,%r8)
andl $0x0, -0x6e4(%rsp,%r8)
addq $-0x10, %r8
addq $-0x60, %rdi
cmpq $0x700, %r8 # imm = 0x700
jne 0x1ef0f9
movl $0xffffffff, %edi # imm = 0xFFFFFFFF
movl %edi, %r8d
shll %cl, %r8d
notl %r8d
vmovd %r8d, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovdqa %xmm0, -0x30(%rsp)
vbroadcastss 0x1cfc852(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, -0x50(%rsp)
vpxor %xmm2, %xmm2, %xmm2
vmovdqa %xmm2, -0x80(%rsp)
vmovdqa %xmm2, -0x60(%rsp)
vmovdqa %xmm2, -0x70(%rsp)
vmovddup 0x1d31cf6(%rip), %xmm3 # xmm3 = mem[0,0]
vbroadcastss 0x1cfd989(%rip), %xmm4 # 0x1eecb84
movabsq $0x50, %r8
movl $0x600, %r9d # imm = 0x600
vmovaps -0x80(%rsp), %xmm5
vmovddup 0x1d32c67(%rip), %xmm1 # xmm1 = mem[0,0]
vmovaps %xmm1, -0x40(%rsp)
vmovaps %xmm4, %xmm9
vmovaps %xmm0, %xmm10
vmovaps %xmm4, %xmm11
vmovaps %xmm0, %xmm12
vmovaps %xmm0, %xmm13
vxorps %xmm15, %xmm15, %xmm15
vxorps %xmm6, %xmm6, %xmm6
vbroadcastss 0x1d31c7f(%rip), %xmm14 # 0x1f20ec4
vmovaps %xmm3, -0x20(%rsp)
vmovaps %xmm5, -0x10(%rsp)
vminps -0x50(%rsi,%r8), %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmaxps -0x40(%rsi,%r8), %xmm4, %xmm4
vsubps %xmm0, %xmm4, %xmm8
vminps -0x30(%rsi,%r8), %xmm10, %xmm10
vmaxps -0x20(%rsi,%r8), %xmm9, %xmm9
vminps -0x10(%rsi,%r8), %xmm12, %xmm12
vmaxps (%rsi,%r8), %xmm11, %xmm11
vsubps %xmm10, %xmm9, %xmm7
vsubps %xmm12, %xmm11, %xmm0
vmovaps %xmm6, %xmm3
vshufps $0x65, %xmm0, %xmm8, %xmm6 # xmm6 = xmm8[1,1],xmm0[2,1]
vblendps $0x2, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm7[1],xmm6[2,3]
vshufps $0x96, %xmm0, %xmm8, %xmm5 # xmm5 = xmm8[2,1],xmm0[1,2]
vinsertps $0x90, %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm7[2],xmm5[2,3]
vaddps %xmm5, %xmm6, %xmm1
vmulps %xmm5, %xmm6, %xmm5
vpaddd (%rsi,%r9), %xmm2, %xmm2
vshufps $0x0, %xmm0, %xmm8, %xmm0 # xmm0 = xmm8[0,0],xmm0[0,0]
vinsertps $0x10, %xmm7, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm7[0],xmm0[2,3]
vmulps %xmm1, %xmm0, %xmm0
vaddps %xmm5, %xmm0, %xmm0
vmovdqa -0x30(%rsp), %xmm7
vpaddd %xmm7, %xmm2, %xmm1
vmovd %ecx, %xmm5
vpsrld %xmm5, %xmm1, %xmm1
vmovdqa -0x4e0(%rsp,%r9), %xmm6
vpaddd %xmm6, %xmm7, %xmm7
vpsrld %xmm5, %xmm7, %xmm5
vpsrad $0x1f, %xmm1, %xmm7
vmovdqa -0x40(%rsp), %xmm8
vpand %xmm7, %xmm8, %xmm7
vpand %xmm1, %xmm14, %xmm1
vcvtdq2ps %xmm1, %xmm1
vaddps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm0
vpsrad $0x1f, %xmm5, %xmm1
vpand %xmm1, %xmm8, %xmm1
vpand %xmm5, %xmm14, %xmm5
vcvtdq2ps %xmm5, %xmm5
vaddps %xmm1, %xmm5, %xmm1
vmovaps -0x10(%rsp), %xmm5
vmulps -0x5e0(%rsp,%r9), %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vcmpltps %xmm13, %xmm0, %xmm1
vminps %xmm13, %xmm0, %xmm13
vmovaps (%rsp), %xmm0
vblendvps %xmm1, %xmm6, %xmm3, %xmm6
vmovaps -0x20(%rsp), %xmm3
vblendvps %xmm1, %xmm3, %xmm5, %xmm5
vblendvps %xmm1, %xmm2, %xmm15, %xmm15
vpsubd 0x1cfcabc(%rip), %xmm3, %xmm3 # 0x1eebe20
addq $0x60, %r8
addq $0x10, %r9
cmpq $0x5f0, %r8 # imm = 0x5F0
jne 0x1ef245
vmovaps %xmm13, -0x50(%rsp)
vmovaps %xmm5, -0x80(%rsp)
vmovaps %xmm15, -0x60(%rsp)
vmovaps %xmm6, -0x70(%rsp)
vmovss 0x1cfc687(%rip), %xmm0 # 0x1eeba20
xorl %r9d, %r9d
vxorps %xmm1, %xmm1, %xmm1
xorl %ecx, %ecx
xorl %esi, %esi
xorl %r8d, %r8d
vmovss 0x10(%rdx,%r9,4), %xmm2
vucomiss %xmm1, %xmm2
jne 0x1ef3b6
jnp 0x1ef3e1
vmovss -0x50(%rsp,%r9,4), %xmm2
vucomiss %xmm2, %xmm0
jbe 0x1ef3e1
movl -0x80(%rsp,%r9,4), %r10d
testl %r10d, %r10d
je 0x1ef3e1
movl -0x60(%rsp,%r9,4), %esi
movl -0x70(%rsp,%r9,4), %ecx
vmovaps %xmm2, %xmm0
movl %r9d, %edi
movl %r10d, %r8d
incq %r9
cmpq $0x3, %r9
jne 0x1ef3a7
xorl %r9d, %r9d
cmpl $-0x1, %edi
jne 0x1ef3fa
vmovss 0x1cfc626(%rip), %xmm0 # 0x1eeba20
cmovel %r9d, %r8d
cmovel %edi, %esi
cmovel %edi, %ecx
vmovss %xmm0, (%rax)
movl %edi, 0x4(%rax)
movl %r8d, 0x8(%rax)
movl %esi, 0xc(%rax)
movl %ecx, 0x10(%rax)
movl $0x3f800000, 0x14(%rax) # imm = 0x3F800000
vmovaps (%rdx), %xmm0
vmovaps %xmm0, 0x20(%rax)
vmovaps 0x10(%rdx), %xmm0
vmovaps %xmm0, 0x30(%rax)
vmovaps 0x20(%rdx), %xmm0
vmovaps %xmm0, 0x40(%rax)
addq $0x218, %rsp # imm = 0x218
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_spatial.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::TriangleMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::TriangleMesh, embree::TriangleMi<4>>, embree::Scene::BuildProgressMonitorInterface>::splitByGeometry(embree::SetMB const&, embree::SetMB&, embree::SetMB&)
|
void splitByGeometry(const SetMB& set, SetMB& lset, SetMB& rset)
{
assert(set.size() > 1);
mvector<PrimRefMB>& prims = *set.prims;
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfoMB left(empty);
PrimInfoMB right(empty);
unsigned int geomID = prims[begin].geomID();
size_t center = serial_partitioning(prims.data(),begin,end,left,right,
[&] ( const PrimRefMB& prim ) { return prim.geomID() == geomID; },
[ ] ( PrimInfoMB& dst, const PrimRefMB& prim ) { dst.add_primref(prim); });
new (&lset) SetMB(left, set.prims,range<size_t>(begin,center),set.time_range);
new (&rset) SetMB(right,set.prims,range<size_t>(center,end ),set.time_range);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %rdi
movq 0x90(%rsi), %rax
movq 0x60(%rsi), %r9
movq 0x68(%rsi), %r8
movq 0x20(%rax), %rdx
imulq $0x50, %r9, %r10
leaq (%rdx,%r10), %rax
movl 0xc(%rdx,%r10), %ebx
imulq $0x50, %r8, %r12
addq %rdx, %r12
vpxor %xmm8, %xmm8, %xmm8
vmovss 0x1cfba03(%rip), %xmm12 # 0x1eec714
vmovsd 0x1cfb9d7(%rip), %xmm0 # 0x1eec6f0
vbroadcastss 0x1cfacfe(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0x1cfbe59(%rip), %xmm1 # 0x1eecb84
xorl %r10d, %r10d
movabsq $0x1, %r14
vmovaps %xmm1, 0x20(%rsp)
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, %xmm10
vmovaps %xmm2, %xmm11
vmovaps %xmm1, %xmm9
vmovaps %xmm2, %xmm5
vmovaps %xmm1, (%rsp)
vmovaps %xmm2, -0x10(%rsp)
vmovaps %xmm1, %xmm14
vmovaps %xmm1, -0x20(%rsp)
xorl %r11d, %r11d
vmovaps %xmm2, %xmm15
vmovaps %xmm2, -0x30(%rsp)
vmovaps %xmm0, %xmm1
vmovaps %xmm12, -0x50(%rsp)
vxorps %xmm2, %xmm2, %xmm2
vmovaps %xmm2, -0x40(%rsp)
vmovaps %xmm1, -0x80(%rsp)
vmovaps %xmm9, 0x30(%rsp)
leaq -0x50(%r12), %r15
cmpq %r15, %rax
ja 0x1f0e7f
cmpl %ebx, 0xc(%rax)
jne 0x1f0e7f
vmovaps (%rax), %xmm2
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm6
vmovaps 0x30(%rax), %xmm4
vmovsd 0x40(%rax), %xmm9
vcmpltps %xmm9, %xmm12, %xmm7
vmovdqa %xmm8, %xmm1
vinsertps $0x50, %xmm9, %xmm12, %xmm8 # xmm8 = xmm12[0],xmm9[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm12[1],xmm9[2,3]
vblendvps %xmm7, %xmm8, %xmm12, %xmm12
vbroadcastss 0x1cfbd96(%rip), %xmm13 # 0x1eecb80
vmulps %xmm6, %xmm13, %xmm7
movl 0x3c(%rax), %r13d
cmpq %r13, %r11
setb %bpl
vmovd %ebp, %xmm8
vpshufd $0x50, %xmm8, %xmm8 # xmm8 = xmm8[0,0,1,1]
vpslld $0x1f, %xmm8, %xmm8
vblendvps %xmm8, %xmm9, %xmm0, %xmm0
vmulps %xmm2, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovdqa %xmm1, %xmm8
vminps %xmm2, %xmm11, %xmm11
vmaxps %xmm3, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm1
vminps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmovaps %xmm1, 0x20(%rsp)
movl 0x2c(%rax), %ebp
vpmovsxbq 0x1d408b5(%rip), %xmm1 # 0x1f31715
vpinsrq $0x1, %rbp, %xmm1, %xmm2
vminps %xmm7, %xmm15, %xmm15
cmovbeq %r13, %r11
vmaxps %xmm7, %xmm14, %xmm14
vpaddq %xmm2, %xmm8, %xmm8
addq $0x50, %rax
jmp 0x1f0d9b
cmpq %r15, %rax
seta %bpl
ja 0x1f1373
vmovdqa %xmm8, -0x70(%rsp)
vmovaps %xmm0, -0x60(%rsp)
cmpl %ebx, -0x44(%r12)
vmovaps 0x30(%rsp), %xmm9
vmovaps -0x80(%rsp), %xmm1
je 0x1f0fd4
movq %r10, %r12
vmovaps (%r15), %xmm2
vmovaps 0x10(%r15), %xmm3
vmovaps 0x20(%r15), %xmm4
vmovaps 0x30(%r15), %xmm6
vmovsd 0x40(%r15), %xmm7
vmovaps -0x50(%rsp), %xmm13
vcmpltps %xmm7, %xmm13, %xmm8
vmovaps %xmm5, %xmm0
vmovaps %xmm9, %xmm5
vinsertps $0x50, %xmm7, %xmm13, %xmm9 # xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
vblendps $0x2, %xmm13, %xmm7, %xmm13 # xmm13 = xmm7[0],xmm13[1],xmm7[2,3]
vblendvps %xmm8, %xmm9, %xmm13, %xmm8
vmovaps %xmm8, -0x50(%rsp)
vbroadcastss 0x1cfbc7d(%rip), %xmm13 # 0x1eecb80
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm2, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
movl 0x3c(%r15), %r10d
cmpq %r10, %r12
setb %bpl
vmovd %ebp, %xmm9
vpshufd $0x50, %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1]
vpslld $0x1f, %xmm9, %xmm9
vblendvps %xmm9, %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm13, %xmm7
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm5, %xmm9
vmovaps %xmm0, %xmm5
vaddps %xmm7, %xmm8, %xmm7
vmovaps -0x30(%rsp), %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps -0x10(%rsp), %xmm0
vminps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vminps %xmm7, %xmm5, %xmm5
vmaxps %xmm7, %xmm9, %xmm9
movl 0x2c(%r15), %r13d
vpmovsxbq 0x1d40779(%rip), %xmm0 # 0x1f31715
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
cmovaq %r12, %r10
leaq -0x50(%r15), %r12
cmpq %r12, %rax
seta %bpl
ja 0x1f123d
cmpl %ebx, -0x44(%r15)
movq %r12, %r15
jne 0x1f0eaf
testb %bpl, %bpl
jne 0x1f123d
vmovaps (%r15), %xmm7
vmovaps %xmm7, -0x80(%rsp)
vmovaps 0x10(%r15), %xmm2
vmovaps 0x20(%r15), %xmm3
vmovaps 0x30(%r15), %xmm4
vbroadcastss 0x1cfbb7d(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm3, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm0, %xmm4, %xmm7
vmulps %xmm0, %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmaxps %xmm2, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm0
vminps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmaxps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vminps %xmm6, %xmm15, %xmm15
vmaxps %xmm6, %xmm14, %xmm14
vmovsd 0x40(%r15), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vinsertps $0x50, %xmm2, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm2[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm12[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm12
movl 0x2c(%r15), %r12d
vmovq %r14, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vpinsrq $0x1, %r12, %xmm0, %xmm3
vmovdqa -0x70(%rsp), %xmm8
vpaddq %xmm3, %xmm8, %xmm8
movl 0x3c(%r15), %r12d
cmpq %r12, %r11
cmovbeq %r12, %r11
setb %bpl
vmovd %ebp, %xmm3
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpslld $0x1f, %xmm3, %xmm3
vmovaps -0x60(%rsp), %xmm0
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x60(%rsp)
vmovaps (%rax), %xmm13
vmovaps 0x10(%rax), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x20(%rax), %xmm7
vmovaps %xmm7, -0x70(%rsp)
vmovaps 0x30(%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x40(%rax), %xmm2
vmovaps -0x50(%rsp), %xmm0
vcmpltps %xmm2, %xmm0, %xmm3
vinsertps $0x50, %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[1],xmm0[2,3]
vblendps $0x2, %xmm0, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm0[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x1cfba74(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm7, %xmm3
vmulps %xmm0, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
movl 0x3c(%rax), %r12d
cmpq %r12, %r10
setb %bpl
vmovd %ebp, %xmm4
vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1]
vpslld $0x1f, %xmm4, %xmm4
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm2
vmovaps 0x50(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vminps %xmm2, %xmm5, %xmm5
vmaxps %xmm2, %xmm9, %xmm9
movl 0x2c(%rax), %r13d
vmovdqa 0x30(%rsp), %xmm0
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovaps -0x80(%rsp), %xmm3
vminps %xmm3, %xmm11, %xmm11
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
vmovaps %xmm3, (%rax)
vmovaps 0x10(%r15), %xmm2
vmovaps %xmm2, 0x10(%rax)
vmovaps 0x20(%r15), %xmm2
vmovaps %xmm2, 0x20(%rax)
vmovaps 0x30(%r15), %xmm2
vmovaps %xmm2, 0x30(%rax)
vmovss 0x40(%r15), %xmm2
vmovsd 0x40(%rax), %xmm3
vmovss %xmm2, 0x40(%rax)
vmovss 0x44(%r15), %xmm2
vmovss %xmm2, 0x44(%rax)
vmovaps -0x30(%rsp), %xmm0
vminps %xmm13, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps %xmm13, (%r15)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm7, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps %xmm7, 0x10(%r15)
vmovaps -0x10(%rsp), %xmm0
vmovaps -0x70(%rsp), %xmm2
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps %xmm2, 0x20(%r15)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps -0x60(%rsp), %xmm0
cmovbeq %r12, %r10
vmovaps %xmm6, 0x30(%r15)
vmovsd %xmm3, 0x40(%r15)
addq $0x50, %rax
movq %r15, %r12
jmp 0x1f0d8a
vmovaps %xmm1, -0x80(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovdqa -0x70(%rsp), %xmm8
subq %rdx, %rax
movabsq $0x50, %rbx
cqto
idivq %rbx
vmovsd 0x88(%rsi), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vblendps $0x2, %xmm12, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm12[1],xmm2[2,3]
vinsertps $0x50, %xmm2, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm2[1],xmm12[2,3]
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq 0x90(%rsi), %rdx
vmovaps %xmm11, (%rdi)
vmovaps %xmm10, 0x10(%rdi)
vmovaps 0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm15, 0x40(%rdi)
vmovaps %xmm14, 0x50(%rdi)
vmovdqu %xmm8, 0x68(%rdi)
movq %r11, 0x78(%rdi)
vmovlhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovaps %xmm0, 0x80(%rdi)
movq %rdx, 0x90(%rdi)
movq %r9, 0x60(%rdi)
movq %rax, 0x68(%rdi)
vmovsd 0x88(%rsi), %xmm0
vmovaps -0x50(%rsp), %xmm3
vcmpltps %xmm0, %xmm3, %xmm1
vblendps $0x2, %xmm3, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm3[1],xmm0[2,3]
vinsertps $0x50, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[0],xmm0[1],xmm3[2,3]
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
movq 0x90(%rsi), %rdx
vmovaps -0x30(%rsp), %xmm1
vmovaps %xmm1, (%rcx)
vmovaps -0x20(%rsp), %xmm1
vmovaps %xmm1, 0x10(%rcx)
vmovaps -0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rcx)
vmovaps (%rsp), %xmm1
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm5, 0x40(%rcx)
vmovaps %xmm9, 0x50(%rcx)
vmovaps -0x40(%rsp), %xmm1
vmovups %xmm1, 0x68(%rcx)
vmovaps -0x80(%rsp), %xmm1
vmovlhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm0[0]
movq %r10, 0x78(%rcx)
vmovaps %xmm0, 0x80(%rcx)
movq %rdx, 0x90(%rcx)
movq %rax, 0x60(%rcx)
movq %r8, 0x68(%rcx)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps 0x30(%rsp), %xmm9
jmp 0x1f124f
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::TriangleMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::TriangleMesh, embree::TriangleMi<4>>, embree::Scene::BuildProgressMonitorInterface>::splitByGeometry(embree::SetMB const&, embree::SetMB&, embree::SetMB&)
|
void splitByGeometry(const SetMB& set, SetMB& lset, SetMB& rset)
{
assert(set.size() > 1);
mvector<PrimRefMB>& prims = *set.prims;
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfoMB left(empty);
PrimInfoMB right(empty);
unsigned int geomID = prims[begin].geomID();
size_t center = serial_partitioning(prims.data(),begin,end,left,right,
[&] ( const PrimRefMB& prim ) { return prim.geomID() == geomID; },
[ ] ( PrimInfoMB& dst, const PrimRefMB& prim ) { dst.add_primref(prim); });
new (&lset) SetMB(left, set.prims,range<size_t>(begin,center),set.time_range);
new (&rset) SetMB(right,set.prims,range<size_t>(center,end ),set.time_range);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %rdi
movq 0x90(%rsi), %rax
movq 0x60(%rsi), %r9
movq 0x68(%rsi), %r8
movq 0x20(%rax), %rdx
imulq $0x50, %r9, %r10
leaq (%rdx,%r10), %rax
movl 0xc(%rdx,%r10), %ebx
imulq $0x50, %r8, %r12
addq %rdx, %r12
vpxor %xmm8, %xmm8, %xmm8
vmovss 0x1cfac9f(%rip), %xmm12 # 0x1eec714
vmovsd 0x1cfac73(%rip), %xmm0 # 0x1eec6f0
vbroadcastss 0x1cf9f9a(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0x1cfb0f5(%rip), %xmm1 # 0x1eecb84
xorl %r10d, %r10d
movabsq $0x1, %r14
vmovaps %xmm1, 0x20(%rsp)
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, %xmm10
vmovaps %xmm2, %xmm11
vmovaps %xmm1, %xmm9
vmovaps %xmm2, %xmm5
vmovaps %xmm1, (%rsp)
vmovaps %xmm2, -0x10(%rsp)
vmovaps %xmm1, %xmm14
vmovaps %xmm1, -0x20(%rsp)
xorl %r11d, %r11d
vmovaps %xmm2, %xmm15
vmovaps %xmm2, -0x30(%rsp)
vmovaps %xmm0, %xmm1
vmovaps %xmm12, -0x50(%rsp)
vxorps %xmm2, %xmm2, %xmm2
vmovaps %xmm2, -0x40(%rsp)
vmovaps %xmm1, -0x80(%rsp)
vmovaps %xmm9, 0x30(%rsp)
leaq -0x50(%r12), %r15
cmpq %r15, %rax
ja 0x1f1be3
cmpl %ebx, 0xc(%rax)
jne 0x1f1be3
vmovaps (%rax), %xmm2
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm6
vmovaps 0x30(%rax), %xmm4
vmovsd 0x40(%rax), %xmm9
vcmpltps %xmm9, %xmm12, %xmm7
vmovdqa %xmm8, %xmm1
vinsertps $0x50, %xmm9, %xmm12, %xmm8 # xmm8 = xmm12[0],xmm9[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm12[1],xmm9[2,3]
vblendvps %xmm7, %xmm8, %xmm12, %xmm12
vbroadcastss 0x1cfb032(%rip), %xmm13 # 0x1eecb80
vmulps %xmm6, %xmm13, %xmm7
movl 0x3c(%rax), %r13d
cmpq %r13, %r11
setb %bpl
vmovd %ebp, %xmm8
vpshufd $0x50, %xmm8, %xmm8 # xmm8 = xmm8[0,0,1,1]
vpslld $0x1f, %xmm8, %xmm8
vblendvps %xmm8, %xmm9, %xmm0, %xmm0
vmulps %xmm2, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovdqa %xmm1, %xmm8
vminps %xmm2, %xmm11, %xmm11
vmaxps %xmm3, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm1
vminps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmovaps %xmm1, 0x20(%rsp)
movl 0x2c(%rax), %ebp
vpmovsxbq 0x1d3fb5b(%rip), %xmm1 # 0x1f3171f
vpinsrq $0x1, %rbp, %xmm1, %xmm2
vminps %xmm7, %xmm15, %xmm15
cmovbeq %r13, %r11
vmaxps %xmm7, %xmm14, %xmm14
vpaddq %xmm2, %xmm8, %xmm8
addq $0x50, %rax
jmp 0x1f1aff
cmpq %r15, %rax
seta %bpl
ja 0x1f20d7
vmovdqa %xmm8, -0x70(%rsp)
vmovaps %xmm0, -0x60(%rsp)
cmpl %ebx, -0x44(%r12)
vmovaps 0x30(%rsp), %xmm9
vmovaps -0x80(%rsp), %xmm1
je 0x1f1d38
movq %r10, %r12
vmovaps (%r15), %xmm2
vmovaps 0x10(%r15), %xmm3
vmovaps 0x20(%r15), %xmm4
vmovaps 0x30(%r15), %xmm6
vmovsd 0x40(%r15), %xmm7
vmovaps -0x50(%rsp), %xmm13
vcmpltps %xmm7, %xmm13, %xmm8
vmovaps %xmm5, %xmm0
vmovaps %xmm9, %xmm5
vinsertps $0x50, %xmm7, %xmm13, %xmm9 # xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
vblendps $0x2, %xmm13, %xmm7, %xmm13 # xmm13 = xmm7[0],xmm13[1],xmm7[2,3]
vblendvps %xmm8, %xmm9, %xmm13, %xmm8
vmovaps %xmm8, -0x50(%rsp)
vbroadcastss 0x1cfaf19(%rip), %xmm13 # 0x1eecb80
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm2, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
movl 0x3c(%r15), %r10d
cmpq %r10, %r12
setb %bpl
vmovd %ebp, %xmm9
vpshufd $0x50, %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1]
vpslld $0x1f, %xmm9, %xmm9
vblendvps %xmm9, %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm13, %xmm7
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm5, %xmm9
vmovaps %xmm0, %xmm5
vaddps %xmm7, %xmm8, %xmm7
vmovaps -0x30(%rsp), %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps -0x10(%rsp), %xmm0
vminps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vminps %xmm7, %xmm5, %xmm5
vmaxps %xmm7, %xmm9, %xmm9
movl 0x2c(%r15), %r13d
vpmovsxbq 0x1d3fa1f(%rip), %xmm0 # 0x1f3171f
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
cmovaq %r12, %r10
leaq -0x50(%r15), %r12
cmpq %r12, %rax
seta %bpl
ja 0x1f1fa1
cmpl %ebx, -0x44(%r15)
movq %r12, %r15
jne 0x1f1c13
testb %bpl, %bpl
jne 0x1f1fa1
vmovaps (%r15), %xmm7
vmovaps %xmm7, -0x80(%rsp)
vmovaps 0x10(%r15), %xmm2
vmovaps 0x20(%r15), %xmm3
vmovaps 0x30(%r15), %xmm4
vbroadcastss 0x1cfae19(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm3, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm0, %xmm4, %xmm7
vmulps %xmm0, %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmaxps %xmm2, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm0
vminps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmaxps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vminps %xmm6, %xmm15, %xmm15
vmaxps %xmm6, %xmm14, %xmm14
vmovsd 0x40(%r15), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vinsertps $0x50, %xmm2, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm2[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm12[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm12
movl 0x2c(%r15), %r12d
vmovq %r14, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vpinsrq $0x1, %r12, %xmm0, %xmm3
vmovdqa -0x70(%rsp), %xmm8
vpaddq %xmm3, %xmm8, %xmm8
movl 0x3c(%r15), %r12d
cmpq %r12, %r11
cmovbeq %r12, %r11
setb %bpl
vmovd %ebp, %xmm3
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpslld $0x1f, %xmm3, %xmm3
vmovaps -0x60(%rsp), %xmm0
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x60(%rsp)
vmovaps (%rax), %xmm13
vmovaps 0x10(%rax), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x20(%rax), %xmm7
vmovaps %xmm7, -0x70(%rsp)
vmovaps 0x30(%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x40(%rax), %xmm2
vmovaps -0x50(%rsp), %xmm0
vcmpltps %xmm2, %xmm0, %xmm3
vinsertps $0x50, %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[1],xmm0[2,3]
vblendps $0x2, %xmm0, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm0[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x1cfad10(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm7, %xmm3
vmulps %xmm0, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
movl 0x3c(%rax), %r12d
cmpq %r12, %r10
setb %bpl
vmovd %ebp, %xmm4
vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1]
vpslld $0x1f, %xmm4, %xmm4
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm2
vmovaps 0x50(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vminps %xmm2, %xmm5, %xmm5
vmaxps %xmm2, %xmm9, %xmm9
movl 0x2c(%rax), %r13d
vmovdqa 0x30(%rsp), %xmm0
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovaps -0x80(%rsp), %xmm3
vminps %xmm3, %xmm11, %xmm11
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
vmovaps %xmm3, (%rax)
vmovaps 0x10(%r15), %xmm2
vmovaps %xmm2, 0x10(%rax)
vmovaps 0x20(%r15), %xmm2
vmovaps %xmm2, 0x20(%rax)
vmovaps 0x30(%r15), %xmm2
vmovaps %xmm2, 0x30(%rax)
vmovss 0x40(%r15), %xmm2
vmovsd 0x40(%rax), %xmm3
vmovss %xmm2, 0x40(%rax)
vmovss 0x44(%r15), %xmm2
vmovss %xmm2, 0x44(%rax)
vmovaps -0x30(%rsp), %xmm0
vminps %xmm13, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps %xmm13, (%r15)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm7, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps %xmm7, 0x10(%r15)
vmovaps -0x10(%rsp), %xmm0
vmovaps -0x70(%rsp), %xmm2
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps %xmm2, 0x20(%r15)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps -0x60(%rsp), %xmm0
cmovbeq %r12, %r10
vmovaps %xmm6, 0x30(%r15)
vmovsd %xmm3, 0x40(%r15)
addq $0x50, %rax
movq %r15, %r12
jmp 0x1f1aee
vmovaps %xmm1, -0x80(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovdqa -0x70(%rsp), %xmm8
subq %rdx, %rax
movabsq $0x50, %rbx
cqto
idivq %rbx
vmovsd 0x88(%rsi), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vblendps $0x2, %xmm12, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm12[1],xmm2[2,3]
vinsertps $0x50, %xmm2, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm2[1],xmm12[2,3]
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq 0x90(%rsi), %rdx
vmovaps %xmm11, (%rdi)
vmovaps %xmm10, 0x10(%rdi)
vmovaps 0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm15, 0x40(%rdi)
vmovaps %xmm14, 0x50(%rdi)
vmovdqu %xmm8, 0x68(%rdi)
movq %r11, 0x78(%rdi)
vmovlhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovaps %xmm0, 0x80(%rdi)
movq %rdx, 0x90(%rdi)
movq %r9, 0x60(%rdi)
movq %rax, 0x68(%rdi)
vmovsd 0x88(%rsi), %xmm0
vmovaps -0x50(%rsp), %xmm3
vcmpltps %xmm0, %xmm3, %xmm1
vblendps $0x2, %xmm3, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm3[1],xmm0[2,3]
vinsertps $0x50, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[0],xmm0[1],xmm3[2,3]
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
movq 0x90(%rsi), %rdx
vmovaps -0x30(%rsp), %xmm1
vmovaps %xmm1, (%rcx)
vmovaps -0x20(%rsp), %xmm1
vmovaps %xmm1, 0x10(%rcx)
vmovaps -0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rcx)
vmovaps (%rsp), %xmm1
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm5, 0x40(%rcx)
vmovaps %xmm9, 0x50(%rcx)
vmovaps -0x40(%rsp), %xmm1
vmovups %xmm1, 0x68(%rcx)
vmovaps -0x80(%rsp), %xmm1
vmovlhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm0[0]
movq %r10, 0x78(%rcx)
vmovaps %xmm0, 0x80(%rcx)
movq %rdx, 0x90(%rcx)
movq %rax, 0x60(%rcx)
movq %r8, 0x68(%rcx)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps 0x30(%rsp), %xmm9
jmp 0x1f1fb3
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::QuadMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::QuadMesh, embree::QuadMi<4>>, embree::Scene::BuildProgressMonitorInterface>::splitByGeometry(embree::SetMB const&, embree::SetMB&, embree::SetMB&)
|
void splitByGeometry(const SetMB& set, SetMB& lset, SetMB& rset)
{
assert(set.size() > 1);
mvector<PrimRefMB>& prims = *set.prims;
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfoMB left(empty);
PrimInfoMB right(empty);
unsigned int geomID = prims[begin].geomID();
size_t center = serial_partitioning(prims.data(),begin,end,left,right,
[&] ( const PrimRefMB& prim ) { return prim.geomID() == geomID; },
[ ] ( PrimInfoMB& dst, const PrimRefMB& prim ) { dst.add_primref(prim); });
new (&lset) SetMB(left, set.prims,range<size_t>(begin,center),set.time_range);
new (&rset) SetMB(right,set.prims,range<size_t>(center,end ),set.time_range);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %rdi
movq 0x90(%rsi), %rax
movq 0x60(%rsi), %r9
movq 0x68(%rsi), %r8
movq 0x20(%rax), %rdx
imulq $0x50, %r9, %r10
leaq (%rdx,%r10), %rax
movl 0xc(%rdx,%r10), %ebx
imulq $0x50, %r8, %r12
addq %rdx, %r12
vpxor %xmm8, %xmm8, %xmm8
vmovss 0x1cf9f3b(%rip), %xmm12 # 0x1eec714
vmovsd 0x1cf9f0f(%rip), %xmm0 # 0x1eec6f0
vbroadcastss 0x1cf9236(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0x1cfa391(%rip), %xmm1 # 0x1eecb84
xorl %r10d, %r10d
movabsq $0x1, %r14
vmovaps %xmm1, 0x20(%rsp)
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, %xmm10
vmovaps %xmm2, %xmm11
vmovaps %xmm1, %xmm9
vmovaps %xmm2, %xmm5
vmovaps %xmm1, (%rsp)
vmovaps %xmm2, -0x10(%rsp)
vmovaps %xmm1, %xmm14
vmovaps %xmm1, -0x20(%rsp)
xorl %r11d, %r11d
vmovaps %xmm2, %xmm15
vmovaps %xmm2, -0x30(%rsp)
vmovaps %xmm0, %xmm1
vmovaps %xmm12, -0x50(%rsp)
vxorps %xmm2, %xmm2, %xmm2
vmovaps %xmm2, -0x40(%rsp)
vmovaps %xmm1, -0x80(%rsp)
vmovaps %xmm9, 0x30(%rsp)
leaq -0x50(%r12), %r15
cmpq %r15, %rax
ja 0x1f2947
cmpl %ebx, 0xc(%rax)
jne 0x1f2947
vmovaps (%rax), %xmm2
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm6
vmovaps 0x30(%rax), %xmm4
vmovsd 0x40(%rax), %xmm9
vcmpltps %xmm9, %xmm12, %xmm7
vmovdqa %xmm8, %xmm1
vinsertps $0x50, %xmm9, %xmm12, %xmm8 # xmm8 = xmm12[0],xmm9[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm12[1],xmm9[2,3]
vblendvps %xmm7, %xmm8, %xmm12, %xmm12
vbroadcastss 0x1cfa2ce(%rip), %xmm13 # 0x1eecb80
vmulps %xmm6, %xmm13, %xmm7
movl 0x3c(%rax), %r13d
cmpq %r13, %r11
setb %bpl
vmovd %ebp, %xmm8
vpshufd $0x50, %xmm8, %xmm8 # xmm8 = xmm8[0,0,1,1]
vpslld $0x1f, %xmm8, %xmm8
vblendvps %xmm8, %xmm9, %xmm0, %xmm0
vmulps %xmm2, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovdqa %xmm1, %xmm8
vminps %xmm2, %xmm11, %xmm11
vmaxps %xmm3, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm1
vminps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmovaps %xmm1, 0x20(%rsp)
movl 0x2c(%rax), %ebp
vpmovsxbq 0x1d3edfb(%rip), %xmm1 # 0x1f31723
vpinsrq $0x1, %rbp, %xmm1, %xmm2
vminps %xmm7, %xmm15, %xmm15
cmovbeq %r13, %r11
vmaxps %xmm7, %xmm14, %xmm14
vpaddq %xmm2, %xmm8, %xmm8
addq $0x50, %rax
jmp 0x1f2863
cmpq %r15, %rax
seta %bpl
ja 0x1f2e3b
vmovdqa %xmm8, -0x70(%rsp)
vmovaps %xmm0, -0x60(%rsp)
cmpl %ebx, -0x44(%r12)
vmovaps 0x30(%rsp), %xmm9
vmovaps -0x80(%rsp), %xmm1
je 0x1f2a9c
movq %r10, %r12
vmovaps (%r15), %xmm2
vmovaps 0x10(%r15), %xmm3
vmovaps 0x20(%r15), %xmm4
vmovaps 0x30(%r15), %xmm6
vmovsd 0x40(%r15), %xmm7
vmovaps -0x50(%rsp), %xmm13
vcmpltps %xmm7, %xmm13, %xmm8
vmovaps %xmm5, %xmm0
vmovaps %xmm9, %xmm5
vinsertps $0x50, %xmm7, %xmm13, %xmm9 # xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
vblendps $0x2, %xmm13, %xmm7, %xmm13 # xmm13 = xmm7[0],xmm13[1],xmm7[2,3]
vblendvps %xmm8, %xmm9, %xmm13, %xmm8
vmovaps %xmm8, -0x50(%rsp)
vbroadcastss 0x1cfa1b5(%rip), %xmm13 # 0x1eecb80
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm2, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
movl 0x3c(%r15), %r10d
cmpq %r10, %r12
setb %bpl
vmovd %ebp, %xmm9
vpshufd $0x50, %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1]
vpslld $0x1f, %xmm9, %xmm9
vblendvps %xmm9, %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm13, %xmm7
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm5, %xmm9
vmovaps %xmm0, %xmm5
vaddps %xmm7, %xmm8, %xmm7
vmovaps -0x30(%rsp), %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps -0x10(%rsp), %xmm0
vminps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vminps %xmm7, %xmm5, %xmm5
vmaxps %xmm7, %xmm9, %xmm9
movl 0x2c(%r15), %r13d
vpmovsxbq 0x1d3ecbf(%rip), %xmm0 # 0x1f31723
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
cmovaq %r12, %r10
leaq -0x50(%r15), %r12
cmpq %r12, %rax
seta %bpl
ja 0x1f2d05
cmpl %ebx, -0x44(%r15)
movq %r12, %r15
jne 0x1f2977
testb %bpl, %bpl
jne 0x1f2d05
vmovaps (%r15), %xmm7
vmovaps %xmm7, -0x80(%rsp)
vmovaps 0x10(%r15), %xmm2
vmovaps 0x20(%r15), %xmm3
vmovaps 0x30(%r15), %xmm4
vbroadcastss 0x1cfa0b5(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm3, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm0, %xmm4, %xmm7
vmulps %xmm0, %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmaxps %xmm2, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm0
vminps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmaxps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vminps %xmm6, %xmm15, %xmm15
vmaxps %xmm6, %xmm14, %xmm14
vmovsd 0x40(%r15), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vinsertps $0x50, %xmm2, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm2[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm12[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm12
movl 0x2c(%r15), %r12d
vmovq %r14, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vpinsrq $0x1, %r12, %xmm0, %xmm3
vmovdqa -0x70(%rsp), %xmm8
vpaddq %xmm3, %xmm8, %xmm8
movl 0x3c(%r15), %r12d
cmpq %r12, %r11
cmovbeq %r12, %r11
setb %bpl
vmovd %ebp, %xmm3
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpslld $0x1f, %xmm3, %xmm3
vmovaps -0x60(%rsp), %xmm0
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x60(%rsp)
vmovaps (%rax), %xmm13
vmovaps 0x10(%rax), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x20(%rax), %xmm7
vmovaps %xmm7, -0x70(%rsp)
vmovaps 0x30(%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x40(%rax), %xmm2
vmovaps -0x50(%rsp), %xmm0
vcmpltps %xmm2, %xmm0, %xmm3
vinsertps $0x50, %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[1],xmm0[2,3]
vblendps $0x2, %xmm0, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm0[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x1cf9fac(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm7, %xmm3
vmulps %xmm0, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
movl 0x3c(%rax), %r12d
cmpq %r12, %r10
setb %bpl
vmovd %ebp, %xmm4
vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1]
vpslld $0x1f, %xmm4, %xmm4
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm2
vmovaps 0x50(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vminps %xmm2, %xmm5, %xmm5
vmaxps %xmm2, %xmm9, %xmm9
movl 0x2c(%rax), %r13d
vmovdqa 0x30(%rsp), %xmm0
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovaps -0x80(%rsp), %xmm3
vminps %xmm3, %xmm11, %xmm11
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
vmovaps %xmm3, (%rax)
vmovaps 0x10(%r15), %xmm2
vmovaps %xmm2, 0x10(%rax)
vmovaps 0x20(%r15), %xmm2
vmovaps %xmm2, 0x20(%rax)
vmovaps 0x30(%r15), %xmm2
vmovaps %xmm2, 0x30(%rax)
vmovss 0x40(%r15), %xmm2
vmovsd 0x40(%rax), %xmm3
vmovss %xmm2, 0x40(%rax)
vmovss 0x44(%r15), %xmm2
vmovss %xmm2, 0x44(%rax)
vmovaps -0x30(%rsp), %xmm0
vminps %xmm13, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps %xmm13, (%r15)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm7, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps %xmm7, 0x10(%r15)
vmovaps -0x10(%rsp), %xmm0
vmovaps -0x70(%rsp), %xmm2
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps %xmm2, 0x20(%r15)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps -0x60(%rsp), %xmm0
cmovbeq %r12, %r10
vmovaps %xmm6, 0x30(%r15)
vmovsd %xmm3, 0x40(%r15)
addq $0x50, %rax
movq %r15, %r12
jmp 0x1f2852
vmovaps %xmm1, -0x80(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovdqa -0x70(%rsp), %xmm8
subq %rdx, %rax
movabsq $0x50, %rbx
cqto
idivq %rbx
vmovsd 0x88(%rsi), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vblendps $0x2, %xmm12, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm12[1],xmm2[2,3]
vinsertps $0x50, %xmm2, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm2[1],xmm12[2,3]
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq 0x90(%rsi), %rdx
vmovaps %xmm11, (%rdi)
vmovaps %xmm10, 0x10(%rdi)
vmovaps 0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm15, 0x40(%rdi)
vmovaps %xmm14, 0x50(%rdi)
vmovdqu %xmm8, 0x68(%rdi)
movq %r11, 0x78(%rdi)
vmovlhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovaps %xmm0, 0x80(%rdi)
movq %rdx, 0x90(%rdi)
movq %r9, 0x60(%rdi)
movq %rax, 0x68(%rdi)
vmovsd 0x88(%rsi), %xmm0
vmovaps -0x50(%rsp), %xmm3
vcmpltps %xmm0, %xmm3, %xmm1
vblendps $0x2, %xmm3, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm3[1],xmm0[2,3]
vinsertps $0x50, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[0],xmm0[1],xmm3[2,3]
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
movq 0x90(%rsi), %rdx
vmovaps -0x30(%rsp), %xmm1
vmovaps %xmm1, (%rcx)
vmovaps -0x20(%rsp), %xmm1
vmovaps %xmm1, 0x10(%rcx)
vmovaps -0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rcx)
vmovaps (%rsp), %xmm1
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm5, 0x40(%rcx)
vmovaps %xmm9, 0x50(%rcx)
vmovaps -0x40(%rsp), %xmm1
vmovups %xmm1, 0x68(%rcx)
vmovaps -0x80(%rsp), %xmm1
vmovlhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm0[0]
movq %r10, 0x78(%rcx)
vmovaps %xmm0, 0x80(%rcx)
movq %rdx, 0x90(%rcx)
movq %rax, 0x60(%rcx)
movq %r8, 0x68(%rcx)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps 0x30(%rsp), %xmm9
jmp 0x1f2d17
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::QuadMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::QuadMesh, embree::QuadMi<4>>, embree::Scene::BuildProgressMonitorInterface>::splitByGeometry(embree::SetMB const&, embree::SetMB&, embree::SetMB&)
|
void splitByGeometry(const SetMB& set, SetMB& lset, SetMB& rset)
{
assert(set.size() > 1);
mvector<PrimRefMB>& prims = *set.prims;
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfoMB left(empty);
PrimInfoMB right(empty);
unsigned int geomID = prims[begin].geomID();
size_t center = serial_partitioning(prims.data(),begin,end,left,right,
[&] ( const PrimRefMB& prim ) { return prim.geomID() == geomID; },
[ ] ( PrimInfoMB& dst, const PrimRefMB& prim ) { dst.add_primref(prim); });
new (&lset) SetMB(left, set.prims,range<size_t>(begin,center),set.time_range);
new (&rset) SetMB(right,set.prims,range<size_t>(center,end ),set.time_range);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %rdi
movq 0x90(%rsi), %rax
movq 0x60(%rsi), %r9
movq 0x68(%rsi), %r8
movq 0x20(%rax), %rdx
imulq $0x50, %r9, %r10
leaq (%rdx,%r10), %rax
movl 0xc(%rdx,%r10), %ebx
imulq $0x50, %r8, %r12
addq %rdx, %r12
vpxor %xmm8, %xmm8, %xmm8
vmovss 0x1cf9889(%rip), %xmm12 # 0x1eec714
vmovsd 0x1cf985d(%rip), %xmm0 # 0x1eec6f0
vbroadcastss 0x1cf8b84(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0x1cf9cdf(%rip), %xmm1 # 0x1eecb84
xorl %r10d, %r10d
movabsq $0x1, %r14
vmovaps %xmm1, 0x20(%rsp)
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, %xmm10
vmovaps %xmm2, %xmm11
vmovaps %xmm1, %xmm9
vmovaps %xmm2, %xmm5
vmovaps %xmm1, (%rsp)
vmovaps %xmm2, -0x10(%rsp)
vmovaps %xmm1, %xmm14
vmovaps %xmm1, -0x20(%rsp)
xorl %r11d, %r11d
vmovaps %xmm2, %xmm15
vmovaps %xmm2, -0x30(%rsp)
vmovaps %xmm0, %xmm1
vmovaps %xmm12, -0x50(%rsp)
vxorps %xmm2, %xmm2, %xmm2
vmovaps %xmm2, -0x40(%rsp)
vmovaps %xmm1, -0x80(%rsp)
vmovaps %xmm9, 0x30(%rsp)
leaq -0x50(%r12), %r15
cmpq %r15, %rax
ja 0x1f2ff9
cmpl %ebx, 0xc(%rax)
jne 0x1f2ff9
vmovaps (%rax), %xmm2
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm6
vmovaps 0x30(%rax), %xmm4
vmovsd 0x40(%rax), %xmm9
vcmpltps %xmm9, %xmm12, %xmm7
vmovdqa %xmm8, %xmm1
vinsertps $0x50, %xmm9, %xmm12, %xmm8 # xmm8 = xmm12[0],xmm9[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm12[1],xmm9[2,3]
vblendvps %xmm7, %xmm8, %xmm12, %xmm12
vbroadcastss 0x1cf9c1c(%rip), %xmm13 # 0x1eecb80
vmulps %xmm6, %xmm13, %xmm7
movl 0x3c(%rax), %r13d
cmpq %r13, %r11
setb %bpl
vmovd %ebp, %xmm8
vpshufd $0x50, %xmm8, %xmm8 # xmm8 = xmm8[0,0,1,1]
vpslld $0x1f, %xmm8, %xmm8
vblendvps %xmm8, %xmm9, %xmm0, %xmm0
vmulps %xmm2, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovdqa %xmm1, %xmm8
vminps %xmm2, %xmm11, %xmm11
vmaxps %xmm3, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm1
vminps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmovaps %xmm1, 0x20(%rsp)
movl 0x2c(%rax), %ebp
vpmovsxbq 0x1d3e74f(%rip), %xmm1 # 0x1f31729
vpinsrq $0x1, %rbp, %xmm1, %xmm2
vminps %xmm7, %xmm15, %xmm15
cmovbeq %r13, %r11
vmaxps %xmm7, %xmm14, %xmm14
vpaddq %xmm2, %xmm8, %xmm8
addq $0x50, %rax
jmp 0x1f2f15
cmpq %r15, %rax
seta %bpl
ja 0x1f34ed
vmovdqa %xmm8, -0x70(%rsp)
vmovaps %xmm0, -0x60(%rsp)
cmpl %ebx, -0x44(%r12)
vmovaps 0x30(%rsp), %xmm9
vmovaps -0x80(%rsp), %xmm1
je 0x1f314e
movq %r10, %r12
vmovaps (%r15), %xmm2
vmovaps 0x10(%r15), %xmm3
vmovaps 0x20(%r15), %xmm4
vmovaps 0x30(%r15), %xmm6
vmovsd 0x40(%r15), %xmm7
vmovaps -0x50(%rsp), %xmm13
vcmpltps %xmm7, %xmm13, %xmm8
vmovaps %xmm5, %xmm0
vmovaps %xmm9, %xmm5
vinsertps $0x50, %xmm7, %xmm13, %xmm9 # xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
vblendps $0x2, %xmm13, %xmm7, %xmm13 # xmm13 = xmm7[0],xmm13[1],xmm7[2,3]
vblendvps %xmm8, %xmm9, %xmm13, %xmm8
vmovaps %xmm8, -0x50(%rsp)
vbroadcastss 0x1cf9b03(%rip), %xmm13 # 0x1eecb80
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm2, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
movl 0x3c(%r15), %r10d
cmpq %r10, %r12
setb %bpl
vmovd %ebp, %xmm9
vpshufd $0x50, %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1]
vpslld $0x1f, %xmm9, %xmm9
vblendvps %xmm9, %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm13, %xmm7
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm5, %xmm9
vmovaps %xmm0, %xmm5
vaddps %xmm7, %xmm8, %xmm7
vmovaps -0x30(%rsp), %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps -0x10(%rsp), %xmm0
vminps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vminps %xmm7, %xmm5, %xmm5
vmaxps %xmm7, %xmm9, %xmm9
movl 0x2c(%r15), %r13d
vpmovsxbq 0x1d3e613(%rip), %xmm0 # 0x1f31729
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
cmovaq %r12, %r10
leaq -0x50(%r15), %r12
cmpq %r12, %rax
seta %bpl
ja 0x1f33b7
cmpl %ebx, -0x44(%r15)
movq %r12, %r15
jne 0x1f3029
testb %bpl, %bpl
jne 0x1f33b7
vmovaps (%r15), %xmm7
vmovaps %xmm7, -0x80(%rsp)
vmovaps 0x10(%r15), %xmm2
vmovaps 0x20(%r15), %xmm3
vmovaps 0x30(%r15), %xmm4
vbroadcastss 0x1cf9a03(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm3, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm0, %xmm4, %xmm7
vmulps %xmm0, %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmaxps %xmm2, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm0
vminps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmaxps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vminps %xmm6, %xmm15, %xmm15
vmaxps %xmm6, %xmm14, %xmm14
vmovsd 0x40(%r15), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vinsertps $0x50, %xmm2, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm2[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm12[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm12
movl 0x2c(%r15), %r12d
vmovq %r14, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vpinsrq $0x1, %r12, %xmm0, %xmm3
vmovdqa -0x70(%rsp), %xmm8
vpaddq %xmm3, %xmm8, %xmm8
movl 0x3c(%r15), %r12d
cmpq %r12, %r11
cmovbeq %r12, %r11
setb %bpl
vmovd %ebp, %xmm3
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpslld $0x1f, %xmm3, %xmm3
vmovaps -0x60(%rsp), %xmm0
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x60(%rsp)
vmovaps (%rax), %xmm13
vmovaps 0x10(%rax), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x20(%rax), %xmm7
vmovaps %xmm7, -0x70(%rsp)
vmovaps 0x30(%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x40(%rax), %xmm2
vmovaps -0x50(%rsp), %xmm0
vcmpltps %xmm2, %xmm0, %xmm3
vinsertps $0x50, %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[1],xmm0[2,3]
vblendps $0x2, %xmm0, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm0[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x1cf98fa(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm7, %xmm3
vmulps %xmm0, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
movl 0x3c(%rax), %r12d
cmpq %r12, %r10
setb %bpl
vmovd %ebp, %xmm4
vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1]
vpslld $0x1f, %xmm4, %xmm4
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm2
vmovaps 0x50(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vminps %xmm2, %xmm5, %xmm5
vmaxps %xmm2, %xmm9, %xmm9
movl 0x2c(%rax), %r13d
vmovdqa 0x30(%rsp), %xmm0
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovaps -0x80(%rsp), %xmm3
vminps %xmm3, %xmm11, %xmm11
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
vmovaps %xmm3, (%rax)
vmovaps 0x10(%r15), %xmm2
vmovaps %xmm2, 0x10(%rax)
vmovaps 0x20(%r15), %xmm2
vmovaps %xmm2, 0x20(%rax)
vmovaps 0x30(%r15), %xmm2
vmovaps %xmm2, 0x30(%rax)
vmovss 0x40(%r15), %xmm2
vmovsd 0x40(%rax), %xmm3
vmovss %xmm2, 0x40(%rax)
vmovss 0x44(%r15), %xmm2
vmovss %xmm2, 0x44(%rax)
vmovaps -0x30(%rsp), %xmm0
vminps %xmm13, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps %xmm13, (%r15)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm7, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps %xmm7, 0x10(%r15)
vmovaps -0x10(%rsp), %xmm0
vmovaps -0x70(%rsp), %xmm2
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps %xmm2, 0x20(%r15)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps -0x60(%rsp), %xmm0
cmovbeq %r12, %r10
vmovaps %xmm6, 0x30(%r15)
vmovsd %xmm3, 0x40(%r15)
addq $0x50, %rax
movq %r15, %r12
jmp 0x1f2f04
vmovaps %xmm1, -0x80(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovdqa -0x70(%rsp), %xmm8
subq %rdx, %rax
movabsq $0x50, %rbx
cqto
idivq %rbx
vmovsd 0x88(%rsi), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vblendps $0x2, %xmm12, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm12[1],xmm2[2,3]
vinsertps $0x50, %xmm2, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm2[1],xmm12[2,3]
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq 0x90(%rsi), %rdx
vmovaps %xmm11, (%rdi)
vmovaps %xmm10, 0x10(%rdi)
vmovaps 0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm15, 0x40(%rdi)
vmovaps %xmm14, 0x50(%rdi)
vmovdqu %xmm8, 0x68(%rdi)
movq %r11, 0x78(%rdi)
vmovlhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovaps %xmm0, 0x80(%rdi)
movq %rdx, 0x90(%rdi)
movq %r9, 0x60(%rdi)
movq %rax, 0x68(%rdi)
vmovsd 0x88(%rsi), %xmm0
vmovaps -0x50(%rsp), %xmm3
vcmpltps %xmm0, %xmm3, %xmm1
vblendps $0x2, %xmm3, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm3[1],xmm0[2,3]
vinsertps $0x50, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[0],xmm0[1],xmm3[2,3]
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
movq 0x90(%rsi), %rdx
vmovaps -0x30(%rsp), %xmm1
vmovaps %xmm1, (%rcx)
vmovaps -0x20(%rsp), %xmm1
vmovaps %xmm1, 0x10(%rcx)
vmovaps -0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rcx)
vmovaps (%rsp), %xmm1
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm5, 0x40(%rcx)
vmovaps %xmm9, 0x50(%rcx)
vmovaps -0x40(%rsp), %xmm1
vmovups %xmm1, 0x68(%rcx)
vmovaps -0x80(%rsp), %xmm1
vmovlhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm0[0]
movq %r10, 0x78(%rcx)
vmovaps %xmm0, 0x80(%rcx)
movq %rdx, 0x90(%rcx)
movq %rax, 0x60(%rcx)
movq %r8, 0x68(%rcx)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps 0x30(%rsp), %xmm9
jmp 0x1f33c9
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::TriangleMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::TriangleMesh, embree::TriangleMi<4>>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f6e35
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf5a98(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d2ae5f(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d2ae52(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d2a22d(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cfa344(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d083ab(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d2a1f3(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f6ca9
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::TriangleMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::TriangleMesh, embree::TriangleMvMB<4>>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f702f
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf58a0(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d2a043(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d2ac5e(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d2ac51(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d2a02c(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cfa143(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d081aa(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x40(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0x60(%rsi,%rax,4)
vmovss %xmm9, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x50(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0x70(%rsi,%rax,4)
vmovss %xmm8, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0xa0(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm7, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0xd0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0xe0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f6eaa
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::TriangleMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::TriangleMesh, embree::TriangleMi<4>>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f73e9
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf54f2(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d29c95(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d2a8b0(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d2a8a3(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d29c7e(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf9d95(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d07dfc(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x80(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xa0(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0xe0(%rsi,%rax,4)
vmovss %xmm8, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x140(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0x180(%rsi,%rax,4)
vmovss %xmm7, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0x1a0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0x1c0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f7258
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::TriangleMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::TriangleMesh, embree::TriangleMi<4>>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f75cb
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf530e(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d2a6d5(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d2a6c8(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d29aa3(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf9bba(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d07c21(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d29a69(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x80(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm10, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0xa0(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0xe0(%rsi,%rax,4)
vmovss %xmm1, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x140(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x180(%rsi,%rax,4)
vmovss %xmm9, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0x1a0(%rsi,%rax,4)
vmovss %xmm8, 0x1c0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f7433
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::QuadMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::QuadMesh, embree::QuadMi<4>>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f7b69
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf4d66(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d29509(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d2a124(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d2a117(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d294f2(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf9609(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d07670(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x40(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0x60(%rsi,%rax,4)
vmovss %xmm9, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x50(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0x70(%rsi,%rax,4)
vmovss %xmm8, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0xa0(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm7, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0xd0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0xe0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f79e4
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::QuadMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::QuadMesh, embree::QuadMi<4>>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f7d3f
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf4b8e(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d29f55(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d29f48(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d29323(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf943a(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d074a1(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d292e9(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f7bb3
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::QuadMesh>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::QuadMesh, embree::QuadMi<4>>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f8105
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf47d4(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d29b9b(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d29b8e(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d28f69(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf9080(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d070e7(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d28f2f(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x80(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm10, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0xa0(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0xe0(%rsi,%rax,4)
vmovss %xmm1, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x140(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x180(%rsi,%rax,4)
vmovss %xmm9, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0x1a0(%rsi,%rax,4)
vmovss %xmm8, 0x1c0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f7f6d
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::UserGeometry>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::UserGeometry, embree::Object>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f84b3
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf441a(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d297e1(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d297d4(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d28baf(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf8cc6(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d06d2d(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d28b75(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f8327
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::UserGeometry>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::UserGeometry, embree::Object>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f8697
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf4244(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d289e7(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d29602(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d295f5(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d289d0(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf8ae7(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d06b4e(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x80(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xa0(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0xe0(%rsi,%rax,4)
vmovss %xmm8, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x140(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0x180(%rsi,%rax,4)
vmovss %xmm7, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0x1a0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0x1c0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f8506
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::Instance>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::Instance, embree::InstancePrimitive>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f8c27
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf3ca6(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d2906d(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d29060(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d2843b(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf8552(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d065b9(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d28401(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f8a9b
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::Instance>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::Instance, embree::InstancePrimitive>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f8fed
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf38ec(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d28cb3(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d28ca6(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d28081(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf8198(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d061ff(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d28047(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x80(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm10, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0xa0(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0xe0(%rsi,%rax,4)
vmovss %xmm1, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x140(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x180(%rsi,%rax,4)
vmovss %xmm9, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0x1a0(%rsi,%rax,4)
vmovss %xmm8, 0x1c0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f8e55
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::RecalculatePrimRef<embree::InstanceArray>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeaf<4, embree::InstanceArray, embree::InstanceArrayPrimitive>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f91c5
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf370a(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d27ead(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d28ac8(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d28abb(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d27e96(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf7fad(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d06014(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x40(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0x60(%rsi,%rax,4)
vmovss %xmm9, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x50(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0x70(%rsi,%rax,4)
vmovss %xmm8, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0xa0(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm7, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0xd0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0xe0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f9040
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::InstanceArray>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::InstanceArray, embree::InstanceArrayPrimitive>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f957f
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf335c(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d27aff(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d2871a(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d2870d(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d27ae8(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf7bff(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d05c66(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x80(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xa0(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0xe0(%rsi,%rax,4)
vmovss %xmm8, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x140(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0x180(%rsi,%rax,4)
vmovss %xmm7, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0x1a0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0x1c0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f93ee
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::RecalculatePrimRef<embree::InstanceArray>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeaf<8, embree::InstanceArray, embree::InstanceArrayPrimitive>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f9761
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf3178(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d2853f(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d28532(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d2790d(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf7a24(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d05a8b(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d278d3(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x80(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm10, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0xa0(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0xe0(%rsi,%rax,4)
vmovss %xmm1, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x140(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x180(%rsi,%rax,4)
vmovss %xmm9, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0x1a0(%rsi,%rax,4)
vmovss %xmm8, 0x1c0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f95c9
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::GridRecalculatePrimRef, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::CreateMSMBlurLeafGrid<4>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f9b0f
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf2dbe(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d28185(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d28178(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d27553(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf766a(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d056d1(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d27519(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f9983
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::GridRecalculatePrimRef, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeafGrid<8>, embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f9cf3
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf2be8(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d2738b(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d27fa6(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d27f99(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d27374(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf748b(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d054f2(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x80(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xa0(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0xe0(%rsi,%rax,4)
vmovss %xmm8, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x140(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0x180(%rsi,%rax,4)
vmovss %xmm7, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0x1a0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0x1c0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f9b62
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<8>, embree::avx::GridRecalculatePrimRef, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<8>, 8>::Set, embree::avx::CreateMSMBlurLeafGrid<8>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x1f9ed5
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cf2a04(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d27dcb(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d27dbe(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d27199(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf72b0(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1d05317(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d2715f(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x40(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x80(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm10, 0x60(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0xa0(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0xe0(%rsi,%rax,4)
vmovss %xmm1, 0x100(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x140(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x180(%rsi,%rax,4)
vmovss %xmm9, 0x120(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x160(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0x1a0(%rsi,%rax,4)
vmovss %xmm8, 0x1c0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0x1e0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x1f9d3d
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<4, embree::QuadMesh, embree::QuadMv<4>>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<4, embree::QuadMesh, embree::QuadMv<4>>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<4, embree::QuadMesh, embree::QuadMv<4>>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<4, embree::QuadMesh, embree::QuadMv<4>>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f54b15(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1cf0590(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1cf16eb(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1fb517
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1fb517
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1fb4c8
cmpq %r14, %rdi
seta %r15b
ja 0x1fb66a
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1fb65f
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1fb667
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1fb566
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1fb66a
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1fb4b5
movq %r14, %rbx
jmp 0x1fb5c1
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<4, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<4, embree::Instance, embree::InstancePrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<4, embree::Instance, embree::InstancePrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<4, embree::Instance, embree::InstancePrimitive>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f53f1d(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1cef998(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1cf0af3(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1fc10f
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1fc10f
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1fc0c0
cmpq %r14, %rdi
seta %r15b
ja 0x1fc262
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1fc257
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1fc25f
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1fc15e
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1fc262
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1fc0ad
movq %r14, %rbx
jmp 0x1fc1b9
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::TriangleMesh, embree::TriangleMv<4>>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::TriangleMesh, embree::TriangleMv<4>>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::TriangleMesh, embree::TriangleMv<4>>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::TriangleMesh, embree::TriangleMv<4>>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f52d29(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1cee7a4(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1cef8ff(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1fd303
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1fd303
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1fd2b4
cmpq %r14, %rdi
seta %r15b
ja 0x1fd456
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1fd44b
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1fd453
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1fd352
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1fd456
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1fd2a1
movq %r14, %rbx
jmp 0x1fd3ad
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::QuadMesh, embree::QuadMv<4>>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::QuadMesh, embree::QuadMv<4>>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::QuadMesh, embree::QuadMv<4>>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::QuadMesh, embree::QuadMv<4>>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f52131(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1cedbac(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1ceed07(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1fdefb
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1fdefb
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1fdeac
cmpq %r14, %rdi
seta %r15b
ja 0x1fe04e
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1fe043
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1fe04b
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1fdf4a
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1fe04e
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1fde99
movq %r14, %rbx
jmp 0x1fdfa5
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::UserGeometry, embree::Object>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::UserGeometry, embree::Object>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::UserGeometry, embree::Object>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::UserGeometry, embree::Object>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f51b35(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1ced5b0(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1cee70b(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1fe4f7
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1fe4f7
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1fe4a8
cmpq %r14, %rdi
seta %r15b
ja 0x1fe64a
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1fe63f
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1fe647
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1fe546
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1fe64a
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1fe495
movq %r14, %rbx
jmp 0x1fe5a1
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f51539(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1cecfb4(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1cee10f(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1feaf3
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1feaf3
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1feaa4
cmpq %r14, %rdi
seta %r15b
ja 0x1fec46
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1fec3b
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1fec43
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1feb42
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1fec46
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1fea91
movq %r14, %rbx
jmp 0x1feb9d
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, 32ul>::sequential_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
std::pair<size_t,size_t> sequential_object_split(const Split& split, const PrimInfoExtRange& set, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfo local_left(empty);
PrimInfo local_right(empty);
const unsigned int splitPos = split.pos;
const unsigned int splitDim = split.dim;
const unsigned int splitDimMask = (unsigned int)1 << splitDim;
const vint4 vSplitPos(splitPos);
const vbool4 vSplitMask( (int)splitDimMask );
size_t center = serial_partitioning(prims0,
begin,end,local_left,local_right,
[&] (const PrimRef& ref) { return split.mapping.bin_unsafe(ref,vSplitPos,vSplitMask); },
[] (PrimInfo& pinfo,const PrimRef& ref) { pinfo.add_center2(ref); });
new (&lset) PrimInfoExtRange(begin,center,center,local_left);
new (&rset) PrimInfoExtRange(center,end,end,local_right);
assert(area(lset.geomBounds) >= 0.0f);
assert(area(rset.geomBounds) >= 0.0f);
return std::pair<size_t,size_t>(local_left.size(),local_right.size());
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %rbx
andq $-0x20, %rsp
subq $0x20, %rsp
movq %rcx, %r9
movq 0x40(%rdx), %r10
movb 0x4(%rsi), %cl
pushq $0x1
popq %rax
shll %cl, %eax
movq 0x48(%rdx), %rcx
vpshufd $0x0, 0x8(%rsi), %xmm8 # xmm8 = mem[0,0,0,0]
cltq
shlq $0x4, %rax
leaq 0x1f50f3d(%rip), %rdx # 0x214ff80
vmovaps (%rdx,%rax), %xmm9
movq (%rdi), %r11
movq %r10, %rdi
shlq $0x6, %rdi
addq %r11, %rdi
movq %rcx, %rbx
shlq $0x6, %rbx
addq %r11, %rbx
vbroadcastss 0x1cec9b8(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0x1cedb13(%rip), %xmm0 # 0x1eecb84
xorl %edx, %edx
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm3
vmovaps %xmm1, %xmm4
vmovaps %xmm0, %xmm5
vmovaps %xmm1, %xmm6
vmovaps %xmm0, %xmm7
xorl %eax, %eax
leaq -0x40(%rbx), %r14
cmpq %r14, %rdi
ja 0x1ff0ef
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps (%rdi), %xmm13
vmovaps 0x10(%rdi), %xmm14
vaddps %xmm14, %xmm13, %xmm12
vsubps %xmm10, %xmm12, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
je 0x1ff0ef
vminps %xmm13, %xmm4, %xmm4
vmaxps %xmm14, %xmm5, %xmm5
vminps %xmm12, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
incq %rax
addq $0x40, %rdi
cmpq %r14, %rdi
jbe 0x1ff0a0
cmpq %r14, %rdi
seta %r15b
ja 0x1ff242
vmovaps 0x20(%rsi), %xmm10
vmovaps 0x30(%rsi), %xmm11
vmovaps -0x40(%rbx), %xmm12
vmovaps -0x30(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
vtestps %xmm9, %xmm15
jne 0x1ff237
addq $-0x80, %rbx
vmaxps %xmm14, %xmm2, %xmm2
vminps %xmm12, %xmm3, %xmm3
vminps %xmm13, %xmm1, %xmm1
vmaxps %xmm13, %xmm0, %xmm0
cmpq %rbx, %rdi
seta %r15b
ja 0x1ff23f
vmovaps (%rbx), %xmm12
vmovaps 0x10(%rbx), %xmm14
vaddps %xmm14, %xmm12, %xmm13
vsubps %xmm10, %xmm13, %xmm15
vmulps %xmm11, %xmm15, %xmm15
vroundps $0x1, %xmm15, %xmm15
vcvtps2dq %xmm15, %xmm15
vpcmpgtd %xmm15, %xmm8, %xmm15
addq $-0x40, %rbx
incq %rdx
vtestps %xmm9, %xmm15
je 0x1ff13e
addq $0x40, %rbx
testb %r15b, %r15b
jne 0x1ff242
vmovaps (%rbx), %xmm10
vmovaps 0x10(%rbx), %xmm11
vminps %xmm10, %xmm4, %xmm4
vmaxps %xmm11, %xmm5, %xmm5
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmaxps %xmm10, %xmm7, %xmm7
incq %rax
vmovaps (%rdi), %xmm10
vmovaps 0x10(%rdi), %xmm11
vminps %xmm10, %xmm3, %xmm3
vmaxps %xmm11, %xmm2, %xmm2
vaddps %xmm11, %xmm10, %xmm10
vminps %xmm10, %xmm1, %xmm1
vmaxps %xmm10, %xmm0, %xmm0
incq %rdx
vmovaps (%rdi), %ymm10
movl 0x28(%rdi), %r14d
movl %r14d, 0x8(%rsp)
movq 0x20(%rdi), %r14
movq %r14, (%rsp)
vmovaps (%rbx), %ymm11
vmovaps %ymm11, (%rdi)
movl 0x28(%rbx), %r14d
movl %r14d, 0x28(%rdi)
movq 0x20(%rbx), %r14
movq %r14, 0x20(%rdi)
vmovaps %ymm10, (%rbx)
movq (%rsp), %r14
movq %r14, 0x20(%rbx)
movl 0x8(%rsp), %r14d
movl %r14d, 0x28(%rbx)
addq $0x40, %rdi
jmp 0x1ff08d
movq %r14, %rbx
jmp 0x1ff199
incq %rdx
subq %r11, %rdi
sarq $0x6, %rdi
vmovaps %xmm4, (%r9)
vmovaps %xmm5, 0x10(%r9)
vmovaps %xmm6, 0x20(%r9)
vmovaps %xmm7, 0x30(%r9)
movq %r10, 0x40(%r9)
movq %rdi, 0x48(%r9)
movq %rdi, 0x50(%r9)
vmovaps %xmm3, (%r8)
vmovaps %xmm2, 0x10(%r8)
vmovaps %xmm1, 0x20(%r8)
vmovaps %xmm0, 0x30(%r8)
movq %rdi, 0x40(%r8)
movq %rcx, 0x48(%r8)
movq %rcx, 0x50(%r8)
leaq -0x18(%rbp), %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::SubdivRecalculatePrimRef, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::rebuild(unsigned long, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::splitByGeometry(embree::SetMB const&, embree::SetMB&, embree::SetMB&)
|
void splitByGeometry(const SetMB& set, SetMB& lset, SetMB& rset)
{
assert(set.size() > 1);
mvector<PrimRefMB>& prims = *set.prims;
const size_t begin = set.begin();
const size_t end = set.end();
PrimInfoMB left(empty);
PrimInfoMB right(empty);
unsigned int geomID = prims[begin].geomID();
size_t center = serial_partitioning(prims.data(),begin,end,left,right,
[&] ( const PrimRefMB& prim ) { return prim.geomID() == geomID; },
[ ] ( PrimInfoMB& dst, const PrimRefMB& prim ) { dst.add_primref(prim); });
new (&lset) SetMB(left, set.prims,range<size_t>(begin,center),set.time_range);
new (&rset) SetMB(right,set.prims,range<size_t>(center,end ),set.time_range);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x68, %rsp
movq %rdx, %rdi
movq 0x90(%rsi), %rax
movq 0x60(%rsi), %r9
movq 0x68(%rsi), %r8
movq 0x20(%rax), %rdx
imulq $0x50, %r9, %r10
leaq (%rdx,%r10), %rax
movl 0xc(%rdx,%r10), %ebx
imulq $0x50, %r8, %r12
addq %rdx, %r12
vpxor %xmm8, %xmm8, %xmm8
vmovss 0x1cecf1d(%rip), %xmm12 # 0x1eec714
vmovsd 0x1cecef1(%rip), %xmm0 # 0x1eec6f0
vbroadcastss 0x1cec218(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0x1ced373(%rip), %xmm1 # 0x1eecb84
xorl %r10d, %r10d
movabsq $0x1, %r14
vmovaps %xmm1, 0x20(%rsp)
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, %xmm10
vmovaps %xmm2, %xmm11
vmovaps %xmm1, %xmm9
vmovaps %xmm2, %xmm5
vmovaps %xmm1, (%rsp)
vmovaps %xmm2, -0x10(%rsp)
vmovaps %xmm1, %xmm14
vmovaps %xmm1, -0x20(%rsp)
xorl %r11d, %r11d
vmovaps %xmm2, %xmm15
vmovaps %xmm2, -0x30(%rsp)
vmovaps %xmm0, %xmm1
vmovaps %xmm12, -0x50(%rsp)
vxorps %xmm2, %xmm2, %xmm2
vmovaps %xmm2, -0x40(%rsp)
vmovaps %xmm1, -0x80(%rsp)
vmovaps %xmm9, 0x30(%rsp)
leaq -0x50(%r12), %r15
cmpq %r15, %rax
ja 0x1ff965
cmpl %ebx, 0xc(%rax)
jne 0x1ff965
vmovaps (%rax), %xmm2
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm6
vmovaps 0x30(%rax), %xmm4
vmovsd 0x40(%rax), %xmm9
vcmpltps %xmm9, %xmm12, %xmm7
vmovdqa %xmm8, %xmm1
vinsertps $0x50, %xmm9, %xmm12, %xmm8 # xmm8 = xmm12[0],xmm9[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm12[1],xmm9[2,3]
vblendvps %xmm7, %xmm8, %xmm12, %xmm12
vbroadcastss 0x1ced2b0(%rip), %xmm13 # 0x1eecb80
vmulps %xmm6, %xmm13, %xmm7
movl 0x3c(%rax), %r13d
cmpq %r13, %r11
setb %bpl
vmovd %ebp, %xmm8
vpshufd $0x50, %xmm8, %xmm8 # xmm8 = xmm8[0,0,1,1]
vpslld $0x1f, %xmm8, %xmm8
vblendvps %xmm8, %xmm9, %xmm0, %xmm0
vmulps %xmm2, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovdqa %xmm1, %xmm8
vminps %xmm2, %xmm11, %xmm11
vmaxps %xmm3, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm1
vminps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmovaps %xmm1, 0x20(%rsp)
movl 0x2c(%rax), %ebp
vpmovsxbq 0x1d4d3ea(%rip), %xmm1 # 0x1f4cd30
vpinsrq $0x1, %rbp, %xmm1, %xmm2
vminps %xmm7, %xmm15, %xmm15
cmovbeq %r13, %r11
vmaxps %xmm7, %xmm14, %xmm14
vpaddq %xmm2, %xmm8, %xmm8
addq $0x50, %rax
jmp 0x1ff881
cmpq %r15, %rax
seta %bpl
ja 0x1ffe59
vmovdqa %xmm8, -0x70(%rsp)
vmovaps %xmm0, -0x60(%rsp)
cmpl %ebx, -0x44(%r12)
vmovaps 0x30(%rsp), %xmm9
vmovaps -0x80(%rsp), %xmm1
je 0x1ffaba
movq %r10, %r12
vmovaps (%r15), %xmm2
vmovaps 0x10(%r15), %xmm3
vmovaps 0x20(%r15), %xmm4
vmovaps 0x30(%r15), %xmm6
vmovsd 0x40(%r15), %xmm7
vmovaps -0x50(%rsp), %xmm13
vcmpltps %xmm7, %xmm13, %xmm8
vmovaps %xmm5, %xmm0
vmovaps %xmm9, %xmm5
vinsertps $0x50, %xmm7, %xmm13, %xmm9 # xmm9 = xmm13[0],xmm7[1],xmm13[2,3]
vblendps $0x2, %xmm13, %xmm7, %xmm13 # xmm13 = xmm7[0],xmm13[1],xmm7[2,3]
vblendvps %xmm8, %xmm9, %xmm13, %xmm8
vmovaps %xmm8, -0x50(%rsp)
vbroadcastss 0x1ced197(%rip), %xmm13 # 0x1eecb80
vmulps %xmm4, %xmm13, %xmm8
vmulps %xmm2, %xmm13, %xmm9
vaddps %xmm8, %xmm9, %xmm8
movl 0x3c(%r15), %r10d
cmpq %r10, %r12
setb %bpl
vmovd %ebp, %xmm9
vpshufd $0x50, %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1]
vpslld $0x1f, %xmm9, %xmm9
vblendvps %xmm9, %xmm7, %xmm1, %xmm1
vmulps %xmm6, %xmm13, %xmm7
vmulps %xmm3, %xmm13, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm5, %xmm9
vmovaps %xmm0, %xmm5
vaddps %xmm7, %xmm8, %xmm7
vmovaps -0x30(%rsp), %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps -0x10(%rsp), %xmm0
vminps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vminps %xmm7, %xmm5, %xmm5
vmaxps %xmm7, %xmm9, %xmm9
movl 0x2c(%r15), %r13d
vpmovsxbq 0x1d4d2ae(%rip), %xmm0 # 0x1f4cd30
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
cmovaq %r12, %r10
leaq -0x50(%r15), %r12
cmpq %r12, %rax
seta %bpl
ja 0x1ffd23
cmpl %ebx, -0x44(%r15)
movq %r12, %r15
jne 0x1ff995
testb %bpl, %bpl
jne 0x1ffd23
vmovaps (%r15), %xmm7
vmovaps %xmm7, -0x80(%rsp)
vmovaps 0x10(%r15), %xmm2
vmovaps 0x20(%r15), %xmm3
vmovaps 0x30(%r15), %xmm4
vbroadcastss 0x1ced097(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm3, %xmm6
vmulps %xmm0, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm0, %xmm4, %xmm7
vmulps %xmm0, %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmaxps %xmm2, %xmm10, %xmm10
vmovaps 0x10(%rsp), %xmm0
vminps %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmaxps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vminps %xmm6, %xmm15, %xmm15
vmaxps %xmm6, %xmm14, %xmm14
vmovsd 0x40(%r15), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vinsertps $0x50, %xmm2, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm2[1],xmm12[2,3]
vblendps $0x2, %xmm12, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm12[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm12
movl 0x2c(%r15), %r12d
vmovq %r14, %xmm0
vmovdqa %xmm0, 0x30(%rsp)
vpinsrq $0x1, %r12, %xmm0, %xmm3
vmovdqa -0x70(%rsp), %xmm8
vpaddq %xmm3, %xmm8, %xmm8
movl 0x3c(%r15), %r12d
cmpq %r12, %r11
cmovbeq %r12, %r11
setb %bpl
vmovd %ebp, %xmm3
vpshufd $0x50, %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
vpslld $0x1f, %xmm3, %xmm3
vmovaps -0x60(%rsp), %xmm0
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x60(%rsp)
vmovaps (%rax), %xmm13
vmovaps 0x10(%rax), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x20(%rax), %xmm7
vmovaps %xmm7, -0x70(%rsp)
vmovaps 0x30(%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x40(%rax), %xmm2
vmovaps -0x50(%rsp), %xmm0
vcmpltps %xmm2, %xmm0, %xmm3
vinsertps $0x50, %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[1],xmm0[2,3]
vblendps $0x2, %xmm0, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm0[1],xmm2[2,3]
vblendvps %xmm3, %xmm4, %xmm6, %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x1cecf8e(%rip), %xmm0 # 0x1eecb80
vmulps %xmm0, %xmm7, %xmm3
vmulps %xmm0, %xmm13, %xmm4
vaddps %xmm3, %xmm4, %xmm3
movl 0x3c(%rax), %r12d
cmpq %r12, %r10
setb %bpl
vmovd %ebp, %xmm4
vpshufd $0x50, %xmm4, %xmm4 # xmm4 = xmm4[0,0,1,1]
vpslld $0x1f, %xmm4, %xmm4
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm2
vmovaps 0x50(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vminps %xmm2, %xmm5, %xmm5
vmaxps %xmm2, %xmm9, %xmm9
movl 0x2c(%rax), %r13d
vmovdqa 0x30(%rsp), %xmm0
vpinsrq $0x1, %r13, %xmm0, %xmm2
vmovaps -0x80(%rsp), %xmm3
vminps %xmm3, %xmm11, %xmm11
vmovdqa -0x40(%rsp), %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, -0x40(%rsp)
vmovaps %xmm3, (%rax)
vmovaps 0x10(%r15), %xmm2
vmovaps %xmm2, 0x10(%rax)
vmovaps 0x20(%r15), %xmm2
vmovaps %xmm2, 0x20(%rax)
vmovaps 0x30(%r15), %xmm2
vmovaps %xmm2, 0x30(%rax)
vmovss 0x40(%r15), %xmm2
vmovsd 0x40(%rax), %xmm3
vmovss %xmm2, 0x40(%rax)
vmovss 0x44(%r15), %xmm2
vmovss %xmm2, 0x44(%rax)
vmovaps -0x30(%rsp), %xmm0
vminps %xmm13, %xmm0, %xmm0
vmovaps %xmm0, -0x30(%rsp)
vmovaps %xmm13, (%r15)
vmovaps -0x20(%rsp), %xmm0
vmaxps %xmm7, %xmm0, %xmm0
vmovaps %xmm0, -0x20(%rsp)
vmovaps %xmm7, 0x10(%r15)
vmovaps -0x10(%rsp), %xmm0
vmovaps -0x70(%rsp), %xmm2
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, -0x10(%rsp)
vmovaps %xmm2, 0x20(%r15)
vmovaps (%rsp), %xmm0
vmaxps %xmm6, %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps -0x60(%rsp), %xmm0
cmovbeq %r12, %r10
vmovaps %xmm6, 0x30(%r15)
vmovsd %xmm3, 0x40(%r15)
addq $0x50, %rax
movq %r15, %r12
jmp 0x1ff870
vmovaps %xmm1, -0x80(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovdqa -0x70(%rsp), %xmm8
subq %rdx, %rax
movabsq $0x50, %rbx
cqto
idivq %rbx
vmovsd 0x88(%rsi), %xmm2
vcmpltps %xmm2, %xmm12, %xmm3
vblendps $0x2, %xmm12, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm12[1],xmm2[2,3]
vinsertps $0x50, %xmm2, %xmm12, %xmm2 # xmm2 = xmm12[0],xmm2[1],xmm12[2,3]
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq 0x90(%rsi), %rdx
vmovaps %xmm11, (%rdi)
vmovaps %xmm10, 0x10(%rdi)
vmovaps 0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x30(%rdi)
vmovaps %xmm15, 0x40(%rdi)
vmovaps %xmm14, 0x50(%rdi)
vmovdqu %xmm8, 0x68(%rdi)
movq %r11, 0x78(%rdi)
vmovlhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0]
vmovaps %xmm0, 0x80(%rdi)
movq %rdx, 0x90(%rdi)
movq %r9, 0x60(%rdi)
movq %rax, 0x68(%rdi)
vmovsd 0x88(%rsi), %xmm0
vmovaps -0x50(%rsp), %xmm3
vcmpltps %xmm0, %xmm3, %xmm1
vblendps $0x2, %xmm3, %xmm0, %xmm2 # xmm2 = xmm0[0],xmm3[1],xmm0[2,3]
vinsertps $0x50, %xmm0, %xmm3, %xmm0 # xmm0 = xmm3[0],xmm0[1],xmm3[2,3]
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
movq 0x90(%rsi), %rdx
vmovaps -0x30(%rsp), %xmm1
vmovaps %xmm1, (%rcx)
vmovaps -0x20(%rsp), %xmm1
vmovaps %xmm1, 0x10(%rcx)
vmovaps -0x10(%rsp), %xmm1
vmovaps %xmm1, 0x20(%rcx)
vmovaps (%rsp), %xmm1
vmovaps %xmm1, 0x30(%rcx)
vmovaps %xmm5, 0x40(%rcx)
vmovaps %xmm9, 0x50(%rcx)
vmovaps -0x40(%rsp), %xmm1
vmovups %xmm1, 0x68(%rcx)
vmovaps -0x80(%rsp), %xmm1
vmovlhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm0[0]
movq %r10, 0x78(%rcx)
vmovaps %xmm0, 0x80(%rcx)
movq %rdx, 0x90(%rcx)
movq %rax, 0x60(%rcx)
movq %r8, 0x68(%rcx)
addq $0x68, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps 0x30(%rsp), %xmm9
jmp 0x1ffd35
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur.h
|
embree::PrimInfoT<embree::BBox<embree::Vec3fa>> embree::parallel_for_for_prefix_sum0_<embree::PrimInfoT<embree::BBox<embree::Vec3fa>> embree::parallel_for_for_prefix_sum0<embree::Scene::Iterator<embree::SubdivMesh, false>, unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>>, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long), embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&)>(embree::ParallelForForPrefixSumState<embree::PrimInfoT<embree::BBox<embree::Vec3fa>>>&, embree::Scene::Iterator<embree::SubdivMesh, false>&, unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long) const&, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&) const&)::'lambda'(unsigned long), unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>>, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> embree::parallel_for_for_prefix_sum0<embree::Scene::Iterator<embree::SubdivMesh, false>, unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>>, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long), embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&)>(embree::ParallelForForPrefixSumState<embree::PrimInfoT<embree::BBox<embree::Vec3fa>>>&, embree::Scene::Iterator<embree::SubdivMesh, false>&, unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, 
embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long) const&, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&) const&)::'lambda'(unsigned long, embree::range<unsigned long> const&, unsigned long), embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&)>(embree::ParallelForForPrefixSumState<embree::PrimInfoT<embree::BBox<embree::Vec3fa>>>&, unsigned long, embree::Scene::Iterator<embree::SubdivMesh, false> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long) const&, embree::avx::BVHNSubdivPatch1BuilderSAH<4>::build()::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&) const&)::'lambda'(unsigned long)::operator()(unsigned long) const (.cold.1)
|
__forceinline const HalfEdge* getHalfEdge ( const size_t f ) const {
return &halfEdges[mesh->faceStartEdge[f]];
}
|
movq (%rdi), %rax
movq 0x278(%rax), %rax
movl (%rax,%rsi,4), %eax
shlq $0x5, %rax
addq 0x68(%rdi), %rax
movq %rax, %rcx
movq %rcx, %rdx
movslq 0xc(%rdx), %rsi
testq %rsi, %rsi
je 0x1ffea3
shlq $0x5, %rsi
leaq (%rdx,%rsi), %rdi
movslq 0x4(%rdx,%rsi), %rdx
shlq $0x5, %rdx
addq %rdi, %rdx
cmpq %rcx, %rdx
jne 0x1ffe7f
jmp 0x1ffea9
cmpb $0x1, 0x1d(%rcx)
jne 0x1ffebc
movslq 0x4(%rcx), %rdx
shlq $0x5, %rdx
addq %rdx, %rcx
cmpq %rax, %rcx
jne 0x1ffe7c
xorl %eax, %eax
retq
movb $0x1, %al
retq
|
/embree[P]embree/kernels/bvh/../common/scene_subdiv_mesh.h
|
embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> embree::parallel_for_for_prefix_sum0_<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> embree::parallel_for_for_prefix_sum0<embree::Scene::Iterator<embree::SubdivMesh, true>, unsigned long, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long), embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&)>(embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&, embree::Scene::Iterator<embree::SubdivMesh, true>&, unsigned long, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long) const&, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&) const&)::'lambda'(unsigned long), unsigned long, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> embree::parallel_for_for_prefix_sum0<embree::Scene::Iterator<embree::SubdivMesh, true>, unsigned long, 
embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long), embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&)>(embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&, embree::Scene::Iterator<embree::SubdivMesh, true>&, unsigned long, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long) const&, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&) const&)::'lambda'(unsigned long, embree::range<unsigned long> const&, unsigned long), embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&)>(embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&, unsigned long, 
embree::Scene::Iterator<embree::SubdivMesh, true> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long) const&, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::countSubPatches(unsigned long&, unsigned long&, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&) const&)::'lambda'(unsigned long)::operator()(unsigned long) const (.cold.1)
|
__forceinline const HalfEdge* getHalfEdge ( const size_t f ) const {
return &halfEdges[mesh->faceStartEdge[f]];
}
|
movq (%rdi), %rax
movq 0x278(%rax), %rax
movl (%rax,%rsi,4), %eax
shlq $0x5, %rax
addq (%rdx), %rax
movq %rax, %rcx
movq %rcx, %rdx
movslq 0xc(%rdx), %rsi
testq %rsi, %rsi
je 0x1fff7f
shlq $0x5, %rsi
leaq (%rdx,%rsi), %rdi
movslq 0x4(%rdx,%rsi), %rdx
shlq $0x5, %rdx
addq %rdi, %rdx
cmpq %rcx, %rdx
jne 0x1fff5b
jmp 0x1fff85
cmpb $0x1, 0x1d(%rcx)
jne 0x1fff98
movslq 0x4(%rcx), %rdx
shlq $0x5, %rdx
addq %rdx, %rcx
cmpq %rax, %rcx
jne 0x1fff58
xorl %eax, %eax
retq
movb $0x1, %al
retq
|
/embree[P]embree/kernels/bvh/../common/scene_subdiv_mesh.h
|
embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::rebuild(unsigned long, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&)::operator()(embree::SubdivMesh*, embree::range<unsigned long> const&, unsigned long, unsigned long, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&) const (.cold.1)
|
__forceinline const HalfEdge* getHalfEdge ( const size_t f ) const {
return &halfEdges[mesh->faceStartEdge[f]];
}
|
movq (%rdi), %rax
movq 0x278(%rax), %rax
movl (%rax,%rsi,4), %eax
shlq $0x5, %rax
addq 0x68(%rdi), %rax
movq %rax, %rcx
movq %rcx, %rdx
movslq 0xc(%rdx), %rsi
testq %rsi, %rsi
je 0x1fffdf
movq %rsi, %rdi
shlq $0x5, %rdi
leaq (%rdx,%rdi), %r8
movslq 0x4(%rdx,%rdi), %rdx
shlq $0x5, %rdx
addq %r8, %rdx
cmpq %rcx, %rdx
jne 0x1fffb6
testl %esi, %esi
jne 0x1fffe5
cmpb $0x1, 0x1d(%rcx)
jne 0x1ffff8
movslq 0x4(%rcx), %rdx
shlq $0x5, %rdx
addq %rdx, %rcx
cmpq %rax, %rcx
jne 0x1fffb3
xorl %eax, %eax
retq
movb $0x1, %al
retq
|
/embree[P]embree/kernels/bvh/../common/scene_subdiv_mesh.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::SubdivRecalculatePrimRef, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::rebuild(unsigned long, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::recurse(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator, bool) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x2001d2
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cec6fd(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d20ea0(%rip), %xmm1 # 0x1f20ec0
vbroadcastss 0x1d21abb(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d21aae(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d20e89(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf0fa0(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1cff007(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm7
vdivss %xmm7, %xmm0, %xmm7
vxorps %xmm1, %xmm8, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vsubss %xmm9, %xmm0, %xmm10
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmovaps -0x44(%rdx), %xmm11
vmovaps -0x34(%rdx), %xmm12
vmovaps -0x24(%rdx), %xmm13
vmulps %xmm9, %xmm13, %xmm14
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm15
vaddps %xmm15, %xmm14, %xmm14
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm10, %xmm12, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vsubss %xmm8, %xmm0, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm0, %xmm8
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm10
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vminps %xmm2, %xmm14, %xmm8
vmaxps %xmm3, %xmm9, %xmm9
vminps %xmm2, %xmm10, %xmm10
vmaxps %xmm3, %xmm7, %xmm7
vandps %xmm4, %xmm8, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm8, %xmm8
vandps %xmm4, %xmm9, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vandps %xmm4, %xmm10, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vsubps %xmm11, %xmm10, %xmm10
vandps %xmm4, %xmm7, %xmm11
vmulps %xmm5, %xmm11, %xmm11
vmovss %xmm8, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0x40(%rsi,%rax,4)
vaddps %xmm7, %xmm11, %xmm7
vextractps $0x2, %xmm8, 0x60(%rsi,%rax,4)
vmovss %xmm9, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0x50(%rsi,%rax,4)
vsubps %xmm8, %xmm10, %xmm8
vextractps $0x2, %xmm9, 0x70(%rsi,%rax,4)
vmovss %xmm8, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm8, 0xa0(%rsi,%rax,4)
vsubps %xmm9, %xmm7, %xmm7
vextractps $0x2, %xmm8, 0xc0(%rsi,%rax,4)
vmovss %xmm7, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm7, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm7, 0xd0(%rsi,%rax,4)
vmovss -0x4(%rdx), %xmm7
vmovss %xmm7, 0xe0(%rsi,%rax,4)
vmovss (%rdx), %xmm7
vcmpeqss %xmm0, %xmm7, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovss %xmm7, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x20004d
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMSMBlur::BuilderT<embree::NodeRefPtr<4>, embree::avx::SubdivRecalculatePrimRef, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::rebuild(unsigned long, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator) (.cold.1)
|
__forceinline void operator() (const BuildRecord&, const BuildRecord*, NodeRef ref, NodeRecordMB4D* children, const size_t num) const
{
#if defined(DEBUG)
// check that empty children are only at the end of the child list
bool emptyChild = false;
for (size_t i=0; i<num; i++) {
emptyChild |= (children[i].ref == NodeRef::emptyNode);
assert(emptyChild == (children[i].ref == NodeRef::emptyNode));
}
#endif
if (likely(ref.isAABBNodeMB())) {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB()->set(i, children[i]);
} else {
for (size_t i=0; i<num; i++)
ref.getAABBNodeMB4D()->set(i, children[i]);
}
}
|
testb $0x1, %dil
jne 0x2003a8
andq $-0x10, %rsi
addq $0x54, %rdx
xorl %eax, %eax
vmovss 0x1cec525(%rip), %xmm0 # 0x1eec714
vbroadcastss 0x1d218ec(%rip), %xmm2 # 0x1f21ae4
vbroadcastss 0x1d218df(%rip), %xmm3 # 0x1f21ae0
vbroadcastss 0x1d20cba(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x1cf0dd1(%rip), %xmm5 # 0x1ef0fe4
vbroadcastss 0x1cfee38(%rip), %xmm6 # 0x1eff054
movq -0x54(%rdx), %rdi
movq %rdi, (%rsi,%rax,8)
vmovss (%rdx), %xmm7
vmovss -0x4(%rdx), %xmm8
vsubss %xmm8, %xmm7, %xmm9
vdivss %xmm9, %xmm0, %xmm9
vbroadcastss 0x1d20c80(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm10
vmulss %xmm10, %xmm9, %xmm10
vsubss %xmm10, %xmm0, %xmm11
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmovaps -0x44(%rdx), %xmm12
vmovaps -0x34(%rdx), %xmm13
vmovaps -0x24(%rdx), %xmm14
vmulps %xmm10, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm1
vaddps %xmm1, %xmm15, %xmm1
vmovaps -0x14(%rdx), %xmm15
vmulps %xmm10, %xmm15, %xmm10
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vsubss %xmm8, %xmm0, %xmm11
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm0, %xmm11
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm14, %xmm14
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm12
vaddps %xmm12, %xmm14, %xmm12
vmulps %xmm9, %xmm15, %xmm9
vmulps %xmm11, %xmm13, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm10, %xmm10
vminps %xmm2, %xmm12, %xmm11
vmaxps %xmm3, %xmm9, %xmm9
vandps %xmm4, %xmm1, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm1, %xmm1
vandps %xmm4, %xmm10, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm12, %xmm10, %xmm10
vandps %xmm4, %xmm11, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vsubps %xmm12, %xmm11, %xmm11
vandps %xmm4, %xmm9, %xmm12
vmulps %xmm5, %xmm12, %xmm12
vmovss %xmm1, 0x20(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0x40(%rsi,%rax,4)
vaddps %xmm12, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0x60(%rsi,%rax,4)
vmovss %xmm10, 0x30(%rsi,%rax,4)
vextractps $0x1, %xmm10, 0x50(%rsi,%rax,4)
vsubps %xmm1, %xmm11, %xmm1
vextractps $0x2, %xmm10, 0x70(%rsi,%rax,4)
vmovss %xmm1, 0x80(%rsi,%rax,4)
vextractps $0x1, %xmm1, 0xa0(%rsi,%rax,4)
vsubps %xmm10, %xmm9, %xmm9
vextractps $0x2, %xmm1, 0xc0(%rsi,%rax,4)
vmovss %xmm9, 0x90(%rsi,%rax,4)
vextractps $0x1, %xmm9, 0xb0(%rsi,%rax,4)
vextractps $0x2, %xmm9, 0xd0(%rsi,%rax,4)
vmovss %xmm8, 0xe0(%rsi,%rax,4)
vcmpeqss %xmm0, %xmm7, %xmm1
vblendvps %xmm1, %xmm6, %xmm7, %xmm1
vmovss %xmm1, 0xf0(%rsi,%rax,4)
incq %rax
addq $0x60, %rdx
cmpq %rax, %rcx
jne 0x20021c
retq
|
/embree[P]embree/kernels/bvh/bvh_node_aabb_mb4d.h
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<4>>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<4>, 4>::Create, embree::avx::SetBVHNBounds<4>, embree::avx::CreateMortonLeaf<4, embree::InstancePrimitive>, embree::avx::CalculateMeshBounds<embree::Instance>, embree::Scene::BuildProgressMonitorInterface>::recreateMortonCodes(embree::range<unsigned int> const&) const::'lambda0'(embree::range<unsigned int> const&)::operator()(embree::range<unsigned int> const&) const (.cold.1)
|
__forceinline QuaternionT( const T& r, const T& i, const T& j, const T& k ) : r(r), i(i), j(j), k(k) {}
|
vmovss 0x3c(%rdi), %xmm3
vmovss 0xc(%rdi), %xmm6
vmovss 0x1c(%rdi), %xmm4
vmovss 0x2c(%rdi), %xmm2
vmovaps (%rdi), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vshufps $0xe9, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[1,2],xmm0[2,3]
vblendps $0x4, 0x10(%rdi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[2],xmm1[3]
vmulss %xmm6, %xmm6, %xmm8
vmulss %xmm3, %xmm3, %xmm9
vaddss %xmm8, %xmm9, %xmm5
vbroadcastss 0x1d20800(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm4, %xmm10, %xmm7
vmulss %xmm4, %xmm7, %xmm7
vaddss %xmm5, %xmm7, %xmm5
vxorps %xmm2, %xmm10, %xmm10
vmulss %xmm2, %xmm10, %xmm10
vaddss %xmm5, %xmm10, %xmm11
vmulss %xmm2, %xmm3, %xmm5
vmulss %xmm4, %xmm6, %xmm12
vaddss %xmm5, %xmm12, %xmm13
vsubss %xmm5, %xmm12, %xmm5
vmulss %xmm2, %xmm6, %xmm12
vsubss %xmm8, %xmm9, %xmm9
vmulss %xmm4, %xmm4, %xmm8
vaddss %xmm9, %xmm8, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm3, %xmm10
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm10, %xmm12, %xmm6
vmulss %xmm2, %xmm4, %xmm4
vaddss %xmm10, %xmm12, %xmm10
vaddss %xmm3, %xmm4, %xmm12
vsubss %xmm3, %xmm4, %xmm3
vaddss %xmm13, %xmm13, %xmm4
vaddss %xmm6, %xmm6, %xmm6
vaddss %xmm7, %xmm9, %xmm7
vmulss %xmm2, %xmm2, %xmm2
vaddss %xmm7, %xmm2, %xmm7
vshufps $0x0, %xmm11, %xmm11, %xmm2 # xmm2 = xmm11[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps 0x1cebfb6(%rip), %xmm9 # 0x1eec700
vmulps %xmm6, %xmm9, %xmm6
vmovsd 0x1cebf9a(%rip), %xmm11 # 0x1eec6f0
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovss 0x1cebfae(%rip), %xmm6 # 0x1eec714
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vaddss %xmm5, %xmm5, %xmm4
vaddss %xmm12, %xmm12, %xmm5
vaddss %xmm10, %xmm10, %xmm10
vaddss %xmm3, %xmm3, %xmm12
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm10, %xmm10, %xmm4 # xmm4 = xmm10[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm6, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm0, %xmm1, %xmm5
vbroadcastss (%rdi), %xmm1
vmulps %xmm0, %xmm4, %xmm6
vmulps %xmm0, %xmm3, %xmm0
vaddps %xmm6, %xmm0, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x10(%rdi), %xmm1
vbroadcastss 0x14(%rdi), %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vbroadcastss 0x20(%rdi), %xmm6
vbroadcastss 0x24(%rdi), %xmm7
vbroadcastss 0x28(%rdi), %xmm8
vmulps %xmm4, %xmm8, %xmm8
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm6, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0x30(%rdi), %xmm7
vbroadcastss 0x34(%rdi), %xmm8
vbroadcastss 0x38(%rdi), %xmm9
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
movq 0x58(%rsi), %rax
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm4
vminps 0x30(%rax), %xmm3, %xmm3
vmaxps 0x40(%rax), %xmm4, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vmulps %xmm1, %xmm7, %xmm7
vaddps %xmm3, %xmm7, %xmm8
vmulps %xmm0, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm9
vbroadcastss 0x1ceb186(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1cec2dc(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vshufps $0xaa, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm6
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm7, %xmm6
vaddps %xmm6, %xmm5, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vshufps $0x55, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,1,1,1]
vmulps %xmm1, %xmm9, %xmm1
vaddps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm5, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm5, %xmm2
vminps %xmm2, %xmm10, %xmm5
vmaxps %xmm2, %xmm7, %xmm2
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm6, %xmm0, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm3, %xmm0, %xmm3
vminps %xmm3, %xmm5, %xmm4
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm1
vmovaps %xmm1, (%rdx)
vmaxps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, (%rcx)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/math/quaternion.h
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<8>>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create, embree::avx::SetBVHNBounds<8>, embree::avx::CreateMortonLeaf<8, embree::InstancePrimitive>, embree::avx::CalculateMeshBounds<embree::Instance>, embree::Scene::BuildProgressMonitorInterface>::recreateMortonCodes(embree::range<unsigned int> const&) const::'lambda'(embree::range<unsigned int> const&)::operator()(embree::range<unsigned int> const&) const (.cold.1)
|
__forceinline BBox3fa bounds(size_t i) const {
assert(i == 0);
if (unlikely(gsubtype == GTY_SUBTYPE_INSTANCE_QUATERNION))
return xfmBounds(quaternionDecompositionToAffineSpace(local2world[0]),object->bounds.bounds());
return xfmBounds(local2world[0],object->bounds.bounds());
}
|
movq (%rdi), %rax
vmovss 0x3c(%rax), %xmm3
vmovss 0xc(%rax), %xmm6
vmovss 0x1c(%rax), %xmm4
vmovss 0x2c(%rax), %xmm2
vmovaps (%rax), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vshufps $0xe9, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[1,2],xmm0[2,3]
vblendps $0x4, 0x10(%rax), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[2],xmm1[3]
vmulss %xmm6, %xmm6, %xmm8
vmulss %xmm3, %xmm3, %xmm9
vaddss %xmm8, %xmm9, %xmm5
vbroadcastss 0x1d20548(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm4, %xmm10, %xmm7
vmulss %xmm4, %xmm7, %xmm7
vaddss %xmm5, %xmm7, %xmm5
vxorps %xmm2, %xmm10, %xmm10
vmulss %xmm2, %xmm10, %xmm10
vaddss %xmm5, %xmm10, %xmm11
vmulss %xmm2, %xmm3, %xmm5
vmulss %xmm4, %xmm6, %xmm12
vaddss %xmm5, %xmm12, %xmm13
vsubss %xmm5, %xmm12, %xmm5
vmulss %xmm2, %xmm6, %xmm12
vsubss %xmm8, %xmm9, %xmm9
vmulss %xmm4, %xmm4, %xmm8
vaddss %xmm9, %xmm8, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm3, %xmm10
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm10, %xmm12, %xmm6
vmulss %xmm2, %xmm4, %xmm4
vaddss %xmm10, %xmm12, %xmm10
vaddss %xmm3, %xmm4, %xmm12
vsubss %xmm3, %xmm4, %xmm3
vaddss %xmm13, %xmm13, %xmm4
vaddss %xmm6, %xmm6, %xmm6
vaddss %xmm7, %xmm9, %xmm7
vmulss %xmm2, %xmm2, %xmm2
vaddss %xmm7, %xmm2, %xmm7
vshufps $0x0, %xmm11, %xmm11, %xmm2 # xmm2 = xmm11[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps 0x1cebcfe(%rip), %xmm9 # 0x1eec700
vmulps %xmm6, %xmm9, %xmm6
vmovsd 0x1cebce2(%rip), %xmm11 # 0x1eec6f0
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovss 0x1cebcf6(%rip), %xmm6 # 0x1eec714
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vaddss %xmm5, %xmm5, %xmm4
vaddss %xmm12, %xmm12, %xmm5
vaddss %xmm10, %xmm10, %xmm10
vaddss %xmm3, %xmm3, %xmm12
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm10, %xmm10, %xmm4 # xmm4 = xmm10[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm6, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm0, %xmm1, %xmm5
vbroadcastss (%rax), %xmm1
vmulps %xmm0, %xmm4, %xmm6
vmulps %xmm0, %xmm3, %xmm0
vaddps %xmm6, %xmm0, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x10(%rax), %xmm1
vbroadcastss 0x14(%rax), %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vbroadcastss 0x20(%rax), %xmm6
vbroadcastss 0x24(%rax), %xmm7
vbroadcastss 0x28(%rax), %xmm8
vmulps %xmm4, %xmm8, %xmm8
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm6, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0x30(%rax), %xmm7
vbroadcastss 0x34(%rax), %xmm8
vbroadcastss 0x38(%rax), %xmm9
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
movq (%rsi), %rax
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm4
vminps 0x30(%rax), %xmm3, %xmm3
vmaxps 0x40(%rax), %xmm4, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vmulps %xmm1, %xmm7, %xmm7
vaddps %xmm3, %xmm7, %xmm8
vmulps %xmm0, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm9
vbroadcastss 0x1ceaecf(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1cec025(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vshufps $0xaa, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm6
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm7, %xmm6
vaddps %xmm6, %xmm5, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vshufps $0x55, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,1,1,1]
vmulps %xmm1, %xmm9, %xmm1
vaddps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm5, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm5, %xmm2
vminps %xmm2, %xmm10, %xmm5
vmaxps %xmm2, %xmm7, %xmm2
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm6, %xmm0, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm3, %xmm0, %xmm3
vminps %xmm3, %xmm5, %xmm4
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm1
vmovaps %xmm1, (%rdx)
vmaxps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, (%rcx)
retq
|
/embree[P]embree/kernels/bvh/../common/scene_instance.h
|
unsigned long embree::avx::createMortonCodeArray<embree::Instance>(embree::Instance*, embree::vector_t<embree::avx::BVHBuilderMorton::BuildPrim, embree::aligned_monitored_allocator<embree::avx::BVHBuilderMorton::BuildPrim, 8ul>>&, embree::BuildProgressMonitor&)::'lambda0'(embree::range<unsigned long> const&)::operator()(embree::range<unsigned long> const&) const (.cold.1)
|
__forceinline QuaternionT( const T& r, const T& i, const T& j, const T& k ) : r(r), i(i), j(j), k(k) {}
|
vmovss 0x3c(%rdi), %xmm3
vmovss 0xc(%rdi), %xmm6
vmovss 0x1c(%rdi), %xmm4
vmovss 0x2c(%rdi), %xmm2
vmovaps (%rdi), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vshufps $0xe9, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[1,2],xmm0[2,3]
vblendps $0x4, 0x10(%rdi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[2],xmm1[3]
vmulss %xmm6, %xmm6, %xmm8
vmulss %xmm3, %xmm3, %xmm9
vaddss %xmm8, %xmm9, %xmm5
vbroadcastss 0x1d1fd28(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm4, %xmm10, %xmm7
vmulss %xmm4, %xmm7, %xmm7
vaddss %xmm5, %xmm7, %xmm5
vxorps %xmm2, %xmm10, %xmm10
vmulss %xmm2, %xmm10, %xmm10
vaddss %xmm5, %xmm10, %xmm11
vmulss %xmm2, %xmm3, %xmm5
vmulss %xmm4, %xmm6, %xmm12
vaddss %xmm5, %xmm12, %xmm13
vsubss %xmm5, %xmm12, %xmm5
vmulss %xmm2, %xmm6, %xmm12
vsubss %xmm8, %xmm9, %xmm9
vmulss %xmm4, %xmm4, %xmm8
vaddss %xmm9, %xmm8, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm3, %xmm10
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm10, %xmm12, %xmm6
vmulss %xmm2, %xmm4, %xmm4
vaddss %xmm10, %xmm12, %xmm10
vaddss %xmm3, %xmm4, %xmm12
vsubss %xmm3, %xmm4, %xmm3
vaddss %xmm13, %xmm13, %xmm4
vaddss %xmm6, %xmm6, %xmm6
vaddss %xmm7, %xmm9, %xmm7
vmulss %xmm2, %xmm2, %xmm2
vaddss %xmm7, %xmm2, %xmm7
vshufps $0x0, %xmm11, %xmm11, %xmm2 # xmm2 = xmm11[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps 0x1ceb4de(%rip), %xmm9 # 0x1eec700
vmulps %xmm6, %xmm9, %xmm6
vmovsd 0x1ceb4c2(%rip), %xmm11 # 0x1eec6f0
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovss 0x1ceb4d6(%rip), %xmm6 # 0x1eec714
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vaddss %xmm5, %xmm5, %xmm4
vaddss %xmm12, %xmm12, %xmm5
vaddss %xmm10, %xmm10, %xmm10
vaddss %xmm3, %xmm3, %xmm12
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm10, %xmm10, %xmm4 # xmm4 = xmm10[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm6, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm0, %xmm1, %xmm5
vbroadcastss (%rdi), %xmm1
vmulps %xmm0, %xmm4, %xmm6
vmulps %xmm0, %xmm3, %xmm0
vaddps %xmm6, %xmm0, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x10(%rdi), %xmm1
vbroadcastss 0x14(%rdi), %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vbroadcastss 0x20(%rdi), %xmm6
vbroadcastss 0x24(%rdi), %xmm7
vbroadcastss 0x28(%rdi), %xmm8
vmulps %xmm4, %xmm8, %xmm8
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm6, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0x30(%rdi), %xmm7
vbroadcastss 0x34(%rdi), %xmm8
vbroadcastss 0x38(%rdi), %xmm9
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
movq 0x58(%rsi), %rax
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm4
vminps 0x30(%rax), %xmm3, %xmm3
vmaxps 0x40(%rax), %xmm4, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vmulps %xmm1, %xmm7, %xmm7
vaddps %xmm3, %xmm7, %xmm8
vmulps %xmm0, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm9
vbroadcastss 0x1cea6ae(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1ceb804(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vshufps $0xaa, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm6
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm7, %xmm6
vaddps %xmm6, %xmm5, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vshufps $0x55, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,1,1,1]
vmulps %xmm1, %xmm9, %xmm1
vaddps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm5, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm5, %xmm2
vminps %xmm2, %xmm10, %xmm5
vmaxps %xmm2, %xmm7, %xmm2
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm6, %xmm0, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm3, %xmm0, %xmm3
vminps %xmm3, %xmm5, %xmm4
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm1
vmovaps %xmm1, (%rdx)
vmaxps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, (%rcx)
retq
|
/embree[P]embree/kernels/builders/../common/../../common/math/quaternion.h
|
Subsets and Splits
SQL Console for LLM4Binary/decompile-bench
Filters out entries with file names ending in .cpp, providing a basic subset of the dataset that excludes C++ files.