name
string | code
string | asm
string | file
string |
|---|---|---|---|
unsigned long embree::avx::createMortonCodeArray<embree::Instance>(embree::Instance*, embree::vector_t<embree::avx::BVHBuilderMorton::BuildPrim, embree::aligned_monitored_allocator<embree::avx::BVHBuilderMorton::BuildPrim, 8ul>>&, embree::BuildProgressMonitor&)::'lambda'(embree::range<unsigned long> const&, unsigned long)::operator()(embree::range<unsigned long> const&, unsigned long) const (.cold.1)
|
__forceinline QuaternionT( const T& r, const T& i, const T& j, const T& k ) : r(r), i(i), j(j), k(k) {}
|
vmovss 0x3c(%rdi), %xmm3
vmovss 0xc(%rdi), %xmm6
vmovss 0x1c(%rdi), %xmm4
vmovss 0x2c(%rdi), %xmm2
vmovaps (%rdi), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vshufps $0xe9, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[1,2],xmm0[2,3]
vblendps $0x4, 0x10(%rdi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[2],xmm1[3]
vmulss %xmm6, %xmm6, %xmm8
vmulss %xmm3, %xmm3, %xmm9
vaddss %xmm8, %xmm9, %xmm5
vbroadcastss 0x1d1fa73(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm4, %xmm10, %xmm7
vmulss %xmm4, %xmm7, %xmm7
vaddss %xmm5, %xmm7, %xmm5
vxorps %xmm2, %xmm10, %xmm10
vmulss %xmm2, %xmm10, %xmm10
vaddss %xmm5, %xmm10, %xmm11
vmulss %xmm2, %xmm3, %xmm5
vmulss %xmm4, %xmm6, %xmm12
vaddss %xmm5, %xmm12, %xmm13
vsubss %xmm5, %xmm12, %xmm5
vmulss %xmm2, %xmm6, %xmm12
vsubss %xmm8, %xmm9, %xmm9
vmulss %xmm4, %xmm4, %xmm8
vaddss %xmm9, %xmm8, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm3, %xmm10
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm10, %xmm12, %xmm6
vmulss %xmm2, %xmm4, %xmm4
vaddss %xmm10, %xmm12, %xmm10
vaddss %xmm3, %xmm4, %xmm12
vsubss %xmm3, %xmm4, %xmm3
vaddss %xmm13, %xmm13, %xmm4
vaddss %xmm6, %xmm6, %xmm6
vaddss %xmm7, %xmm9, %xmm7
vmulss %xmm2, %xmm2, %xmm2
vaddss %xmm7, %xmm2, %xmm7
vshufps $0x0, %xmm11, %xmm11, %xmm2 # xmm2 = xmm11[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps 0x1ceb229(%rip), %xmm9 # 0x1eec700
vmulps %xmm6, %xmm9, %xmm6
vmovsd 0x1ceb20d(%rip), %xmm11 # 0x1eec6f0
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovss 0x1ceb221(%rip), %xmm6 # 0x1eec714
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vaddss %xmm5, %xmm5, %xmm4
vaddss %xmm12, %xmm12, %xmm5
vaddss %xmm10, %xmm10, %xmm10
vaddss %xmm3, %xmm3, %xmm12
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm10, %xmm10, %xmm4 # xmm4 = xmm10[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm6, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm0, %xmm1, %xmm5
vbroadcastss (%rdi), %xmm1
vmulps %xmm0, %xmm4, %xmm6
vmulps %xmm0, %xmm3, %xmm0
vaddps %xmm6, %xmm0, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x10(%rdi), %xmm1
vbroadcastss 0x14(%rdi), %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vbroadcastss 0x20(%rdi), %xmm6
vbroadcastss 0x24(%rdi), %xmm7
vbroadcastss 0x28(%rdi), %xmm8
vmulps %xmm4, %xmm8, %xmm8
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm6, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0x30(%rdi), %xmm7
vbroadcastss 0x34(%rdi), %xmm8
vbroadcastss 0x38(%rdi), %xmm9
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
movq 0x58(%rsi), %rax
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm4
vminps 0x30(%rax), %xmm3, %xmm3
vmaxps 0x40(%rax), %xmm4, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vmulps %xmm1, %xmm7, %xmm7
vaddps %xmm3, %xmm7, %xmm8
vmulps %xmm0, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm9
vbroadcastss 0x1cea3f9(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1ceb54f(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vshufps $0xaa, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm6
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm7, %xmm6
vaddps %xmm6, %xmm5, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vshufps $0x55, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,1,1,1]
vmulps %xmm1, %xmm9, %xmm1
vaddps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm5, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm5, %xmm2
vminps %xmm2, %xmm10, %xmm5
vmaxps %xmm2, %xmm7, %xmm2
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm6, %xmm0, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm3, %xmm0, %xmm3
vminps %xmm3, %xmm5, %xmm4
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm1
vmovaps %xmm1, (%rdx)
vmaxps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, (%rcx)
retq
|
/embree[P]embree/kernels/builders/../common/../../common/math/quaternion.h
|
unsigned long embree::avx::createMortonCodeArray<embree::Instance>(embree::Instance*, embree::vector_t<embree::avx::BVHBuilderMorton::BuildPrim, embree::aligned_monitored_allocator<embree::avx::BVHBuilderMorton::BuildPrim, 8ul>>&, embree::BuildProgressMonitor&)::'lambda0'(embree::range<unsigned long> const&, unsigned long)::operator()(embree::range<unsigned long> const&, unsigned long) const (.cold.1)
|
__forceinline QuaternionT( const T& r, const T& i, const T& j, const T& k ) : r(r), i(i), j(j), k(k) {}
|
vmovss 0x3c(%rdi), %xmm3
vmovss 0xc(%rdi), %xmm6
vmovss 0x1c(%rdi), %xmm4
vmovss 0x2c(%rdi), %xmm2
vmovaps (%rdi), %xmm1
vxorps %xmm0, %xmm0, %xmm0
vshufps $0xe9, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[1,2],xmm0[2,3]
vblendps $0x4, 0x10(%rdi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[2],xmm1[3]
vmulss %xmm6, %xmm6, %xmm8
vmulss %xmm3, %xmm3, %xmm9
vaddss %xmm8, %xmm9, %xmm5
vbroadcastss 0x1d1f7be(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm4, %xmm10, %xmm7
vmulss %xmm4, %xmm7, %xmm7
vaddss %xmm5, %xmm7, %xmm5
vxorps %xmm2, %xmm10, %xmm10
vmulss %xmm2, %xmm10, %xmm10
vaddss %xmm5, %xmm10, %xmm11
vmulss %xmm2, %xmm3, %xmm5
vmulss %xmm4, %xmm6, %xmm12
vaddss %xmm5, %xmm12, %xmm13
vsubss %xmm5, %xmm12, %xmm5
vmulss %xmm2, %xmm6, %xmm12
vsubss %xmm8, %xmm9, %xmm9
vmulss %xmm4, %xmm4, %xmm8
vaddss %xmm9, %xmm8, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm3, %xmm10
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm10, %xmm12, %xmm6
vmulss %xmm2, %xmm4, %xmm4
vaddss %xmm10, %xmm12, %xmm10
vaddss %xmm3, %xmm4, %xmm12
vsubss %xmm3, %xmm4, %xmm3
vaddss %xmm13, %xmm13, %xmm4
vaddss %xmm6, %xmm6, %xmm6
vaddss %xmm7, %xmm9, %xmm7
vmulss %xmm2, %xmm2, %xmm2
vaddss %xmm7, %xmm2, %xmm7
vshufps $0x0, %xmm11, %xmm11, %xmm2 # xmm2 = xmm11[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps 0x1ceaf74(%rip), %xmm9 # 0x1eec700
vmulps %xmm6, %xmm9, %xmm6
vmovsd 0x1ceaf58(%rip), %xmm11 # 0x1eec6f0
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovss 0x1ceaf6c(%rip), %xmm6 # 0x1eec714
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm2
vaddss %xmm5, %xmm5, %xmm4
vaddss %xmm12, %xmm12, %xmm5
vaddss %xmm10, %xmm10, %xmm10
vaddss %xmm3, %xmm3, %xmm12
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vmulps %xmm4, %xmm11, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm10, %xmm10, %xmm4 # xmm4 = xmm10[0,0,0,0]
vshufps $0x0, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm6, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm0, %xmm1, %xmm5
vbroadcastss (%rdi), %xmm1
vmulps %xmm0, %xmm4, %xmm6
vmulps %xmm0, %xmm3, %xmm0
vaddps %xmm6, %xmm0, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x10(%rdi), %xmm1
vbroadcastss 0x14(%rdi), %xmm7
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vbroadcastss 0x20(%rdi), %xmm6
vbroadcastss 0x24(%rdi), %xmm7
vbroadcastss 0x28(%rdi), %xmm8
vmulps %xmm4, %xmm8, %xmm8
vmulps %xmm3, %xmm7, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm6, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0x30(%rdi), %xmm7
vbroadcastss 0x34(%rdi), %xmm8
vbroadcastss 0x38(%rdi), %xmm9
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
movq 0x58(%rsi), %rax
vmovaps 0x10(%rax), %xmm3
vmovaps 0x20(%rax), %xmm4
vminps 0x30(%rax), %xmm3, %xmm3
vmaxps 0x40(%rax), %xmm4, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vmulps %xmm1, %xmm7, %xmm7
vaddps %xmm3, %xmm7, %xmm8
vmulps %xmm0, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm9
vbroadcastss 0x1cea144(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1ceb29a(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vshufps $0xaa, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm6
vaddps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm7, %xmm6
vaddps %xmm6, %xmm5, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vshufps $0x55, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,1,1,1]
vmulps %xmm1, %xmm9, %xmm1
vaddps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm5, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm5, %xmm2
vminps %xmm2, %xmm10, %xmm5
vmaxps %xmm2, %xmm7, %xmm2
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm6, %xmm0, %xmm4
vminps %xmm4, %xmm5, %xmm5
vmaxps %xmm4, %xmm2, %xmm2
vaddps %xmm3, %xmm0, %xmm3
vminps %xmm3, %xmm5, %xmm4
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm1
vmovaps %xmm1, (%rdx)
vmaxps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, (%rcx)
retq
nop
|
/embree[P]embree/kernels/builders/../common/../../common/math/quaternion.h
|
embree::avx::BVHNIntersector1<8, 257, false, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1388, %rsp # imm = 0x1388
movq %rdx, 0x18(%rsp)
movq %rdi, 0x10(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x20210c
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x20210c
vmovaps 0x10(%rsi), %xmm2
vdpps $0x7f, %xmm2, %xmm2, %xmm5
vrsqrtss %xmm5, %xmm5, %xmm6
vmovss 0x1cead4b(%rip), %xmm3 # 0x1eec718
vmulss %xmm3, %xmm6, %xmm7
vmovss 0x1ceb1a7(%rip), %xmm4 # 0x1eecb80
vmulss %xmm4, %xmm5, %xmm5
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vsubss %xmm5, %xmm7, %xmm8
vbroadcastss 0x10(%rsi), %ymm5
vmovups %ymm5, 0x180(%rsp)
vshufps $0x0, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,0]
vmovshdup %xmm6, %xmm7 # xmm7 = xmm6[1,1,3,3]
vbroadcastss 0x1d1f4a8(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm7, %xmm10, %xmm11
vxorps %xmm7, %xmm7, %xmm7
vunpckhps %xmm7, %xmm6, %xmm12 # xmm12 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovss %xmm11, %xmm7, %xmm11 # xmm11 = xmm11[0],xmm7[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm10, %xmm9, %xmm9
vinsertps $0x2a, %xmm6, %xmm9, %xmm9 # xmm9 = xmm9[0],zero,xmm6[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm12
vcmpltps %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vblendvps %xmm10, %xmm11, %xmm9, %xmm9
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vbroadcastss 0x14(%rsi), %ymm11
vmovups %ymm11, 0x160(%rsp)
vrsqrtss %xmm10, %xmm10, %xmm11
vmulss %xmm4, %xmm10, %xmm10
vmulss %xmm11, %xmm10, %xmm10
vmulss %xmm11, %xmm11, %xmm12
vmulss %xmm12, %xmm10, %xmm10
vbroadcastss 0x18(%rsi), %ymm12
vmovups %ymm12, 0x140(%rsp)
vmulss %xmm3, %xmm11, %xmm11
vsubss %xmm10, %xmm11, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm11 # xmm11 = xmm6[1,2,0,3]
vmulps %xmm9, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vsubps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm10, %xmm10, %xmm11
leaq 0x1a0(%rsp), %rcx
vmovss %xmm8, (%rcx)
vrsqrtss %xmm11, %xmm11, %xmm8
vmulss %xmm3, %xmm8, %xmm3
vmulss %xmm4, %xmm11, %xmm4
vmulss %xmm4, %xmm8, %xmm4
vmulss %xmm8, %xmm8, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vsubss %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm10, %xmm3
vmulps %xmm6, %xmm5, %xmm4
vunpcklps %xmm4, %xmm9, %xmm5 # xmm5 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpckhps %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
vunpcklps %xmm7, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
vunpckhps %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
vunpcklps %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vunpcklps %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vunpckhps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovaps %xmm4, 0x10(%rcx)
vmovaps %xmm5, 0x20(%rcx)
vmovaps %xmm3, 0x30(%rcx)
leaq 0x1e8(%rsp), %r9
movq 0x70(%rax), %rax
vmaxss 0xc(%rsi), %xmm1, %xmm3
vbroadcastss 0x1d1f37b(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm4
vbroadcastss 0x1cef492(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm2, %xmm2
movq %rax, -0x8(%r9)
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1ceab9e(%rip), %xmm5 # 0x1eec714
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vbroadcastss (%rsi), %ymm15
vbroadcastss 0x4(%rsi), %ymm6
vbroadcastss 0x8(%rsi), %ymm14
vaddps %xmm2, %xmm4, %xmm2
xorl %r10d, %r10d
vucomiss %xmm1, %xmm2
setb %r10b
vshufps $0x0, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm7
vmovshdup %xmm2, %xmm4 # xmm4 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm8
vshufpd $0x1, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1,0]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
shll $0x5, %r10d
xorl %r14d, %r14d
vucomiss %xmm1, %xmm4
vinsertf128 $0x1, %xmm2, %ymm2, %ymm4
setb %r14b
shll $0x5, %r14d
orq $0x40, %r14
xorl %r13d, %r13d
vucomiss %xmm1, %xmm5
setb %r13b
shll $0x5, %r13d
orq $0x80, %r13
movq %r10, %rbp
xorq $0x20, %rbp
movq %r14, %r12
xorq $0x20, %r12
movq %r13, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm5
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm9
leaq 0x1e0(%rsp), %r11
vmovups %ymm15, 0x120(%rsp)
vmovups %ymm6, 0x20(%rsp)
vmovups %ymm14, 0x100(%rsp)
vmovups %ymm7, 0xe0(%rsp)
vmovups %ymm8, 0xc0(%rsp)
vmovups %ymm4, 0xa0(%rsp)
vmovups %ymm5, 0x80(%rsp)
vmovups %ymm9, 0x60(%rsp)
movq %rsi, 0x8(%rsp)
movq %r10, (%rsp)
cmpq %r11, %r9
je 0x20210c
movq -0x8(%r9), %rcx
addq $-0x8, %r9
movq %rcx, %rax
andq $0xf, %rax
jne 0x201d56
vmovaps 0x40(%rcx,%r10), %ymm0
vsubps %ymm15, %ymm0, %ymm0
vmulps %ymm0, %ymm7, %ymm0
vmovaps 0x40(%rcx,%r14), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm8, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rcx,%r13), %ymm1
vsubps %ymm14, %ymm1, %ymm1
vmulps %ymm1, %ymm4, %ymm1
vmovaps 0x40(%rcx,%rbp), %ymm2
vsubps %ymm15, %ymm2, %ymm2
vmulps %ymm2, %ymm7, %ymm2
vmovaps 0x40(%rcx,%r12), %ymm3
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vminps %ymm3, %ymm2, %ymm2
vmovaps 0x40(%rcx,%rbx), %ymm3
vsubps %ymm14, %ymm3, %ymm3
vmulps %ymm3, %ymm4, %ymm3
vmaxps %ymm5, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vminps %ymm9, %ymm3, %ymm1
vminps %ymm1, %ymm2, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r15d
movb $0x1, %al
testb %al, %al
je 0x201d5f
testq %r15, %r15
je 0x201d63
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x201d68
movq %rax, %rcx
testl %edx, %edx
je 0x201c84
jmp 0x202065
cmpl $0x2, %eax
je 0x201daf
xorl %eax, %eax
jmp 0x201d11
pushq $0x6
jmp 0x201d65
pushq $0x4
popq %rdx
jmp 0x201d49
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x201daa
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x201d77
movq %r8, %rcx
jmp 0x201d49
movq %rcx, %rax
andq $-0x10, %rax
vmovaps 0x100(%rax), %ymm5
vmovups 0x140(%rsp), %ymm0
vmulps %ymm5, %ymm0, %ymm7
vmovaps 0x120(%rax), %ymm4
vmulps %ymm4, %ymm0, %ymm8
vmovaps 0x140(%rax), %ymm3
vmulps %ymm3, %ymm0, %ymm9
vmovaps 0x40(%rax), %ymm2
vmovaps 0x60(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovups %ymm0, 0x40(%rsp)
vmovaps 0xa0(%rax), %ymm6
vmovups 0x160(%rsp), %ymm12
vmulps %ymm6, %ymm12, %ymm10
vaddps %ymm7, %ymm10, %ymm10
vmovaps 0xc0(%rax), %ymm7
vmulps %ymm7, %ymm12, %ymm11
vaddps %ymm11, %ymm8, %ymm11
vmovaps 0xe0(%rax), %ymm8
vmulps %ymm8, %ymm12, %ymm12
vaddps %ymm12, %ymm9, %ymm9
vmovups 0x180(%rsp), %ymm13
vmulps %ymm2, %ymm13, %ymm12
vaddps %ymm12, %ymm10, %ymm10
vmulps %ymm1, %ymm13, %ymm12
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm0, %ymm13, %ymm12
vaddps %ymm12, %ymm9, %ymm9
vbroadcastss 0x1d1f060(%rip), %ymm13 # 0x1f20ec4
vandps %ymm13, %ymm10, %ymm12
vbroadcastss 0x1cef176(%rip), %ymm0 # 0x1ef0fe8
vcmpltps %ymm0, %ymm12, %ymm12
vblendvps %ymm12, %ymm0, %ymm10, %ymm10
vandps %ymm13, %ymm11, %ymm12
vcmpltps %ymm0, %ymm12, %ymm12
vblendvps %ymm12, %ymm0, %ymm11, %ymm11
vandps %ymm13, %ymm9, %ymm12
vcmpltps %ymm0, %ymm12, %ymm12
vblendvps %ymm12, %ymm0, %ymm9, %ymm12
vrcpps %ymm10, %ymm9
vmulps %ymm10, %ymm9, %ymm10
vbroadcastss 0x1cea864(%rip), %ymm0 # 0x1eec714
vsubps %ymm10, %ymm0, %ymm10
vrcpps %ymm11, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vaddps %ymm10, %ymm9, %ymm9
vmulps %ymm11, %ymm13, %ymm10
vsubps %ymm10, %ymm0, %ymm10
vmulps %ymm10, %ymm13, %ymm10
vrcpps %ymm12, %ymm11
vaddps %ymm10, %ymm13, %ymm10
vmulps %ymm12, %ymm11, %ymm12
vsubps %ymm12, %ymm0, %ymm12
vmulps %ymm12, %ymm11, %ymm12
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm5, %ymm14, %ymm5
vaddps 0x160(%rax), %ymm5, %ymm5
vbroadcastss 0x1d1efba(%rip), %ymm0 # 0x1f20ec0
vxorps %ymm0, %ymm9, %ymm12
vmulps %ymm4, %ymm14, %ymm4
vaddps 0x180(%rax), %ymm4, %ymm4
vxorps %ymm0, %ymm10, %ymm13
vmulps %ymm3, %ymm14, %ymm3
vaddps 0x1a0(%rax), %ymm3, %ymm3
vxorps %ymm0, %ymm11, %ymm14
vmulps 0x20(%rsp), %ymm6, %ymm6
vaddps %ymm5, %ymm6, %ymm5
vmulps 0x20(%rsp), %ymm7, %ymm6
vaddps %ymm4, %ymm6, %ymm4
vmulps 0x20(%rsp), %ymm8, %ymm6
vaddps %ymm3, %ymm6, %ymm3
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm5, %ymm2, %ymm2
vmulps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm15, %ymm1
vaddps %ymm4, %ymm1, %ymm1
vmulps %ymm1, %ymm13, %ymm1
vmulps 0x40(%rsp), %ymm15, %ymm0
vaddps %ymm3, %ymm0, %ymm0
vmulps %ymm0, %ymm14, %ymm0
vaddps %ymm2, %ymm9, %ymm4
vaddps %ymm1, %ymm10, %ymm5
vaddps %ymm0, %ymm11, %ymm3
vextractf128 $0x1, %ymm4, %xmm6
vextractf128 $0x1, %ymm2, %xmm7
vpminsd %xmm6, %xmm7, %xmm8
vpminsd %xmm4, %xmm2, %xmm9
vinsertf128 $0x1, %xmm8, %ymm9, %ymm8
vextractf128 $0x1, %ymm5, %xmm9
vextractf128 $0x1, %ymm1, %xmm10
vpminsd %xmm9, %xmm10, %xmm11
vpminsd %xmm5, %xmm1, %xmm12
vinsertf128 $0x1, %xmm11, %ymm12, %ymm11
vextractf128 $0x1, %ymm3, %xmm12
vextractf128 $0x1, %ymm0, %xmm13
vpminsd %xmm12, %xmm13, %xmm14
vpminsd %xmm3, %xmm0, %xmm15
vinsertf128 $0x1, %xmm14, %ymm15, %ymm14
vmovups 0x120(%rsp), %ymm15
vmaxps %ymm14, %ymm11, %ymm11
vmovups 0x100(%rsp), %ymm14
vpmaxsd %xmm6, %xmm7, %xmm6
vmovups 0xe0(%rsp), %ymm7
vpmaxsd %xmm4, %xmm2, %xmm2
vinsertf128 $0x1, %xmm6, %ymm2, %ymm2
vmovups 0x20(%rsp), %ymm6
vpmaxsd %xmm9, %xmm10, %xmm4
vmovups 0x60(%rsp), %ymm9
vpmaxsd %xmm5, %xmm1, %xmm1
vmovups 0x80(%rsp), %ymm5
vinsertf128 $0x1, %xmm4, %ymm1, %ymm1
vpmaxsd %xmm12, %xmm13, %xmm4
vpmaxsd %xmm3, %xmm0, %xmm0
vinsertf128 $0x1, %xmm4, %ymm0, %ymm0
vmovups 0xa0(%rsp), %ymm4
vminps %ymm0, %ymm1, %ymm0
vmaxps %ymm8, %ymm5, %ymm1
vmovups 0xc0(%rsp), %ymm8
vmaxps %ymm11, %ymm1, %ymm1
vminps %ymm2, %ymm9, %ymm2
vminps %ymm0, %ymm2, %ymm0
vcmpleps %ymm0, %ymm1, %ymm0
jmp 0x201d0b
cmpl $0x6, %edx
jne 0x202103
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x10(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x1a0(%rsp), %rdi
movq 0x18(%rsp), %rdx
movq %r9, 0x40(%rsp)
vzeroupper
callq *0x8(%r8,%rax)
leaq 0x1e0(%rsp), %r11
vmovups 0x60(%rsp), %ymm9
vmovups 0x80(%rsp), %ymm5
vmovups 0xa0(%rsp), %ymm4
vmovups 0xc0(%rsp), %ymm8
vmovups 0xe0(%rsp), %ymm7
movq (%rsp), %r10
vmovups 0x100(%rsp), %ymm14
vmovups 0x20(%rsp), %ymm6
vmovups 0x120(%rsp), %ymm15
movq 0x40(%rsp), %r9
movq 0x8(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x202103
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x201c73
addq $0x1388, %rsp # imm = 0x1388
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16781328, false, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1408, %rsp # imm = 0x1408
movq %rdx, 0x18(%rsp)
movq %rdi, 0x10(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x202a31
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x202a31
vxorps %xmm12, %xmm12, %xmm12
vmovaps 0x10(%rsi), %xmm1
vdpps $0x7f, %xmm1, %xmm1, %xmm4
vrsqrtss %xmm4, %xmm4, %xmm5
vmovss 0x1cea59e(%rip), %xmm2 # 0x1eec718
vmulss %xmm2, %xmm5, %xmm6
vmovss 0x1cea9fa(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm4, %xmm4
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm5, %xmm5
vmulss %xmm5, %xmm4, %xmm4
vsubss %xmm4, %xmm6, %xmm7
vbroadcastss 0x10(%rsi), %ymm4
vmovups %ymm4, 0x200(%rsp)
vbroadcastss 0x14(%rsi), %ymm4
vmovups %ymm4, 0x1e0(%rsp)
vbroadcastss 0x18(%rsi), %ymm4
vmovups %ymm4, 0x1c0(%rsp)
vshufps $0x0, %xmm7, %xmm7, %xmm4 # xmm4 = xmm7[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm8 # xmm8 = xmm5[1,0]
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vbroadcastss 0x1d1ecde(%rip), %xmm9 # 0x1f20ec0
vxorps %xmm6, %xmm9, %xmm10
vxorps %xmm6, %xmm6, %xmm6
vunpckhps %xmm6, %xmm5, %xmm11 # xmm11 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovss %xmm10, %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm9, %xmm8, %xmm8
vinsertps $0x2a, %xmm5, %xmm8, %xmm8 # xmm8 = xmm8[0],zero,xmm5[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm9
vdpps $0x7f, %xmm8, %xmm8, %xmm11
vcmpltps %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vblendvps %xmm9, %xmm10, %xmm8, %xmm8
vdpps $0x7f, %xmm8, %xmm8, %xmm9
vrsqrtss %xmm9, %xmm9, %xmm10
vmulss %xmm2, %xmm10, %xmm11
vmulss %xmm3, %xmm9, %xmm9
vmulss %xmm10, %xmm9, %xmm9
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm9, %xmm9
vsubss %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm9, %xmm8, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vshufps $0xc9, %xmm5, %xmm5, %xmm10 # xmm10 = xmm5[1,2,0,3]
vmulps %xmm8, %xmm10, %xmm10
vmulps %xmm5, %xmm9, %xmm9
vsubps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm9, %xmm9, %xmm10
leaq 0x220(%rsp), %rcx
vmovss %xmm7, (%rcx)
vrsqrtss %xmm10, %xmm10, %xmm7
vmulss %xmm2, %xmm7, %xmm2
vmulss %xmm3, %xmm10, %xmm3
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm7, %xmm7
vmulss %xmm7, %xmm3, %xmm3
vsubss %xmm3, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm5, %xmm4, %xmm3
vunpcklps %xmm3, %xmm8, %xmm4 # xmm4 = xmm8[0],xmm3[0],xmm8[1],xmm3[1]
vunpckhps %xmm3, %xmm8, %xmm3 # xmm3 = xmm8[2],xmm3[2],xmm8[3],xmm3[3]
vunpcklps %xmm6, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vmovaps %xmm3, 0x10(%rcx)
vmovaps %xmm4, 0x20(%rcx)
vmovaps %xmm2, 0x30(%rcx)
leaq 0x268(%rsp), %r9
vmaxss 0xc(%rsi), %xmm12, %xmm2
vbroadcastss 0x1d1ebd4(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm1, %xmm3
vbroadcastss 0x1ceeceb(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
movq 0x70(%rax), %rax
vrcpps %xmm1, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vbroadcastss 0x1cea3f7(%rip), %xmm4 # 0x1eec714
vsubps %xmm1, %xmm4, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
vaddps %xmm1, %xmm3, %xmm1
xorl %r10d, %r10d
vucomiss %xmm12, %xmm1
movq %rax, -0x8(%r9)
setb %r10b
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vmovshdup %xmm1, %xmm3 # xmm3 = xmm1[1,1,3,3]
vshufps $0x55, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm10
vshufpd $0x1, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,0]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm11
shll $0x5, %r10d
xorl %r13d, %r13d
vucomiss %xmm12, %xmm3
setb %r13b
shll $0x5, %r13d
orq $0x40, %r13
xorl %r14d, %r14d
vucomiss %xmm12, %xmm4
setb %r14b
shll $0x5, %r14d
orq $0x80, %r14
movq %r10, %rbp
xorq $0x20, %rbp
movq %r13, %r12
xorq $0x20, %r12
movq %r14, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm12
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm13
leaq 0x260(%rsp), %r11
vmovups %ymm6, 0x60(%rsp)
vmovups %ymm7, 0x40(%rsp)
vmovups %ymm8, 0x20(%rsp)
vmovups %ymm9, 0x120(%rsp)
vmovups %ymm10, 0x100(%rsp)
vmovups %ymm11, 0xe0(%rsp)
vmovups %ymm12, 0xc0(%rsp)
vmovups %ymm13, 0xa0(%rsp)
movq %rsi, 0x8(%rsp)
movq %r10, (%rsp)
cmpq %r11, %r9
je 0x202a31
movq -0x8(%r9), %rcx
addq $-0x8, %r9
testb $0x8, %cl
jne 0x202533
vmovss 0x1c(%rsi), %xmm1
movl %ecx, %edx
andl $0x7, %edx
movq %rcx, %rax
andq $-0x10, %rax
cmpq $0x3, %rdx
je 0x202588
vshufps $0x0, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmulps 0x100(%rax,%r10), %ymm0, %ymm1
vaddps 0x40(%rax,%r10), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r13), %ymm0, %ymm2
vaddps 0x40(%rax,%r13), %ymm2, %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vmulps 0x100(%rax,%r14), %ymm0, %ymm3
vaddps 0x40(%rax,%r14), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmulps 0x100(%rax,%rbp), %ymm0, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vaddps 0x40(%rax,%rbp), %ymm4, %ymm3
vsubps %ymm6, %ymm3, %ymm3
vmulps 0x100(%rax,%r12), %ymm0, %ymm4
vaddps 0x40(%rax,%r12), %ymm4, %ymm4
vmulps %ymm3, %ymm9, %ymm3
vsubps %ymm7, %ymm4, %ymm4
vmulps 0x100(%rax,%rbx), %ymm0, %ymm5
vmulps %ymm4, %ymm10, %ymm4
vaddps 0x40(%rax,%rbx), %ymm5, %ymm5
vsubps %ymm8, %ymm5, %ymm5
vmulps %ymm5, %ymm11, %ymm5
vminps %ymm5, %ymm4, %ymm4
vmaxps %ymm1, %ymm12, %ymm1
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm3, %ymm13, %ymm2
vminps %ymm4, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
cmpl $0x6, %edx
je 0x202910
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
testb $0x8, %cl
jne 0x202581
testq %r15, %r15
je 0x202908
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x20293d
movq %rax, %rcx
testl %edx, %edx
je 0x20242f
jmp 0x202987
pushq $0x6
jmp 0x20290a
vmovaps 0x40(%rax), %ymm2
vmovups %ymm2, 0x160(%rsp)
vmovaps 0xa0(%rax), %ymm6
vmovaps 0xc0(%rax), %ymm14
vmovaps 0xe0(%rax), %ymm3
vmovups %ymm3, 0x140(%rsp)
vmovaps 0x100(%rax), %ymm9
vmovaps 0x120(%rax), %ymm5
vmovaps 0x140(%rax), %ymm7
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm12
vmovss 0x1cea132(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm13
vmulps 0x1c0(%rax), %ymm12, %ymm1
vmulps 0x1e0(%rax), %ymm12, %ymm4
vmulps 0x200(%rax), %ymm12, %ymm8
vmulss 0x1ce941e(%rip), %xmm13, %xmm10 # 0x1eeba24
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vinsertf128 $0x1, %xmm10, %ymm10, %ymm10
vaddps %ymm1, %ymm10, %ymm0
vmovups %ymm0, 0x80(%rsp)
vaddps %ymm4, %ymm10, %ymm0
vmovups %ymm0, 0x1a0(%rsp)
vaddps %ymm8, %ymm10, %ymm0
vmovups %ymm0, 0x180(%rsp)
vmovups 0x1c0(%rsp), %ymm0
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm5, %ymm0, %ymm4
vmulps %ymm7, %ymm0, %ymm10
vmovups 0x1e0(%rsp), %ymm0
vmulps %ymm6, %ymm0, %ymm11
vaddps %ymm1, %ymm11, %ymm1
vmulps %ymm0, %ymm14, %ymm11
vmovaps %ymm14, %ymm8
vaddps %ymm4, %ymm11, %ymm4
vmulps %ymm3, %ymm0, %ymm11
vaddps %ymm10, %ymm11, %ymm14
vmovups 0x200(%rsp), %ymm0
vmulps %ymm2, %ymm0, %ymm10
vaddps %ymm1, %ymm10, %ymm1
vmovaps 0x60(%rax), %ymm10
vmulps %ymm0, %ymm10, %ymm11
vaddps %ymm4, %ymm11, %ymm4
vmovaps 0x80(%rax), %ymm11
vmulps %ymm0, %ymm11, %ymm15
vaddps %ymm14, %ymm15, %ymm14
vbroadcastss 0x1d1e816(%rip), %ymm0 # 0x1f20ec4
vandps %ymm0, %ymm1, %ymm15
vbroadcastss 0x1cee92d(%rip), %ymm2 # 0x1ef0fe8
vcmpltps %ymm2, %ymm15, %ymm15
vblendvps %ymm15, %ymm2, %ymm1, %ymm1
vandps %ymm0, %ymm4, %ymm15
vcmpltps %ymm2, %ymm15, %ymm15
vblendvps %ymm15, %ymm2, %ymm4, %ymm4
vandps %ymm0, %ymm14, %ymm15
vcmpltps %ymm2, %ymm15, %ymm15
vblendvps %ymm15, %ymm2, %ymm14, %ymm3
vshufps $0x0, %xmm13, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
vmulps 0x220(%rax), %ymm12, %ymm14
vmulps 0x240(%rax), %ymm12, %ymm15
vinsertf128 $0x1, %xmm13, %ymm13, %ymm0
vmulps 0x260(%rax), %ymm12, %ymm2
vaddps %ymm0, %ymm14, %ymm12
vaddps %ymm0, %ymm15, %ymm13
vrcpps %ymm1, %ymm15
vaddps %ymm2, %ymm0, %ymm14
vmulps %ymm1, %ymm15, %ymm0
vbroadcastss 0x1ce9fef(%rip), %ymm2 # 0x1eec714
vsubps %ymm0, %ymm2, %ymm0
vmulps %ymm0, %ymm15, %ymm0
vaddps %ymm0, %ymm15, %ymm15
vrcpps %ymm4, %ymm0
vmulps %ymm0, %ymm4, %ymm1
vsubps %ymm1, %ymm2, %ymm1
vmulps %ymm1, %ymm0, %ymm1
vaddps %ymm1, %ymm0, %ymm4
vrcpps %ymm3, %ymm0
vmulps %ymm0, %ymm3, %ymm1
vsubps %ymm1, %ymm2, %ymm1
vmulps %ymm1, %ymm0, %ymm1
vaddps %ymm1, %ymm0, %ymm1
vmulps 0x20(%rsp), %ymm9, %ymm0
vaddps 0x160(%rax), %ymm0, %ymm0
vmulps 0x40(%rsp), %ymm6, %ymm2
vaddps %ymm0, %ymm2, %ymm0
vmulps 0x20(%rsp), %ymm5, %ymm2
vaddps 0x180(%rax), %ymm2, %ymm2
vmulps 0x40(%rsp), %ymm8, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x20(%rsp), %ymm7, %ymm3
vaddps 0x1a0(%rax), %ymm3, %ymm3
vmovups 0x140(%rsp), %ymm5
vmulps 0x40(%rsp), %ymm5, %ymm5
vaddps %ymm3, %ymm5, %ymm3
vmovups 0x160(%rsp), %ymm5
vmulps 0x60(%rsp), %ymm5, %ymm5
vaddps %ymm0, %ymm5, %ymm5
vmulps 0x60(%rsp), %ymm10, %ymm0
vaddps %ymm2, %ymm0, %ymm6
vmulps 0x60(%rsp), %ymm11, %ymm0
vaddps %ymm3, %ymm0, %ymm7
vmovups 0x80(%rsp), %ymm0
vsubps %ymm5, %ymm0, %ymm0
vmovups 0x1a0(%rsp), %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmovups 0x180(%rsp), %ymm3
vsubps %ymm7, %ymm3, %ymm8
vmulps %ymm0, %ymm15, %ymm3
vmulps %ymm4, %ymm2, %ymm2
vmulps %ymm1, %ymm8, %ymm0
vsubps %ymm5, %ymm12, %ymm5
vsubps %ymm6, %ymm13, %ymm6
vsubps %ymm7, %ymm14, %ymm7
vmulps %ymm5, %ymm15, %ymm5
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm1, %ymm7, %ymm1
vextractf128 $0x1, %ymm5, %xmm6
vextractf128 $0x1, %ymm3, %xmm7
vpminsd %xmm6, %xmm7, %xmm8
vpminsd %xmm5, %xmm3, %xmm9
vinsertf128 $0x1, %xmm8, %ymm9, %ymm8
vextractf128 $0x1, %ymm4, %xmm9
vextractf128 $0x1, %ymm2, %xmm10
vpminsd %xmm9, %xmm10, %xmm11
vpminsd %xmm4, %xmm2, %xmm12
vinsertf128 $0x1, %xmm11, %ymm12, %ymm11
vextractf128 $0x1, %ymm1, %xmm12
vextractf128 $0x1, %ymm0, %xmm13
vpminsd %xmm12, %xmm13, %xmm14
vpminsd %xmm1, %xmm0, %xmm15
vinsertf128 $0x1, %xmm14, %ymm15, %ymm14
vmaxps %ymm14, %ymm11, %ymm11
vpmaxsd %xmm6, %xmm7, %xmm6
vmovups 0x40(%rsp), %ymm7
vpmaxsd %xmm5, %xmm3, %xmm3
vinsertf128 $0x1, %xmm6, %ymm3, %ymm3
vmovups 0x60(%rsp), %ymm6
vpmaxsd %xmm9, %xmm10, %xmm5
vmovups 0x100(%rsp), %ymm10
vmovups 0x120(%rsp), %ymm9
vpmaxsd %xmm4, %xmm2, %xmm2
vinsertf128 $0x1, %xmm5, %ymm2, %ymm2
vpmaxsd %xmm12, %xmm13, %xmm4
vmovups 0xa0(%rsp), %ymm13
vmovups 0xc0(%rsp), %ymm12
vpmaxsd %xmm1, %xmm0, %xmm0
vinsertf128 $0x1, %xmm4, %ymm0, %ymm0
vminps %ymm0, %ymm2, %ymm0
vmaxps %ymm8, %ymm12, %ymm1
vmovups 0x20(%rsp), %ymm8
vmaxps %ymm11, %ymm1, %ymm1
vmovups 0xe0(%rsp), %ymm11
vminps %ymm3, %ymm13, %ymm2
vminps %ymm0, %ymm2, %ymm0
vcmpleps %ymm0, %ymm1, %ymm0
vmovmskps %ymm0, %r15d
jmp 0x202533
pushq $0x4
popq %rdx
jmp 0x202574
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x202522
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x20297f
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x20294c
movq %r8, %rcx
jmp 0x202574
cmpl $0x6, %edx
jne 0x202a28
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x10(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x220(%rsp), %rdi
movq 0x18(%rsp), %rdx
movq %r9, 0x80(%rsp)
vzeroupper
callq *0x8(%r8,%rax)
leaq 0x260(%rsp), %r11
vmovups 0xa0(%rsp), %ymm13
vmovups 0xc0(%rsp), %ymm12
vmovups 0xe0(%rsp), %ymm11
vmovups 0x100(%rsp), %ymm10
vmovups 0x120(%rsp), %ymm9
movq (%rsp), %r10
vmovups 0x20(%rsp), %ymm8
vmovups 0x40(%rsp), %ymm7
vmovups 0x60(%rsp), %ymm6
movq 0x80(%rsp), %r9
movq 0x8(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x202a28
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x20241e
addq $0x1408, %rsp # imm = 0x1408
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 257, true, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x13e8, %rsp # imm = 0x13E8
movq %rdx, 0x18(%rsp)
movq %rdi, 0x10(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x20327b
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x20327b
vmovaps 0x10(%rsi), %xmm2
vdpps $0x7f, %xmm2, %xmm2, %xmm5
vrsqrtss %xmm5, %xmm5, %xmm6
vmovss 0x1ce9c7f(%rip), %xmm3 # 0x1eec718
vmulss %xmm3, %xmm6, %xmm7
vmovss 0x1cea0db(%rip), %xmm4 # 0x1eecb80
vmulss %xmm4, %xmm5, %xmm5
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vsubss %xmm5, %xmm7, %xmm8
vbroadcastss 0x10(%rsi), %ymm5
vmovups %ymm5, 0x1e0(%rsp)
vbroadcastss 0x14(%rsi), %ymm5
vmovups %ymm5, 0x1c0(%rsp)
vbroadcastss 0x18(%rsi), %ymm5
vmovups %ymm5, 0x1a0(%rsp)
vshufps $0x0, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,0]
vmovshdup %xmm6, %xmm7 # xmm7 = xmm6[1,1,3,3]
vbroadcastss 0x1d1e3be(%rip), %xmm10 # 0x1f20ec0
vxorps %xmm7, %xmm10, %xmm11
vxorps %xmm7, %xmm7, %xmm7
vunpckhps %xmm7, %xmm6, %xmm12 # xmm12 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovss %xmm11, %xmm7, %xmm11 # xmm11 = xmm11[0],xmm7[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm10, %xmm9, %xmm9
vinsertps $0x2a, %xmm6, %xmm9, %xmm9 # xmm9 = xmm9[0],zero,xmm6[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm12
vcmpltps %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vblendvps %xmm10, %xmm11, %xmm9, %xmm9
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vrsqrtss %xmm10, %xmm10, %xmm11
vmulss %xmm3, %xmm11, %xmm12
vmulss %xmm4, %xmm10, %xmm10
vmulss %xmm11, %xmm10, %xmm10
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm10, %xmm10
vsubss %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm11 # xmm11 = xmm6[1,2,0,3]
vmulps %xmm9, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vsubps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm10, %xmm10, %xmm11
leaq 0x200(%rsp), %rcx
vmovss %xmm8, (%rcx)
vrsqrtss %xmm11, %xmm11, %xmm8
vmulss %xmm3, %xmm8, %xmm3
vmulss %xmm4, %xmm11, %xmm4
vmulss %xmm4, %xmm8, %xmm4
vmulss %xmm8, %xmm8, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vsubss %xmm4, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm10, %xmm3
vmulps %xmm6, %xmm5, %xmm4
vunpcklps %xmm4, %xmm9, %xmm5 # xmm5 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpckhps %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
vunpcklps %xmm7, %xmm3, %xmm6 # xmm6 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
vunpckhps %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
vunpcklps %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
vunpcklps %xmm6, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vunpckhps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovaps %xmm4, 0x10(%rcx)
vmovaps %xmm5, 0x20(%rcx)
vmovaps %xmm3, 0x30(%rcx)
leaq 0x248(%rsp), %r9
movq 0x70(%rax), %rax
vmaxss 0xc(%rsi), %xmm1, %xmm3
vbroadcastss 0x1d1e2af(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm4
vbroadcastss 0x1cee3c6(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1ce9ae4(%rip), %xmm5 # 0x1eec714
vdivps %xmm2, %xmm5, %xmm2
vbroadcastss 0x1d1e323(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm2, %xmm2
movq %rax, -0x8(%r9)
vbroadcastss 0x1d1d2c0(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm2, %xmm5
vbroadcastss 0x1d1d2b7(%rip), %xmm4 # 0x1f1ff14
vbroadcastss (%rsi), %ymm15
vbroadcastss 0x4(%rsi), %ymm7
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x8(%rsi), %ymm14
xorl %r10d, %r10d
vucomiss %xmm1, %xmm5
setb %r10b
vshufps $0x0, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm8
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vshufps $0x55, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vshufpd $0x1, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[1,0]
vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufps $0x0, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[0,0,0,0]
shll $0x5, %r10d
xorl %r14d, %r14d
vucomiss %xmm1, %xmm6
vinsertf128 $0x1, %xmm5, %ymm5, %ymm6
vshufps $0x55, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm5
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm11
setb %r14b
shll $0x5, %r14d
orq $0x40, %r14
xorl %r13d, %r13d
vucomiss %xmm1, %xmm4
setb %r13b
shll $0x5, %r13d
orq $0x80, %r13
movq %r10, %rbp
xorq $0x20, %rbp
movq %r14, %r12
xorq $0x20, %r12
movq %r13, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm12
leaq 0x240(%rsp), %r11
vmovups %ymm15, 0x180(%rsp)
vmovups %ymm7, 0x20(%rsp)
vmovups %ymm14, 0x40(%rsp)
vmovups %ymm8, 0x160(%rsp)
vmovups %ymm9, 0x140(%rsp)
vmovups %ymm10, 0x120(%rsp)
vmovups %ymm6, 0x100(%rsp)
vmovups %ymm5, 0xe0(%rsp)
vmovups %ymm11, 0xc0(%rsp)
vmovups %ymm4, 0xa0(%rsp)
vmovups %ymm12, 0x80(%rsp)
movq %rsi, 0x8(%rsp)
movq %r10, (%rsp)
cmpq %r11, %r9
je 0x20327b
movq -0x8(%r9), %rcx
addq $-0x8, %r9
movq %rcx, %rax
andq $0xf, %rax
jne 0x202e71
vmovaps 0x40(%rcx,%r10), %ymm0
vsubps %ymm15, %ymm0, %ymm0
vmulps %ymm0, %ymm8, %ymm0
vmovaps 0x40(%rcx,%r14), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rcx,%r13), %ymm1
vsubps %ymm14, %ymm1, %ymm1
vmulps %ymm1, %ymm10, %ymm1
vmovaps 0x40(%rcx,%rbp), %ymm2
vsubps %ymm15, %ymm2, %ymm2
vmulps %ymm2, %ymm6, %ymm2
vmovaps 0x40(%rcx,%r12), %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm5, %ymm3
vminps %ymm3, %ymm2, %ymm2
vmovaps 0x40(%rcx,%rbx), %ymm3
vsubps %ymm14, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmaxps %ymm4, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vminps %ymm12, %ymm3, %ymm1
vminps %ymm1, %ymm2, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r15d
movb $0x1, %al
testb %al, %al
je 0x202e7a
testq %r15, %r15
je 0x202e7e
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x202e83
movq %rax, %rcx
testl %edx, %edx
je 0x202d9f
jmp 0x2031b9
cmpl $0x2, %eax
je 0x202eca
xorl %eax, %eax
jmp 0x202e2c
pushq $0x6
jmp 0x202e80
pushq $0x4
popq %rdx
jmp 0x202e64
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x202ec5
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x202e92
movq %r8, %rcx
jmp 0x202e64
movq %rcx, %rax
andq $-0x10, %rax
vmovaps 0x100(%rax), %ymm5
vmovups 0x1a0(%rsp), %ymm0
vmulps %ymm5, %ymm0, %ymm7
vmovaps 0x120(%rax), %ymm3
vmulps %ymm3, %ymm0, %ymm8
vmovaps 0x140(%rax), %ymm4
vmulps %ymm4, %ymm0, %ymm9
vmovaps 0x40(%rax), %ymm2
vmovaps 0x60(%rax), %ymm1
vmovaps 0x80(%rax), %ymm0
vmovups %ymm0, 0x60(%rsp)
vmovaps 0xa0(%rax), %ymm6
vmovups 0x1c0(%rsp), %ymm12
vmulps %ymm6, %ymm12, %ymm10
vaddps %ymm7, %ymm10, %ymm10
vmovaps 0xc0(%rax), %ymm7
vmulps %ymm7, %ymm12, %ymm11
vaddps %ymm11, %ymm8, %ymm11
vmovaps 0xe0(%rax), %ymm8
vmulps %ymm8, %ymm12, %ymm12
vaddps %ymm12, %ymm9, %ymm9
vmovups 0x1e0(%rsp), %ymm13
vmulps %ymm2, %ymm13, %ymm12
vaddps %ymm12, %ymm10, %ymm10
vmulps %ymm1, %ymm13, %ymm12
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm0, %ymm13, %ymm12
vaddps %ymm12, %ymm9, %ymm9
vbroadcastss 0x1d1df45(%rip), %ymm13 # 0x1f20ec4
vandps %ymm13, %ymm10, %ymm12
vbroadcastss 0x1cee05b(%rip), %ymm0 # 0x1ef0fe8
vcmpltps %ymm0, %ymm12, %ymm12
vblendvps %ymm12, %ymm0, %ymm10, %ymm10
vandps %ymm13, %ymm11, %ymm12
vcmpltps %ymm0, %ymm12, %ymm12
vblendvps %ymm12, %ymm0, %ymm11, %ymm11
vandps %ymm13, %ymm9, %ymm12
vcmpltps %ymm0, %ymm12, %ymm12
vblendvps %ymm12, %ymm0, %ymm9, %ymm12
vrcpps %ymm10, %ymm9
vmulps %ymm10, %ymm9, %ymm10
vbroadcastss 0x1ce9749(%rip), %ymm0 # 0x1eec714
vsubps %ymm10, %ymm0, %ymm10
vmulps %ymm10, %ymm9, %ymm10
vaddps %ymm10, %ymm9, %ymm9
vrcpps %ymm11, %ymm10
vmulps %ymm11, %ymm10, %ymm11
vsubps %ymm11, %ymm0, %ymm11
vmulps %ymm11, %ymm10, %ymm11
vaddps %ymm11, %ymm10, %ymm10
vrcpps %ymm12, %ymm11
vmulps %ymm12, %ymm11, %ymm12
vsubps %ymm12, %ymm0, %ymm12
vmulps %ymm12, %ymm11, %ymm12
vaddps %ymm12, %ymm11, %ymm11
vbroadcastss 0x1d1deab(%rip), %ymm0 # 0x1f20ec0
vxorps %ymm0, %ymm9, %ymm12
vxorps %ymm0, %ymm10, %ymm13
vmulps %ymm5, %ymm14, %ymm5
vaddps 0x160(%rax), %ymm5, %ymm5
vxorps %ymm0, %ymm11, %ymm14
vmulps 0x40(%rsp), %ymm3, %ymm3
vaddps 0x180(%rax), %ymm3, %ymm3
vmulps 0x40(%rsp), %ymm4, %ymm4
vaddps 0x1a0(%rax), %ymm4, %ymm4
vmulps 0x20(%rsp), %ymm6, %ymm6
vaddps %ymm5, %ymm6, %ymm5
vmulps 0x20(%rsp), %ymm7, %ymm6
vaddps %ymm3, %ymm6, %ymm3
vmulps 0x20(%rsp), %ymm8, %ymm6
vaddps %ymm4, %ymm6, %ymm4
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm5, %ymm2, %ymm2
vmulps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm15, %ymm1
vaddps %ymm3, %ymm1, %ymm1
vmulps %ymm1, %ymm13, %ymm1
vmulps 0x60(%rsp), %ymm15, %ymm0
vaddps %ymm4, %ymm0, %ymm0
vmulps %ymm0, %ymm14, %ymm0
vaddps %ymm2, %ymm9, %ymm4
vaddps %ymm1, %ymm10, %ymm5
vaddps %ymm0, %ymm11, %ymm3
vextractf128 $0x1, %ymm4, %xmm6
vextractf128 $0x1, %ymm2, %xmm7
vpminsd %xmm6, %xmm7, %xmm8
vpminsd %xmm4, %xmm2, %xmm9
vinsertf128 $0x1, %xmm8, %ymm9, %ymm8
vextractf128 $0x1, %ymm5, %xmm9
vextractf128 $0x1, %ymm1, %xmm10
vpminsd %xmm9, %xmm10, %xmm11
vpminsd %xmm5, %xmm1, %xmm12
vinsertf128 $0x1, %xmm11, %ymm12, %ymm11
vextractf128 $0x1, %ymm3, %xmm12
vextractf128 $0x1, %ymm0, %xmm13
vpminsd %xmm12, %xmm13, %xmm14
vpminsd %xmm3, %xmm0, %xmm15
vinsertf128 $0x1, %xmm14, %ymm15, %ymm14
vmovups 0x180(%rsp), %ymm15
vmaxps %ymm14, %ymm11, %ymm11
vmovups 0x40(%rsp), %ymm14
vpmaxsd %xmm6, %xmm7, %xmm6
vmovups 0x20(%rsp), %ymm7
vpmaxsd %xmm4, %xmm2, %xmm2
vinsertf128 $0x1, %xmm6, %ymm2, %ymm2
vmovups 0x100(%rsp), %ymm6
vpmaxsd %xmm9, %xmm10, %xmm4
vmovups 0x120(%rsp), %ymm10
vmovups 0x140(%rsp), %ymm9
vpmaxsd %xmm5, %xmm1, %xmm1
vmovups 0xe0(%rsp), %ymm5
vinsertf128 $0x1, %xmm4, %ymm1, %ymm1
vpmaxsd %xmm12, %xmm13, %xmm4
vmovups 0x80(%rsp), %ymm12
vpmaxsd %xmm3, %xmm0, %xmm0
vinsertf128 $0x1, %xmm4, %ymm0, %ymm0
vmovups 0xa0(%rsp), %ymm4
vminps %ymm0, %ymm1, %ymm0
vmaxps %ymm8, %ymm4, %ymm1
vmovups 0x160(%rsp), %ymm8
vmaxps %ymm11, %ymm1, %ymm1
vmovups 0xc0(%rsp), %ymm11
vminps %ymm2, %ymm12, %ymm2
vminps %ymm0, %ymm2, %ymm0
vbroadcastss 0x1d1cd72(%rip), %ymm2 # 0x1f1ff10
vmulps %ymm2, %ymm1, %ymm1
vbroadcastss 0x1d1cd69(%rip), %ymm2 # 0x1f1ff14
vmulps %ymm2, %ymm0, %ymm0
vcmpleps %ymm0, %ymm1, %ymm0
jmp 0x202e26
cmpl $0x6, %edx
jne 0x203272
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x10(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x200(%rsp), %rdi
movq 0x18(%rsp), %rdx
movq %r9, 0x60(%rsp)
vzeroupper
callq *0x8(%r8,%rax)
leaq 0x240(%rsp), %r11
vmovups 0x80(%rsp), %ymm12
vmovups 0xa0(%rsp), %ymm4
vmovups 0xc0(%rsp), %ymm11
vmovups 0xe0(%rsp), %ymm5
vmovups 0x100(%rsp), %ymm6
vmovups 0x120(%rsp), %ymm10
vmovups 0x140(%rsp), %ymm9
vmovups 0x160(%rsp), %ymm8
movq (%rsp), %r10
vmovups 0x40(%rsp), %ymm14
vmovups 0x20(%rsp), %ymm7
vmovups 0x180(%rsp), %ymm15
movq 0x60(%rsp), %r9
movq 0x8(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x203272
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x202d8e
addq $0x13e8, %rsp # imm = 0x13E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16781328, true, embree::avx::VirtualCurveIntersector1>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1468, %rsp # imm = 0x1468
movq %rdx, 0x18(%rsp)
movq %rdi, 0x10(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x203c3a
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x203c3a
vxorps %xmm12, %xmm12, %xmm12
vmovaps 0x10(%rsi), %xmm1
vdpps $0x7f, %xmm1, %xmm1, %xmm4
vrsqrtss %xmm4, %xmm4, %xmm5
vmovss 0x1ce9430(%rip), %xmm2 # 0x1eec718
vmulss %xmm2, %xmm5, %xmm6
vmovss 0x1ce988c(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm4, %xmm4
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm5, %xmm5
vmulss %xmm5, %xmm4, %xmm4
vbroadcastss 0x10(%rsi), %ymm5
vmovups %ymm5, 0x260(%rsp)
vbroadcastss 0x14(%rsi), %ymm5
vmovups %ymm5, 0x240(%rsp)
vsubss %xmm4, %xmm6, %xmm7
vbroadcastss 0x18(%rsi), %ymm4
vmovups %ymm4, 0x220(%rsp)
leaq 0x280(%rsp), %rcx
vshufps $0x0, %xmm7, %xmm7, %xmm4 # xmm4 = xmm7[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm8 # xmm8 = xmm5[1,0]
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vbroadcastss 0x1d1db68(%rip), %xmm9 # 0x1f20ec0
vxorps %xmm6, %xmm9, %xmm10
vxorps %xmm6, %xmm6, %xmm6
vunpckhps %xmm6, %xmm5, %xmm11 # xmm11 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovss %xmm10, %xmm6, %xmm10 # xmm10 = xmm10[0],xmm6[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm9, %xmm8, %xmm8
vinsertps $0x2a, %xmm5, %xmm8, %xmm8 # xmm8 = xmm8[0],zero,xmm5[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm9
vdpps $0x7f, %xmm8, %xmm8, %xmm11
vcmpltps %xmm9, %xmm11, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vblendvps %xmm9, %xmm10, %xmm8, %xmm8
vdpps $0x7f, %xmm8, %xmm8, %xmm9
vmovss %xmm7, (%rcx)
vrsqrtss %xmm9, %xmm9, %xmm7
vmulss %xmm2, %xmm7, %xmm10
vmulss %xmm3, %xmm9, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vmulss %xmm7, %xmm7, %xmm7
vmulss %xmm7, %xmm9, %xmm7
vsubss %xmm7, %xmm10, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm8, %xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3]
vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3]
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm5, %xmm8, %xmm8
vsubps %xmm9, %xmm8, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[1,2,0,3]
vdpps $0x7f, %xmm8, %xmm8, %xmm9
vrsqrtss %xmm9, %xmm9, %xmm10
vmulss %xmm2, %xmm10, %xmm2
vmulss %xmm3, %xmm9, %xmm3
vmulss %xmm3, %xmm10, %xmm3
vmulss %xmm10, %xmm10, %xmm9
vmulss %xmm3, %xmm9, %xmm3
vsubss %xmm3, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm8, %xmm2
vmulps %xmm5, %xmm4, %xmm3
vunpcklps %xmm3, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
vunpckhps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
vunpcklps %xmm6, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vmovaps %xmm3, 0x10(%rcx)
vmovaps %xmm4, 0x20(%rcx)
vmovaps %xmm2, 0x30(%rcx)
leaq 0x2c8(%rsp), %r9
movq 0x70(%rax), %rax
vmaxss 0xc(%rsi), %xmm12, %xmm2
movq %rax, -0x8(%r9)
vbroadcastss 0x1d1da65(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm1, %xmm3
vbroadcastss 0x1cedb7c(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1ce929a(%rip), %xmm4 # 0x1eec714
vdivps %xmm1, %xmm4, %xmm1
vbroadcastss 0x1d1dad9(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d1ca7a(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm1, %xmm3
vbroadcastss 0x1d1ca71(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm1, %xmm1
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
xorl %r10d, %r10d
vucomiss %xmm12, %xmm3
setb %r10b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm11
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm13
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm14
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm15
shll $0x5, %r10d
xorl %r13d, %r13d
vucomiss %xmm12, %xmm4
setb %r13b
shll $0x5, %r13d
orq $0x40, %r13
xorl %r14d, %r14d
vucomiss %xmm12, %xmm5
setb %r14b
shll $0x5, %r14d
orq $0x80, %r14
movq %r10, %rbp
xorq $0x20, %rbp
movq %r13, %r12
xorq $0x20, %r12
movq %r14, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm5
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm12
leaq 0x2c0(%rsp), %r11
vmovups %ymm6, 0x60(%rsp)
vmovups %ymm7, 0x40(%rsp)
vmovups %ymm8, 0x20(%rsp)
vmovups %ymm9, 0x180(%rsp)
vmovups %ymm10, 0x160(%rsp)
vmovups %ymm11, 0x140(%rsp)
vmovups %ymm13, 0x120(%rsp)
vmovups %ymm14, 0x100(%rsp)
vmovups %ymm15, 0xe0(%rsp)
vmovups %ymm5, 0xc0(%rsp)
vmovups %ymm12, 0xa0(%rsp)
movq %rsi, 0x8(%rsp)
movq %r10, (%rsp)
cmpq %r11, %r9
je 0x203c3a
movq -0x8(%r9), %rcx
addq $-0x8, %r9
testb $0x8, %cl
jne 0x2036e9
vmovss 0x1c(%rsi), %xmm2
movl %ecx, %edx
andl $0x7, %edx
movq %rcx, %rax
andq $-0x10, %rax
cmpq $0x3, %rdx
je 0x20373e
vshufps $0x0, %xmm2, %xmm2, %xmm0 # xmm0 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmulps 0x100(%rax,%r10), %ymm0, %ymm1
vaddps 0x40(%rax,%r10), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r13), %ymm0, %ymm2
vaddps 0x40(%rax,%r13), %ymm2, %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vmulps 0x100(%rax,%r14), %ymm0, %ymm3
vaddps 0x40(%rax,%r14), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm1, %ymm5, %ymm1
vmulps 0x100(%rax,%rbp), %ymm0, %ymm3
vaddps 0x40(%rax,%rbp), %ymm3, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vsubps %ymm6, %ymm3, %ymm2
vmulps 0x100(%rax,%r12), %ymm0, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vaddps 0x40(%rax,%r12), %ymm3, %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps 0x100(%rax,%rbx), %ymm0, %ymm4
vaddps 0x40(%rax,%rbx), %ymm4, %ymm4
vmulps %ymm3, %ymm14, %ymm3
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm2, %ymm12, %ymm2
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
cmpl $0x6, %edx
je 0x203afe
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
testb $0x8, %cl
jne 0x203737
testq %r15, %r15
je 0x203af6
andq $-0x10, %rcx
bsfq %r15, %rax
leaq -0x1(%r15), %rdi
xorl %edx, %edx
movq (%rcx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r15, %rdi
jne 0x203b2b
movq %rax, %rcx
testl %edx, %edx
je 0x2035e5
jmp 0x203b75
pushq $0x6
jmp 0x203af8
vmovaps 0xa0(%rax), %ymm4
vmovaps 0xc0(%rax), %ymm15
vmovaps 0xe0(%rax), %ymm1
vmovaps 0x100(%rax), %ymm10
vmovaps 0x120(%rax), %ymm6
vmovaps 0x140(%rax), %ymm3
vshufps $0x0, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm7
vmovss 0x1ce8f93(%rip), %xmm0 # 0x1eec714
vsubss %xmm2, %xmm0, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[0,0,0,0]
vmulps 0x1c0(%rax), %ymm7, %ymm8
vmulps 0x1e0(%rax), %ymm7, %ymm9
vinsertf128 $0x1, %xmm5, %ymm5, %ymm12
vmulps 0x200(%rax), %ymm7, %ymm11
vmulss 0x1ce8274(%rip), %xmm2, %xmm2 # 0x1eeba24
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vaddps %ymm2, %ymm8, %ymm0
vmovups %ymm0, 0x80(%rsp)
vaddps %ymm2, %ymm9, %ymm0
vmovups %ymm0, 0x200(%rsp)
vmulps 0x220(%rax), %ymm7, %ymm8
vmulps 0x240(%rax), %ymm7, %ymm9
vaddps %ymm2, %ymm11, %ymm11
vmulps 0x260(%rax), %ymm7, %ymm2
vaddps %ymm8, %ymm12, %ymm0
vmovups %ymm0, 0x1e0(%rsp)
vaddps %ymm9, %ymm12, %ymm0
vmovups %ymm0, 0x1c0(%rsp)
vaddps %ymm2, %ymm12, %ymm0
vmovups %ymm0, 0x1a0(%rsp)
vmovups 0x220(%rsp), %ymm0
vmulps %ymm0, %ymm10, %ymm2
vmulps %ymm6, %ymm0, %ymm12
vmulps %ymm3, %ymm0, %ymm13
vmovups 0x240(%rsp), %ymm0
vmulps %ymm4, %ymm0, %ymm14
vaddps %ymm2, %ymm14, %ymm2
vmulps %ymm0, %ymm15, %ymm14
vmovaps %ymm15, %ymm8
vaddps %ymm12, %ymm14, %ymm14
vmulps %ymm1, %ymm0, %ymm12
vmovaps %ymm1, %ymm9
vaddps %ymm13, %ymm12, %ymm15
vmovaps 0x40(%rax), %ymm12
vmovups 0x260(%rsp), %ymm0
vmulps %ymm0, %ymm12, %ymm13
vaddps %ymm2, %ymm13, %ymm2
vmovaps 0x60(%rax), %ymm13
vmulps %ymm0, %ymm13, %ymm5
vaddps %ymm5, %ymm14, %ymm5
vmovaps 0x80(%rax), %ymm14
vmulps %ymm0, %ymm14, %ymm7
vaddps %ymm7, %ymm15, %ymm7
vbroadcastss 0x1d1d62d(%rip), %ymm0 # 0x1f20ec4
vandps %ymm0, %ymm2, %ymm15
vbroadcastss 0x1ced744(%rip), %ymm1 # 0x1ef0fe8
vcmpltps %ymm1, %ymm15, %ymm15
vblendvps %ymm15, %ymm1, %ymm2, %ymm2
vandps %ymm0, %ymm5, %ymm15
vcmpltps %ymm1, %ymm15, %ymm15
vblendvps %ymm15, %ymm1, %ymm5, %ymm5
vandps %ymm0, %ymm7, %ymm15
vcmpltps %ymm1, %ymm15, %ymm15
vblendvps %ymm15, %ymm1, %ymm7, %ymm7
vrcpps %ymm2, %ymm15
vmulps %ymm2, %ymm15, %ymm2
vbroadcastss 0x1ce8e36(%rip), %ymm1 # 0x1eec714
vsubps %ymm2, %ymm1, %ymm2
vrcpps %ymm5, %ymm0
vmulps %ymm2, %ymm15, %ymm2
vaddps %ymm2, %ymm15, %ymm15
vmulps %ymm0, %ymm5, %ymm2
vsubps %ymm2, %ymm1, %ymm2
vmulps %ymm2, %ymm0, %ymm2
vrcpps %ymm7, %ymm5
vaddps %ymm2, %ymm0, %ymm2
vmulps %ymm5, %ymm7, %ymm0
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm0, %ymm5, %ymm0
vaddps %ymm0, %ymm5, %ymm5
vmulps 0x20(%rsp), %ymm10, %ymm0
vaddps 0x160(%rax), %ymm0, %ymm0
vmulps 0x20(%rsp), %ymm6, %ymm6
vaddps 0x180(%rax), %ymm6, %ymm6
vmulps 0x40(%rsp), %ymm4, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x40(%rsp), %ymm8, %ymm1
vaddps %ymm6, %ymm1, %ymm1
vmulps 0x20(%rsp), %ymm3, %ymm3
vaddps 0x1a0(%rax), %ymm3, %ymm3
vmulps 0x40(%rsp), %ymm9, %ymm4
vaddps %ymm3, %ymm4, %ymm3
vmulps 0x60(%rsp), %ymm12, %ymm4
vaddps %ymm0, %ymm4, %ymm4
vmulps 0x60(%rsp), %ymm13, %ymm0
vaddps %ymm1, %ymm0, %ymm6
vmulps 0x60(%rsp), %ymm14, %ymm0
vaddps %ymm3, %ymm0, %ymm7
vmovups 0x80(%rsp), %ymm0
vsubps %ymm4, %ymm0, %ymm0
vmovups 0x200(%rsp), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vsubps %ymm7, %ymm11, %ymm10
vmulps %ymm0, %ymm15, %ymm3
vmulps %ymm2, %ymm1, %ymm1
vmulps %ymm5, %ymm10, %ymm0
vmovups 0x1e0(%rsp), %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmovups 0x1c0(%rsp), %ymm8
vsubps %ymm6, %ymm8, %ymm6
vmovups 0x1a0(%rsp), %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm4, %ymm15, %ymm4
vmulps %ymm2, %ymm6, %ymm6
vmulps %ymm5, %ymm7, %ymm2
vextractf128 $0x1, %ymm4, %xmm5
vextractf128 $0x1, %ymm3, %xmm7
vpminsd %xmm5, %xmm7, %xmm8
vpminsd %xmm4, %xmm3, %xmm9
vinsertf128 $0x1, %xmm8, %ymm9, %ymm8
vextractf128 $0x1, %ymm6, %xmm9
vextractf128 $0x1, %ymm1, %xmm10
vpminsd %xmm9, %xmm10, %xmm11
vpminsd %xmm6, %xmm1, %xmm12
vinsertf128 $0x1, %xmm11, %ymm12, %ymm11
vextractf128 $0x1, %ymm2, %xmm12
vextractf128 $0x1, %ymm0, %xmm13
vpminsd %xmm12, %xmm13, %xmm14
vpminsd %xmm2, %xmm0, %xmm15
vinsertf128 $0x1, %xmm14, %ymm15, %ymm14
vmovups 0xe0(%rsp), %ymm15
vmaxps %ymm14, %ymm11, %ymm11
vmovups 0x100(%rsp), %ymm14
vpmaxsd %xmm5, %xmm7, %xmm5
vmovups 0x40(%rsp), %ymm7
vpmaxsd %xmm4, %xmm3, %xmm3
vinsertf128 $0x1, %xmm5, %ymm3, %ymm3
vmovups 0xc0(%rsp), %ymm5
vpmaxsd %xmm9, %xmm10, %xmm4
vmovups 0x160(%rsp), %ymm10
vmovups 0x180(%rsp), %ymm9
vpmaxsd %xmm6, %xmm1, %xmm1
vmovups 0x60(%rsp), %ymm6
vinsertf128 $0x1, %xmm4, %ymm1, %ymm1
vpmaxsd %xmm12, %xmm13, %xmm4
vmovups 0xa0(%rsp), %ymm12
vmovups 0x120(%rsp), %ymm13
vpmaxsd %xmm2, %xmm0, %xmm0
vinsertf128 $0x1, %xmm4, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmaxps %ymm8, %ymm5, %ymm1
vmovups 0x20(%rsp), %ymm8
vmaxps %ymm11, %ymm1, %ymm1
vmovups 0x140(%rsp), %ymm11
vminps %ymm3, %ymm12, %ymm2
vminps %ymm0, %ymm2, %ymm0
vbroadcastss 0x1d1c439(%rip), %ymm2 # 0x1f1ff10
vmulps %ymm2, %ymm1, %ymm1
vbroadcastss 0x1d1c430(%rip), %ymm2 # 0x1f1ff14
vmulps %ymm2, %ymm0, %ymm0
vcmpleps %ymm0, %ymm1, %ymm0
vmovmskps %ymm0, %r15d
jmp 0x2036e9
pushq $0x4
popq %rdx
jmp 0x20372a
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x2036d8
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdi, %r8
leaq -0x1(%rdi), %rax
movq (%rcx,%r8,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
andq %rdi, %rax
je 0x203b6d
movq %r8, (%r9)
addq $0x8, %r9
bsfq %rax, %r8
leaq -0x1(%rax), %rdi
jmp 0x203b3a
movq %r8, %rcx
jmp 0x20372a
cmpl $0x6, %edx
jne 0x203c31
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0x10(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x280(%rsp), %rdi
movq 0x18(%rsp), %rdx
movq %r9, 0x80(%rsp)
vzeroupper
callq *0x8(%r8,%rax)
leaq 0x2c0(%rsp), %r11
vmovups 0xa0(%rsp), %ymm12
vmovups 0xc0(%rsp), %ymm5
vmovups 0xe0(%rsp), %ymm15
vmovups 0x100(%rsp), %ymm14
vmovups 0x120(%rsp), %ymm13
vmovups 0x140(%rsp), %ymm11
vmovups 0x160(%rsp), %ymm10
vmovups 0x180(%rsp), %ymm9
movq (%rsp), %r10
vmovups 0x20(%rsp), %ymm8
vmovups 0x40(%rsp), %ymm7
vmovups 0x60(%rsp), %ymm6
movq 0x80(%rsp), %r9
movq 0x8(%rsp), %rsi
xorl %edx, %edx
testb %al, %al
je 0x203c31
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rdx
cmpl $0x3, %edx
jne 0x2035d4
addq $0x1468, %rsp # imm = 0x1468
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1498, %rsp # imm = 0x1498
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x203c80
addq $0x1498, %rsp # imm = 0x1498
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x203c6b
leaq 0x2f8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%r14), %xmm2, %xmm1
vmovaps 0x10(%r14), %xmm3
vbroadcastss 0x1d1d20d(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ced324(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1ce8a34(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %ymm6
vbroadcastss 0x4(%r14), %ymm7
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss 0x8(%r14), %ymm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %rcx
xorq $0x20, %rcx
movq %r10, %r13
xorq $0x20, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm5
leaq 0x1f4c1ef(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x120(%rsp)
leaq 0x2f0(%rsp), %rbp
vmovups %ymm6, 0x2d0(%rsp)
vmovups %ymm7, 0x2b0(%rsp)
vmovups %ymm8, 0x290(%rsp)
vmovups %ymm9, 0x270(%rsp)
vmovups %ymm10, 0x250(%rsp)
vmovups %ymm3, 0x230(%rsp)
movq %r13, 0x58(%rsp)
vmovups %ymm4, 0x210(%rsp)
vmovups %ymm5, 0x1f0(%rsp)
cmpq %rbp, %rdi
je 0x203c6b
movq -0x8(%rdi), %rbx
addq $-0x8, %rdi
testb $0x8, %bl
jne 0x203e89
vmovaps 0x40(%rbx,%r8), %ymm0
vsubps %ymm6, %ymm0, %ymm0
vmulps %ymm0, %ymm9, %ymm0
vmovaps 0x40(%rbx,%r9), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm10, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r10), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm3, %ymm1
vmaxps %ymm4, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r11), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmovaps 0x40(%rbx,%rcx), %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vminps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%rbx,%r13), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r12d
testb $0x8, %bl
jne 0x203ecf
testq %r12, %r12
je 0x203ed3
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rsi
xorl %r15d, %r15d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rsi
jne 0x203ed9
movq %rax, %rbx
testl %r15d, %r15d
je 0x203e08
jmp 0x203f25
pushq $0x6
jmp 0x203ed5
pushq $0x4
popq %r15
jmp 0x203ec4
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rsi, %rax
je 0x203f1a
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x203eeb
movq %rcx, %rbx
movq %r15, %rcx
xorl %r15d, %r15d
jmp 0x203ec4
cmpl $0x6, %r15d
jne 0x2044dd
movl %ebx, %esi
andl $0xf, %esi
addq $-0x8, %rsi
setne %r15b
je 0x2044da
andq $-0x10, %rbx
xorl %ebp, %ebp
imulq $0xb0, %rbp, %r13
vmovaps 0x80(%rbx,%r13), %xmm9
vmovaps 0x40(%rbx,%r13), %xmm6
vmulps %xmm6, %xmm9, %xmm0
vmovaps 0x70(%rbx,%r13), %xmm10
vmovaps 0x50(%rbx,%r13), %xmm7
vmulps %xmm7, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps 0x60(%rbx,%r13), %xmm11
vmulps %xmm7, %xmm11, %xmm1
vmovaps (%rbx,%r13), %xmm3
vmovaps 0x10(%rbx,%r13), %xmm13
vmovaps 0x20(%rbx,%r13), %xmm0
vmovaps 0x30(%rbx,%r13), %xmm8
vmulps %xmm8, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm5
vmulps %xmm8, %xmm10, %xmm2
vmulps %xmm6, %xmm11, %xmm12
vsubps %xmm2, %xmm12, %xmm4
vbroadcastss (%r14), %xmm12
vsubps %xmm12, %xmm3, %xmm2
vbroadcastss 0x4(%r14), %xmm12
vsubps %xmm12, %xmm13, %xmm3
vbroadcastss 0x8(%r14), %xmm12
vsubps %xmm12, %xmm0, %xmm1
vbroadcastss 0x14(%r14), %xmm12
vbroadcastss 0x18(%r14), %xmm13
vmulps %xmm1, %xmm12, %xmm14
vmulps %xmm3, %xmm13, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x10(%r14), %xmm15
vmulps %xmm2, %xmm13, %xmm0
vmovaps %xmm1, 0x60(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm3, 0x40(%rsp)
vmulps %xmm3, %xmm15, %xmm1
vmovaps %xmm2, 0x70(%rsp)
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm4, 0x20(%rsp)
vmulps %xmm4, %xmm13, %xmm2
vmovaps (%rsp), %xmm13
vmovaps %xmm5, 0x30(%rsp)
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm15, %xmm13, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vmulps %xmm14, %xmm11, %xmm10
vaddps %xmm9, %xmm10, %xmm10
vmulps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmovddup 0x1d1cef3(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps %xmm14, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d1ce1b(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm6
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x120(%rsp), %xmm10
jne 0x2040e4
incq %rbp
cmpq %rsi, %rbp
setb %r15b
jne 0x203f48
jmp 0x204485
vandps 0x120(%rsp), %xmm10, %xmm10
vmovaps 0x60(%rsp), %xmm0
vmulps 0x20(%rsp), %xmm0, %xmm0
vmovaps 0x40(%rsp), %xmm1
vmulps 0x30(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x70(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm4
vtestps %xmm10, %xmm4
je 0x2040cf
addq %rbx, %r13
vandps %xmm4, %xmm10, %xmm0
vmovaps %xmm7, 0x130(%rsp)
vmovaps %xmm8, 0x140(%rsp)
vmovaps %xmm3, 0x150(%rsp)
vmovaps %xmm6, 0x160(%rsp)
vmovaps %xmm0, 0x180(%rsp)
vmovaps %xmm13, 0x1c0(%rsp)
vmovaps 0x30(%rsp), %xmm1
vmovaps %xmm1, 0x1d0(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x1e0(%rsp)
movq (%rdx), %rax
movq %rax, 0x70(%rsp)
vrcpps %xmm6, %xmm1
vmulps %xmm1, %xmm6, %xmm2
vbroadcastss 0x1ce8560(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x150(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x1b0(%rsp)
vmulps 0x130(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x190(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x1a0(%rsp)
vmovmskps %xmm0, %eax
movq %rcx, 0x10(%rsp)
movq %rax, (%rsp)
bsfq %rax, %rcx
movq %rcx, 0x20(%rsp)
movl 0x90(%r13,%rcx,4), %eax
movq 0x70(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x40(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x30(%rsp)
testl %ecx, 0x34(%rax)
je 0x20425c
movq 0x10(%rdx), %rax
movq %rax, 0x60(%rsp)
cmpq $0x0, 0x10(%rax)
movq 0x10(%rsp), %rcx
jne 0x20428e
movq 0x30(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x20428e
xorl %eax, %eax
jmp 0x204274
movq 0x20(%rsp), %rax
movq (%rsp), %rcx
btcq %rax, %rcx
movq %rcx, (%rsp)
movb $0x1, %al
movq 0x10(%rsp), %rcx
testb %al, %al
je 0x2044ec
movq (%rsp), %rax
testq %rax, %rax
jne 0x2041ff
jmp 0x2040cf
movq %rsi, 0x88(%rsp)
movq %r11, 0x90(%rsp)
movq %r10, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movq %r8, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
movq 0x20(%rsp), %rsi
vmovss 0x190(%rsp,%rsi,4), %xmm0
vmovss 0x1a0(%rsp,%rsi,4), %xmm1
movq %rdx, 0xb8(%rsp)
movq 0x8(%rdx), %rcx
movl 0xa0(%r13,%rsi,4), %edx
vmovss 0x1c0(%rsp,%rsi,4), %xmm2
vmovss 0x1d0(%rsp,%rsi,4), %xmm3
vmovss 0x1e0(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edx, 0xd4(%rsp)
movq 0x40(%rsp), %rax
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x40(%rsp)
vmovss 0x1b0(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x30(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %r14, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x2043e3
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x204424
movq 0x60(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x204420
movq 0x60(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x204406
movq 0x30(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x204413
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x204424
xorl %eax, %eax
jmp 0x204443
vmovss 0x40(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq (%rsp), %rax
movq 0x20(%rsp), %rcx
btcq %rcx, %rax
movq %rax, (%rsp)
movb $0x1, %al
movq 0xb8(%rsp), %rdx
movq 0xb0(%rsp), %rdi
movq 0xa8(%rsp), %r8
movq 0xa0(%rsp), %r9
movq 0x98(%rsp), %r10
movq 0x90(%rsp), %r11
movq 0x10(%rsp), %rcx
movq 0x88(%rsp), %rsi
jmp 0x204274
vmovups 0x2d0(%rsp), %ymm6
vmovups 0x2b0(%rsp), %ymm7
vmovups 0x290(%rsp), %ymm8
vmovups 0x270(%rsp), %ymm9
vmovups 0x250(%rsp), %ymm10
vmovups 0x230(%rsp), %ymm3
movq 0x58(%rsp), %r13
vmovups 0x210(%rsp), %ymm4
vmovups 0x1f0(%rsp), %ymm5
leaq 0x2f0(%rsp), %rbp
xorl %r15d, %r15d
cmpl $0x3, %r15d
jne 0x203df7
jmp 0x203c6b
testb $0x1, %r15b
vmovups 0x2d0(%rsp), %ymm6
vmovups 0x2b0(%rsp), %ymm7
vmovups 0x290(%rsp), %ymm8
vmovups 0x270(%rsp), %ymm9
vmovups 0x250(%rsp), %ymm10
vmovups 0x230(%rsp), %ymm3
movq 0x58(%rsp), %r13
vmovups 0x210(%rsp), %ymm4
vmovups 0x1f0(%rsp), %ymm5
leaq 0x2f0(%rsp), %rbp
movl $0x0, %r15d
je 0x2044dd
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %r15
jmp 0x2044dd
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMvIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/// @brief Shadow-ray (occlusion) query against an N-wide BVH.
/// Traverses the tree with an explicit node stack and stops at the FIRST
/// occluding primitive: on a hit, ray.tfar is set to -inf and traversal ends.
/// A negative tfar is the "already occluded" marker — see the early-out below.
/// @param This    intersector set; This->ptr holds the BVH being traversed
/// @param ray     single ray to test; mutated only to flag occlusion via tfar
/// @param context per-query context forwarded to the primitive intersector
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (tfar < 0 encodes "occluded") */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root; // seed traversal with the root node
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop — the 'pop' label on the loop body is the target of the goto below */
while (true) pop:
{
/* pop next node; an empty stack means traversal is finished */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: descend until cur is no longer an inner node */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* intersect() returning false means cur is not an inner node; undo the
   speculative node-count increment and fall through to leaf handling */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
/* any hit terminates a shadow query: mark the ray occluded and stop */
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack (primitive intersector may defer a subtree) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1558, %rsp # imm = 0x1558
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x2059ea
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x2059ea
leaq 0x3b8(%rsp), %r8
movq 0x70(%rax), %rax
movq %rax, -0x8(%r8)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d1bf69(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1cec080(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1ce779e(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d1bfdd(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d1af7e(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss (%rsi), %ymm7
vbroadcastss 0x4(%rsi), %ymm5
vmovups %ymm5, 0x390(%rsp)
vbroadcastss 0x8(%rsi), %ymm5
vmovups %ymm5, 0x370(%rsp)
vbroadcastss 0x1d1af52(%rip), %xmm5 # 0x1f1ff14
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vmulps %xmm5, %xmm3, %xmm5
setb %r9b
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x350(%rsp)
vmovshdup %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[1,1,1,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x330(%rsp)
vshufpd $0x1, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x310(%rsp)
vshufps $0x0, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x2f0(%rsp)
shll $0x5, %r9d
xorl %r10d, %r10d
vucomiss %xmm2, %xmm6
vshufps $0x55, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x2d0(%rsp)
vshufps $0xaa, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[2,2,2,2]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x2b0(%rsp)
setb %r10b
shll $0x5, %r10d
orq $0x40, %r10
xorl %r11d, %r11d
vucomiss %xmm2, %xmm3
setb %r11b
shll $0x5, %r11d
orq $0x80, %r11
movq %r9, %r14
xorq $0x20, %r14
movq %r10, %r15
xorq $0x20, %r15
movq %r11, %rax
xorq $0x20, %rax
movq %rax, 0xa8(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovups %ymm1, 0x290(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovups %ymm0, 0x270(%rsp)
leaq 0x1f4aeb5(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovups %ymm7, 0x120(%rsp)
movq %r14, 0x88(%rsp)
movq %r15, 0x80(%rsp)
leaq 0x3b0(%rsp), %rax
cmpq %rax, %r8
je 0x2059ea
movq -0x8(%r8), %rbx
addq $-0x8, %r8
testb $0x8, %bl
jne 0x2051d3
vmovaps 0x40(%rbx,%r9), %ymm0
vsubps %ymm7, %ymm0, %ymm0
vmulps 0x350(%rsp), %ymm0, %ymm0
vmovaps 0x40(%rbx,%r10), %ymm1
vmovups 0x390(%rsp), %ymm3
vsubps %ymm3, %ymm1, %ymm1
vmulps 0x330(%rsp), %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r11), %ymm1
vmovups 0x370(%rsp), %ymm4
vsubps %ymm4, %ymm1, %ymm1
vmulps 0x310(%rsp), %ymm1, %ymm1
vmaxps 0x290(%rsp), %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r14), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps 0x2f0(%rsp), %ymm1, %ymm1
vmovaps 0x40(%rbx,%r15), %ymm2
vsubps %ymm3, %ymm2, %ymm2
vmulps 0x2d0(%rsp), %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
movq 0xa8(%rsp), %rax
vmovaps 0x40(%rbx,%rax), %ymm2
vsubps %ymm4, %ymm2, %ymm2
vmulps 0x2b0(%rsp), %ymm2, %ymm2
vminps 0x270(%rsp), %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r12d
testb $0x8, %bl
jne 0x205217
testq %r12, %r12
je 0x20521b
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdi
xorl %ebp, %ebp
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rdi
jne 0x205220
movq %rax, %rbx
testl %ebp, %ebp
je 0x20510e
jmp 0x205263
pushq $0x6
jmp 0x20521d
pushq $0x4
popq %rbp
jmp 0x20520d
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x20525e
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x20522f
movq %rcx, %rbx
jmp 0x20520d
cmpl $0x6, %ebp
jne 0x2059e1
movl %ebx, %edi
andl $0xf, %edi
xorl %ebp, %ebp
addq $-0x8, %rdi
setne %al
je 0x2059e1
andq $-0x10, %rbx
xorl %ecx, %ecx
movb %al, 0xe(%rsp)
movq %rcx, 0xa0(%rsp)
imulq $0xb0, %rcx, %rcx
leaq 0xf(%rsp), %rax
movq %rax, 0x1f0(%rsp)
vbroadcastss (%rsi), %xmm1
vbroadcastss 0x4(%rsi), %xmm2
vbroadcastss 0x8(%rsi), %xmm3
vmovaps (%rbx,%rcx), %xmm0
vmovaps 0x10(%rbx,%rcx), %xmm4
vmovaps 0x20(%rbx,%rcx), %xmm5
vmovaps 0x30(%rbx,%rcx), %xmm6
vsubps %xmm1, %xmm0, %xmm7
vsubps %xmm2, %xmm4, %xmm14
vsubps %xmm3, %xmm5, %xmm10
vsubps %xmm1, %xmm6, %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x40(%rbx,%rcx), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovaps 0x50(%rbx,%rcx), %xmm4
vsubps %xmm3, %xmm4, %xmm6
vmovaps 0x60(%rbx,%rcx), %xmm4
vsubps %xmm1, %xmm4, %xmm5
vmovaps 0x70(%rbx,%rcx), %xmm1
vsubps %xmm2, %xmm1, %xmm2
vmovaps %xmm2, 0x30(%rsp)
movq %rcx, 0x10(%rsp)
vmovaps 0x80(%rbx,%rcx), %xmm1
vsubps %xmm3, %xmm1, %xmm3
vmovaps %xmm3, 0x40(%rsp)
vsubps %xmm7, %xmm5, %xmm13
vsubps %xmm14, %xmm2, %xmm15
vsubps %xmm10, %xmm3, %xmm0
vaddps %xmm2, %xmm14, %xmm1
vaddps %xmm3, %xmm10, %xmm4
vmulps %xmm0, %xmm1, %xmm8
vmulps %xmm4, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm12
vaddps %xmm5, %xmm7, %xmm8
vmulps %xmm4, %xmm13, %xmm4
vmovaps %xmm0, 0x170(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm4, %xmm9, %xmm4
vmovaps %xmm15, 0x180(%rsp)
vmulps %xmm15, %xmm8, %xmm8
vmovaps %xmm13, 0x190(%rsp)
vmulps %xmm1, %xmm13, %xmm1
vsubps %xmm8, %xmm1, %xmm1
vbroadcastss 0x18(%rsi), %xmm0
vmulps %xmm1, %xmm0, %xmm1
vbroadcastss 0x14(%rsi), %xmm13
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm4, %xmm1, %xmm1
vbroadcastss 0x10(%rsi), %xmm11
vmulps %xmm12, %xmm11, %xmm4
vaddps %xmm1, %xmm4, %xmm9
vmovaps 0x20(%rsp), %xmm3
vsubps %xmm3, %xmm14, %xmm15
vsubps %xmm6, %xmm10, %xmm1
vmovaps %xmm14, 0x1b0(%rsp)
vaddps %xmm3, %xmm14, %xmm2
vmovaps %xmm10, 0x1a0(%rsp)
vaddps %xmm6, %xmm10, %xmm4
vmovaps %xmm6, %xmm10
vmovaps %xmm0, %xmm6
vmulps %xmm1, %xmm2, %xmm12
vmulps %xmm4, %xmm15, %xmm14
vsubps %xmm12, %xmm14, %xmm12
vmovaps 0x50(%rsp), %xmm0
vsubps %xmm0, %xmm7, %xmm14
vmulps %xmm4, %xmm14, %xmm4
vmovaps %xmm7, 0x90(%rsp)
vaddps %xmm0, %xmm7, %xmm7
vmovaps %xmm1, 0x150(%rsp)
vmulps %xmm1, %xmm7, %xmm8
vsubps %xmm4, %xmm8, %xmm4
vbroadcastss 0x1d1ba9f(%rip), %xmm8 # 0x1f20ec4
vmovaps %xmm15, 0x160(%rsp)
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm2, %xmm14, %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm1
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm4, %xmm1, %xmm1
vmulps %xmm12, %xmm11, %xmm4
vaddps %xmm1, %xmm4, %xmm4
vsubps %xmm5, %xmm0, %xmm1
vaddps %xmm5, %xmm0, %xmm7
vmovaps 0x30(%rsp), %xmm0
vsubps %xmm0, %xmm3, %xmm12
vaddps %xmm0, %xmm3, %xmm0
vmovaps 0x40(%rsp), %xmm2
vsubps %xmm2, %xmm10, %xmm15
vaddps %xmm2, %xmm10, %xmm2
vmulps %xmm0, %xmm15, %xmm3
vmulps %xmm2, %xmm12, %xmm5
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm2, %xmm1, %xmm2
vmulps %xmm7, %xmm15, %xmm5
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm7, %xmm12, %xmm5
vmovups 0x120(%rsp), %ymm7
vmulps %xmm0, %xmm1, %xmm0
vsubps %xmm5, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm0
vmovaps %xmm13, 0x50(%rsp)
vmulps %xmm2, %xmm13, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm11, 0x40(%rsp)
vmulps %xmm3, %xmm11, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm4, %xmm9, %xmm2
vaddps %xmm2, %xmm0, %xmm13
vminps %xmm4, %xmm9, %xmm2
vminps %xmm0, %xmm2, %xmm2
vandps %xmm8, %xmm13, %xmm5
vbroadcastss 0x1d1b9ee(%rip), %xmm3 # 0x1f20ecc
vmovaps %xmm5, 0x20(%rsp)
vmulps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d1b9cf(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm3, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm9, 0x30(%rsp)
vmaxps %xmm4, %xmm9, %xmm5
vmaxps %xmm0, %xmm5, %xmm0
vcmpleps %xmm3, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vtestps 0x110(%rsp), %xmm0
je 0x205997
vmovaps %xmm6, %xmm10
vmovaps 0x170(%rsp), %xmm7
vmovaps 0x160(%rsp), %xmm11
vmovaps %xmm0, 0x140(%rsp)
vmulps %xmm7, %xmm11, %xmm0
vmovaps 0x180(%rsp), %xmm9
vmovaps 0x150(%rsp), %xmm6
vmulps %xmm6, %xmm9, %xmm2
vsubps %xmm0, %xmm2, %xmm2
vmulps %xmm6, %xmm12, %xmm3
vmulps %xmm15, %xmm11, %xmm5
vsubps %xmm3, %xmm5, %xmm5
vandps %xmm0, %xmm8, %xmm0
vandps %xmm3, %xmm8, %xmm3
vcmpltps %xmm3, %xmm0, %xmm0
vblendvps %xmm0, %xmm2, %xmm5, %xmm0
vmulps %xmm15, %xmm14, %xmm2
vmulps %xmm7, %xmm14, %xmm3
vmovaps 0x190(%rsp), %xmm7
vmulps %xmm7, %xmm6, %xmm5
vsubps %xmm5, %xmm3, %xmm3
vmulps %xmm1, %xmm6, %xmm6
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm5, %xmm8, %xmm5
vandps %xmm2, %xmm8, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps %xmm1, %xmm11, %xmm1
vmulps %xmm7, %xmm11, %xmm3
vmulps %xmm9, %xmm14, %xmm5
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm5, %xmm8, %xmm5
vandps %xmm1, %xmm8, %xmm1
vcmpltps %xmm1, %xmm5, %xmm1
vblendvps %xmm1, %xmm3, %xmm6, %xmm1
vmulps %xmm1, %xmm10, %xmm3
vmulps 0x50(%rsp), %xmm2, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vmulps 0x40(%rsp), %xmm0, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x1a0(%rsp), %xmm1, %xmm3
vmulps 0x1b0(%rsp), %xmm2, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vmulps 0x90(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1ce70e0(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovups 0x120(%rsp), %ymm7
vcmpneqps 0x1ce63a0(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x140(%rsp), %xmm5
vandps 0x110(%rsp), %xmm5, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x205997
addq %rbx, 0x10(%rsp)
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x30(%rsp), %xmm6
vmovaps %xmm6, 0x1c0(%rsp)
vmovaps %xmm4, 0x1d0(%rsp)
vmovaps %xmm13, 0x1e0(%rsp)
movq %rax, 0x1f0(%rsp)
vmovaps %xmm5, 0x200(%rsp)
vmovaps %xmm3, 0x230(%rsp)
vmovaps %xmm0, 0x240(%rsp)
vmovaps %xmm2, 0x250(%rsp)
vmovaps %xmm1, 0x260(%rsp)
movq (%rdx), %rax
movq %rax, 0x50(%rsp)
vrcpps %xmm13, %xmm0
vmulps %xmm0, %xmm13, %xmm1
vbroadcastss 0x1ce7000(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1ceb8bf(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x20(%rsp), %xmm3
vcmpnltps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x210(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovmskps %xmm5, %r15d
bsfq %r15, %r13
movq 0x10(%rsp), %rax
movl 0x90(%rax,%r13,4), %eax
movq 0x50(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rbp
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rbp)
je 0x20579d
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x2057b7
cmpq $0x0, 0x48(%rbp)
jne 0x2057b7
xorl %eax, %eax
jmp 0x2057a3
btcq %r13, %r15
movb $0x1, %al
xorl %ebp, %ebp
testb %al, %al
je 0x2059c0
testq %r15, %r15
jne 0x20575e
jmp 0x205997
movq %rdi, 0x40(%rsp)
vmovss 0x210(%rsp,%r13,4), %xmm0
vmovss 0x220(%rsp,%r13,4), %xmm1
movq 0x8(%rdx), %rcx
movq 0x10(%rsp), %rdi
movl 0xa0(%rdi,%r13,4), %edi
vmovss 0x240(%rsp,%r13,4), %xmm2
vmovss 0x250(%rsp,%r13,4), %xmm3
vmovss 0x260(%rsp,%r13,4), %xmm4
vmovss %xmm2, 0xb0(%rsp)
vmovss %xmm3, 0xb4(%rsp)
vmovss %xmm4, 0xb8(%rsp)
vmovss %xmm0, 0xbc(%rsp)
vmovss %xmm1, 0xc0(%rsp)
movl %edi, 0xc4(%rsp)
movl %eax, 0xc8(%rsp)
movl (%rcx), %eax
movl %eax, 0xcc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xd0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x30(%rsp)
vmovss 0x230(%rsp,%r13,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xe0(%rsp)
movq 0x18(%rbp), %rax
movq %rax, 0xe8(%rsp)
movq %rcx, 0xf0(%rsp)
movq %rsi, 0xf8(%rsp)
leaq 0xb0(%rsp), %rax
movq %rax, 0x100(%rsp)
movl $0x1, 0x108(%rsp)
movq 0x48(%rbp), %rax
testq %rax, %rax
movq %r8, 0x78(%rsp)
movq %r9, 0x70(%rsp)
movq %r10, 0x68(%rsp)
movq %r11, 0x60(%rsp)
je 0x20591e
leaq 0xe0(%rsp), %rdi
movq %rdx, 0x20(%rsp)
movq %rsi, 0x90(%rsp)
vzeroupper
callq *%rax
movq 0x60(%rsp), %r11
movq 0x68(%rsp), %r10
movq 0x70(%rsp), %r9
vmovups 0x120(%rsp), %ymm7
movq 0x78(%rsp), %r8
movq 0x90(%rsp), %rsi
movq 0x20(%rsp), %rdx
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20597a
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x205976
testb $0x2, (%r14)
jne 0x205933
testb $0x40, 0x3e(%rbp)
je 0x205969
leaq 0xe0(%rsp), %rdi
movq %rdx, %r14
movq %rsi, %rbp
vzeroupper
callq *%rax
movq 0x60(%rsp), %r11
movq 0x68(%rsp), %r10
movq 0x70(%rsp), %r9
vmovups 0x120(%rsp), %ymm7
movq 0x78(%rsp), %r8
movq %rbp, %rsi
movq %r14, %rdx
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20597a
xorl %eax, %eax
jmp 0x20598b
vmovss 0x30(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %r13, %r15
movb $0x1, %al
xorl %ebp, %ebp
movq 0x40(%rsp), %rdi
jmp 0x2057a5
movq 0xa0(%rsp), %rcx
incq %rcx
cmpq %rdi, %rcx
setb %al
jne 0x205286
movq 0x88(%rsp), %r14
movq 0x80(%rsp), %r15
jmp 0x2059e1
testb $0x1, 0xe(%rsp)
movq 0x88(%rsp), %r14
movq 0x80(%rsp), %r15
je 0x2059e1
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
cmpl $0x3, %ebp
jne 0x2050f5
addq $0x1558, %rsp # imm = 0x1558
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMvMBIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/// @brief Shadow-ray (occlusion) query against an N-wide BVH.
/// Traverses the tree with an explicit node stack and stops at the FIRST
/// occluding primitive: on a hit, ray.tfar is set to -inf and traversal ends.
/// A negative tfar is the "already occluded" marker — see the early-out below.
/// @param This    intersector set; This->ptr holds the BVH being traversed
/// @param ray     single ray to test; mutated only to flag occlusion via tfar
/// @param context per-query context forwarded to the primitive intersector
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (tfar < 0 encodes "occluded") */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root; // seed traversal with the root node
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop — the 'pop' label on the loop body is the target of the goto below */
while (true) pop:
{
/* pop next node; an empty stack means traversal is finished */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: descend until cur is no longer an inner node */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* intersect() returning false means cur is not an inner node; undo the
   speculative node-count increment and fall through to leaf handling */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
/* any hit terminates a shadow query: mark the ray occluded and stop */
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack (primitive intersector may defer a subtree) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1498, %rsp # imm = 0x1498
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x207040
addq $0x1498, %rsp # imm = 0x1498
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x20702b
leaq 0x2f8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%r14), %xmm2, %xmm1
vmovaps 0x10(%r14), %xmm3
vbroadcastss 0x1d19e4d(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce9f64(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1ce5674(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %ymm6
vbroadcastss 0x4(%r14), %ymm7
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss 0x8(%r14), %ymm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
vinsertf128 $0x1, %xmm3, %ymm3, %ymm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %r15
xorq $0x20, %r15
movq %r10, %r13
xorq $0x20, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm11
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm12
leaq 0x1f48e2f(%rip), %rax # 0x214ff80
vmovdqa 0xf0(%rax), %xmm0
vmovdqa %xmm0, 0x120(%rsp)
leaq 0x2f0(%rsp), %rbp
vmovups %ymm6, 0x2d0(%rsp)
vmovups %ymm7, 0x2b0(%rsp)
vmovups %ymm8, 0x290(%rsp)
vmovups %ymm9, 0x270(%rsp)
vmovups %ymm10, 0x250(%rsp)
vmovups %ymm5, 0x230(%rsp)
movq %r15, 0x68(%rsp)
movq %r13, 0x60(%rsp)
vmovups %ymm11, 0x210(%rsp)
vmovups %ymm12, 0x1f0(%rsp)
cmpq %rbp, %rdi
je 0x20702b
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x2072be
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%r14), %ymm0
vmulps 0x100(%rax,%r8), %ymm0, %ymm1
vaddps 0x40(%rax,%r8), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r9), %ymm0, %ymm2
vaddps 0x40(%rax,%r9), %ymm2, %ymm2
vmaxps %ymm1, %ymm11, %ymm1
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x100(%rax,%r10), %ymm0, %ymm3
vmulps %ymm2, %ymm10, %ymm2
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm5, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r11), %ymm3, %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vminps %ymm2, %ymm12, %ymm2
vmulps 0x100(%rax,%r15), %ymm0, %ymm3
vaddps 0x40(%rax,%r15), %ymm3, %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm10, %ymm3
vmulps 0x100(%rax,%r13), %ymm0, %ymm4
vaddps 0x40(%rax,%r13), %ymm4, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm5, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x207306
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %ebx
testb $0x8, %r12b
jne 0x207302
testq %rbx, %rbx
je 0x207333
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rsi
xorl %ecx, %ecx
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rsi
jne 0x207338
movq %rax, %r12
testl %ecx, %ecx
je 0x2071cd
jmp 0x207380
pushq $0x6
jmp 0x207335
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x2072ae
pushq $0x4
popq %rcx
jmp 0x2072f8
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rsi, %rax
je 0x207376
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x207347
movq %rcx, %r12
xorl %ecx, %ecx
jmp 0x2072f8
cmpl $0x6, %ecx
jne 0x20799d
movl %r12d, %esi
andl $0xf, %esi
xorl %ecx, %ecx
addq $-0x8, %rsi
setne %bpl
je 0x207995
andq $-0x10, %r12
xorl %r15d, %r15d
imulq $0x140, %r15, %r13 # imm = 0x140
vbroadcastss 0x1c(%r14), %xmm0
vmulps 0x90(%r12,%r13), %xmm0, %xmm1
vaddps (%r12,%r13), %xmm1, %xmm7
vmulps 0xa0(%r12,%r13), %xmm0, %xmm1
vaddps 0x10(%r12,%r13), %xmm1, %xmm5
vmulps 0xb0(%r12,%r13), %xmm0, %xmm1
vaddps 0x20(%r12,%r13), %xmm1, %xmm6
vmulps 0xc0(%r12,%r13), %xmm0, %xmm1
vaddps 0x30(%r12,%r13), %xmm1, %xmm1
vmulps 0xd0(%r12,%r13), %xmm0, %xmm2
vaddps 0x40(%r12,%r13), %xmm2, %xmm2
vmulps 0xe0(%r12,%r13), %xmm0, %xmm4
vaddps 0x50(%r12,%r13), %xmm4, %xmm8
vmulps 0xf0(%r12,%r13), %xmm0, %xmm4
vaddps 0x60(%r12,%r13), %xmm4, %xmm9
vmulps 0x100(%r12,%r13), %xmm0, %xmm4
vaddps 0x70(%r12,%r13), %xmm4, %xmm10
vmulps 0x110(%r12,%r13), %xmm0, %xmm0
vaddps 0x80(%r12,%r13), %xmm0, %xmm0
vsubps %xmm1, %xmm7, %xmm3
vmovaps %xmm3, (%rsp)
vsubps %xmm2, %xmm5, %xmm12
vmovaps %xmm12, 0x40(%rsp)
vsubps %xmm8, %xmm6, %xmm8
vsubps %xmm7, %xmm9, %xmm9
vsubps %xmm5, %xmm10, %xmm10
vsubps %xmm6, %xmm0, %xmm11
vmulps %xmm11, %xmm12, %xmm0
vmulps %xmm10, %xmm8, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmulps %xmm9, %xmm8, %xmm1
vmulps %xmm3, %xmm11, %xmm2
vsubps %xmm1, %xmm2, %xmm4
vmulps %xmm3, %xmm10, %xmm2
vmulps %xmm9, %xmm12, %xmm12
vsubps %xmm2, %xmm12, %xmm3
vbroadcastss (%r14), %xmm12
vbroadcastss 0x4(%r14), %xmm13
vbroadcastss 0x8(%r14), %xmm14
vbroadcastss 0x14(%r14), %xmm15
vsubps %xmm12, %xmm7, %xmm2
vbroadcastss 0x18(%r14), %xmm12
vsubps %xmm13, %xmm5, %xmm5
vsubps %xmm14, %xmm6, %xmm6
vmulps %xmm6, %xmm15, %xmm13
vmulps %xmm5, %xmm12, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vbroadcastss 0x10(%r14), %xmm14
vmulps %xmm2, %xmm12, %xmm0
vmulps %xmm6, %xmm14, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmulps %xmm5, %xmm14, %xmm1
vmovaps %xmm2, 0x20(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm3, 0x70(%rsp)
vmulps %xmm3, %xmm12, %xmm2
vmulps %xmm4, %xmm15, %xmm12
vmovaps %xmm4, %xmm15
vaddps %xmm2, %xmm12, %xmm2
vmovaps 0x30(%rsp), %xmm3
vmulps %xmm3, %xmm14, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm11, %xmm11
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm13, %xmm9, %xmm9
vaddps %xmm10, %xmm9, %xmm10
vmulps %xmm1, %xmm8, %xmm1
vmulps 0x40(%rsp), %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vmovddup 0x1d19a1a(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps (%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d19942(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm4
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm4, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x120(%rsp), %xmm10
jne 0x2075bd
incq %r15
cmpq %rsi, %r15
setb %bpl
jne 0x2073a6
jmp 0x207943
vmovaps %xmm3, %xmm14
vandps 0x120(%rsp), %xmm10, %xmm10
vmulps 0x70(%rsp), %xmm6, %xmm0
vmulps %xmm5, %xmm15, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x20(%rsp), %xmm3, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm4, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm4, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm5
vtestps %xmm10, %xmm5
je 0x2075a8
addq %r12, %r13
vandps %xmm5, %xmm10, %xmm0
vmovaps %xmm7, 0x130(%rsp)
vmovaps %xmm8, 0x140(%rsp)
vmovaps %xmm3, 0x150(%rsp)
vmovaps %xmm4, 0x160(%rsp)
vmovaps %xmm0, 0x180(%rsp)
vmovaps %xmm14, 0x1c0(%rsp)
vmovaps %xmm15, 0x1d0(%rsp)
vmovaps 0x70(%rsp), %xmm1
vmovaps %xmm1, 0x1e0(%rsp)
movq (%rdx), %rax
movq %rax, 0x70(%rsp)
vrcpps %xmm4, %xmm1
vmulps %xmm1, %xmm4, %xmm2
vbroadcastss 0x1ce5097(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x150(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x1b0(%rsp)
vmulps 0x130(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x190(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x1a0(%rsp)
vmovmskps %xmm0, %eax
movq %rax, (%rsp)
bsfq %rax, %rcx
movq %rcx, 0x30(%rsp)
movl 0x120(%r13,%rcx,4), %eax
movq 0x70(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x20(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x40(%rsp)
testl %ecx, 0x34(%rax)
je 0x207720
movq 0x10(%rdx), %rax
movq %rax, 0x58(%rsp)
cmpq $0x0, 0x10(%rax)
movl $0x0, %ecx
jne 0x20774f
movq 0x40(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x20774f
xorl %eax, %eax
jmp 0x207735
movq 0x30(%rsp), %rax
movq (%rsp), %rcx
btcq %rax, %rcx
movq %rcx, (%rsp)
movb $0x1, %al
xorl %ecx, %ecx
testb %al, %al
je 0x2079ab
movq (%rsp), %rax
testq %rax, %rax
jne 0x2076c3
jmp 0x2075a8
movq %rsi, 0x88(%rsp)
movq %r11, 0x90(%rsp)
movq %r10, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movq %r8, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
movq 0x30(%rsp), %rsi
vmovss 0x190(%rsp,%rsi,4), %xmm0
vmovss 0x1a0(%rsp,%rsi,4), %xmm1
movq %rdx, 0xb8(%rsp)
movq 0x8(%rdx), %rcx
movl 0x130(%r13,%rsi,4), %edx
vmovss 0x1c0(%rsp,%rsi,4), %xmm2
vmovss 0x1d0(%rsp,%rsi,4), %xmm3
vmovss 0x1e0(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edx, 0xd4(%rsp)
movq 0x20(%rsp), %rax
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x1b0(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x40(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %r14, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x2078a4
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x2078e5
movq 0x58(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x2078e1
movq 0x58(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x2078c7
movq 0x40(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x2078d4
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x2078e5
xorl %eax, %eax
jmp 0x207904
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq (%rsp), %rax
movq 0x30(%rsp), %rcx
btcq %rcx, %rax
movq %rax, (%rsp)
movb $0x1, %al
movq 0xb8(%rsp), %rdx
movq 0xb0(%rsp), %rdi
movq 0xa8(%rsp), %r8
movq 0xa0(%rsp), %r9
movq 0x98(%rsp), %r10
movq 0x90(%rsp), %r11
xorl %ecx, %ecx
movq 0x88(%rsp), %rsi
jmp 0x207735
vmovups 0x2d0(%rsp), %ymm6
vmovups 0x2b0(%rsp), %ymm7
vmovups 0x290(%rsp), %ymm8
vmovups 0x270(%rsp), %ymm9
vmovups 0x250(%rsp), %ymm10
vmovups 0x230(%rsp), %ymm5
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
vmovups 0x210(%rsp), %ymm11
vmovups 0x1f0(%rsp), %ymm12
leaq 0x2f0(%rsp), %rbp
cmpl $0x3, %ecx
jne 0x2071bc
jmp 0x20702b
testb $0x1, %bpl
vmovups 0x2d0(%rsp), %ymm6
vmovups 0x2b0(%rsp), %ymm7
vmovups 0x290(%rsp), %ymm8
vmovups 0x270(%rsp), %ymm9
vmovups 0x250(%rsp), %ymm10
vmovups 0x230(%rsp), %ymm5
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
vmovups 0x210(%rsp), %ymm11
vmovups 0x1f0(%rsp), %ymm12
leaq 0x2f0(%rsp), %rbp
je 0x20799d
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rcx
jmp 0x20799d
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiMBIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/// @brief Shadow-ray (occlusion) query against an N-wide BVH.
/// Traverses the tree with an explicit node stack and stops at the FIRST
/// occluding primitive: on a hit, ray.tfar is set to -inf and traversal ends.
/// A negative tfar is the "already occluded" marker — see the early-out below.
/// @param This    intersector set; This->ptr holds the BVH being traversed
/// @param ray     single ray to test; mutated only to flag occlusion via tfar
/// @param context per-query context forwarded to the primitive intersector
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (tfar < 0 encodes "occluded") */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root; // seed traversal with the root node
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop — the 'pop' label on the loop body is the target of the goto below */
while (true) pop:
{
/* pop next node; an empty stack means traversal is finished */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: descend until cur is no longer an inner node */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* intersect() returning false means cur is not an inner node; undo the
   speculative node-count increment and fall through to leaf handling */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
/* any hit terminates a shadow query: mark the ray occluded and stop */
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack (primitive intersector may defer a subtree) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x14f8, %rsp # imm = 0x14F8
movq %rdx, 0x8(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x207a4d
addq $0x14f8, %rsp # imm = 0x14F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x207a38
leaq 0x358(%rsp), %rdi
movq 0x70(%rax), %rax
vmovaps 0x10(%rsi), %xmm2
vmaxss 0xc(%rsi), %xmm5, %xmm1
vbroadcastss 0x1d19449(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1ce9560(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq %rax, -0x8(%rdi)
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1ce4c6c(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
vaddps %xmm2, %xmm3, %xmm2
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm10
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
vinsertf128 $0x1, %xmm2, %ymm2, %ymm11
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %rbx
xorq $0x20, %rbx
movq %r10, %r14
xorq $0x20, %r14
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm12
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm13
leaq 0x1f48427(%rip), %rax # 0x214ff80
vmovdqa 0xf0(%rax), %xmm0
vmovdqa %xmm0, 0x140(%rsp)
leaq 0x350(%rsp), %r15
movq %rsi, 0x10(%rsp)
vmovups %ymm6, 0x330(%rsp)
vmovups %ymm7, 0x310(%rsp)
vmovups %ymm8, 0x2f0(%rsp)
movq %r8, 0x80(%rsp)
vmovups %ymm9, 0x2d0(%rsp)
vmovups %ymm10, 0x2b0(%rsp)
movq %r9, 0x78(%rsp)
vmovups %ymm11, 0x290(%rsp)
movq %r10, 0x70(%rsp)
movq %r11, 0x68(%rsp)
movq %rbx, 0x60(%rsp)
movq %r14, 0x58(%rsp)
vmovups %ymm12, 0x270(%rsp)
vmovups %ymm13, 0x250(%rsp)
cmpq %r15, %rdi
je 0x207a38
movq -0x8(%rdi), %r13
addq $-0x8, %rdi
testb $0x8, %r13b
jne 0x207ce1
movq %r13, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r8), %ymm0, %ymm1
vaddps 0x40(%rax,%r8), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r9), %ymm0, %ymm2
vaddps 0x40(%rax,%r9), %ymm2, %ymm2
vmaxps %ymm1, %ymm12, %ymm1
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x100(%rax,%r10), %ymm0, %ymm3
vmulps %ymm2, %ymm10, %ymm2
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r11), %ymm3, %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vminps %ymm2, %ymm13, %ymm2
vmulps 0x100(%rax,%rbx), %ymm0, %ymm3
vaddps 0x40(%rax,%rbx), %ymm3, %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm10, %ymm3
vmulps 0x100(%rax,%r14), %ymm0, %ymm4
vaddps 0x40(%rax,%r14), %ymm4, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm11, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r13d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x207d2b
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r12d
testb $0x8, %r13b
jne 0x207d27
testq %r12, %r12
je 0x207d58
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %ebp, %ebp
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rdx
jne 0x207d5d
movq %rax, %r13
testl %ebp, %ebp
je 0x207bf1
jmp 0x207da4
pushq $0x6
jmp 0x207d5a
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x207cd0
pushq $0x4
popq %rbp
jmp 0x207d1d
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x207d9c
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x207d6c
movq %rcx, %r13
jmp 0x207d1d
cmpl $0x6, %ebp
jne 0x20860c
movl %r13d, %eax
andl $0xf, %eax
xorl %ebp, %ebp
addq $-0x8, %rax
movq %rax, 0xd0(%rsp)
setne %cl
je 0x20860c
movq %r12, 0x88(%rsp)
movq %rdi, 0x90(%rsp)
andq $-0x10, %r13
movq 0x8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x98(%rsp)
xorl %eax, %eax
movb %cl, 0x7(%rsp)
movq %rax, 0xd8(%rsp)
imulq $0x50, %rax, %rax
vmovss 0x1c(%rsi), %xmm0
movl 0x30(%r13,%rax), %ecx
movq 0x98(%rsp), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rcx
vmovss 0x28(%rcx), %xmm1
vmovss 0x2c(%rcx), %xmm2
vmovss 0x30(%rcx), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm6
vroundss $0x9, %xmm6, %xmm6, %xmm0
vaddss 0x1ce8b82(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm0, %xmm0
vmaxss %xmm0, %xmm5, %xmm2
vcvttss2si %xmm2, %edx
movslq %edx, %rdx
movq 0xe0(%rcx), %rcx
imulq $0x38, %rdx, %rsi
movl (%r13,%rax), %ebp
movl 0x4(%r13,%rax), %edx
movq (%rcx,%rsi), %r12
movq 0x38(%rcx,%rsi), %rcx
vmovups (%r12,%rbp,4), %xmm4
movl 0x10(%r13,%rax), %r15d
vmovups (%r12,%r15,4), %xmm5
movl 0x20(%r13,%rax), %esi
movq %rsi, 0x40(%rsp)
vmovups (%r12,%rsi,4), %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmovups (%r12,%rdx,4), %xmm8
movl 0x14(%r13,%rax), %edi
vmovups (%r12,%rdi,4), %xmm7
movl 0x24(%r13,%rax), %esi
vmovups (%r12,%rsi,4), %xmm0
vmovaps %xmm0, 0x20(%rsp)
movl 0x8(%r13,%rax), %ebx
vmovups (%r12,%rbx,4), %xmm10
movl 0x18(%r13,%rax), %r10d
vmovups (%r12,%r10,4), %xmm9
movl 0x28(%r13,%rax), %r8d
vmovups (%r12,%r8,4), %xmm11
movl 0xc(%r13,%rax), %r14d
vmovups (%r12,%r14,4), %xmm1
movl 0x1c(%r13,%rax), %r11d
vmovups (%r12,%r11,4), %xmm14
movl 0x2c(%r13,%rax), %r9d
vmovups (%r12,%r9,4), %xmm12
vmovups (%rcx,%rbp,4), %xmm15
vmovups (%rcx,%r15,4), %xmm13
vsubss %xmm2, %xmm6, %xmm6
vunpcklps %xmm10, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm1, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
vunpckhps %xmm1, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
vmovups (%rcx,%rdx,4), %xmm3
vunpcklps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vmovaps %xmm1, 0x160(%rsp)
vunpcklps %xmm0, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vmovaps %xmm0, 0xb0(%rsp)
vunpcklps %xmm9, %xmm5, %xmm0 # xmm0 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm1 # xmm1 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm14, %xmm7, %xmm2 # xmm2 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
vunpckhps %xmm14, %xmm7, %xmm5 # xmm5 = xmm7[2],xmm14[2],xmm7[3],xmm14[3]
vmovups (%rcx,%rbx,4), %xmm14
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vmovaps %xmm1, 0xa0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm9 # xmm9 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x150(%rsp)
vmovaps 0x30(%rsp), %xmm1
vunpcklps %xmm11, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
vunpckhps %xmm11, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
vmovaps 0x20(%rsp), %xmm2
vunpcklps %xmm12, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
vunpckhps %xmm12, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm12[2],xmm2[3],xmm12[3]
vmovups (%rcx,%r14,4), %xmm11
vunpcklps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vmovaps %xmm1, 0x30(%rsp)
vunpcklps %xmm5, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vmovaps %xmm1, 0xc0(%rsp)
vunpckhps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vmovaps %xmm0, 0x20(%rsp)
vunpcklps %xmm14, %xmm15, %xmm0 # xmm0 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
vunpckhps %xmm14, %xmm15, %xmm1 # xmm1 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
vunpcklps %xmm11, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vmovups (%rcx,%r10,4), %xmm11
vunpcklps %xmm3, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpcklps %xmm5, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpckhps %xmm5, %xmm0, %xmm2 # xmm2 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vunpcklps %xmm11, %xmm13, %xmm0 # xmm0 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
vunpckhps %xmm11, %xmm13, %xmm5 # xmm5 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
vmovups (%rcx,%rdi,4), %xmm11
vmovups (%rcx,%r11,4), %xmm12
vunpcklps %xmm12, %xmm11, %xmm13 # xmm13 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpckhps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
vunpcklps %xmm11, %xmm5, %xmm11 # xmm11 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpcklps %xmm13, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
vunpckhps %xmm13, %xmm0, %xmm7 # xmm7 = xmm0[2],xmm13[2],xmm0[3],xmm13[3]
movq 0x40(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
vmovups (%rcx,%r8,4), %xmm5
vunpcklps %xmm5, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpckhps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vmovups (%rcx,%rsi,4), %xmm5
movq 0x10(%rsp), %rsi
vmovups (%rcx,%r9,4), %xmm12
vunpcklps %xmm12, %xmm5, %xmm15 # xmm15 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
vunpckhps %xmm12, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
vunpcklps %xmm5, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpcklps %xmm15, %xmm13, %xmm14 # xmm14 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
vunpckhps %xmm15, %xmm13, %xmm15 # xmm15 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
vshufps $0x0, %xmm6, %xmm6, %xmm0 # xmm0 = xmm6[0,0,0,0]
vmovss 0x1ce46af(%rip), %xmm5 # 0x1eec714
vsubss %xmm6, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm13 # xmm13 = xmm5[0,0,0,0]
vmulps %xmm3, %xmm0, %xmm3
vmulps %xmm8, %xmm13, %xmm5
vaddps %xmm3, %xmm5, %xmm12
vmulps %xmm2, %xmm0, %xmm2
vmulps 0xb0(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm5
vmulps %xmm4, %xmm0, %xmm2
vmulps 0x160(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm6
vmulps %xmm1, %xmm0, %xmm1
vmulps %xmm9, %xmm13, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm7, %xmm0, %xmm2
vmulps 0x150(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmovaps 0x30(%r13,%rax), %xmm3
vmovaps %xmm3, 0x180(%rsp)
vmovaps 0x40(%r13,%rax), %xmm3
movq 0xd8(%rsp), %rax
vmulps %xmm0, %xmm11, %xmm7
vmulps 0xa0(%rsp), %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm9
vmulps %xmm0, %xmm14, %xmm7
vmulps %xmm0, %xmm15, %xmm8
vmulps %xmm0, %xmm10, %xmm0
vmulps 0xc0(%rsp), %xmm13, %xmm10
vaddps %xmm7, %xmm10, %xmm10
vmulps 0x20(%rsp), %xmm13, %xmm7
vaddps %xmm7, %xmm8, %xmm11
vmulps 0x30(%rsp), %xmm13, %xmm7
vaddps %xmm0, %xmm7, %xmm0
vmovaps %xmm3, 0x170(%rsp)
vsubps %xmm1, %xmm12, %xmm7
vmovaps %xmm5, %xmm1
vmovaps %xmm5, 0xb0(%rsp)
vsubps %xmm2, %xmm5, %xmm8
vsubps %xmm9, %xmm6, %xmm9
vsubps %xmm12, %xmm10, %xmm10
vmovaps %xmm12, %xmm5
vsubps %xmm1, %xmm11, %xmm11
vsubps %xmm6, %xmm0, %xmm12
vmulps %xmm12, %xmm8, %xmm0
vmulps %xmm11, %xmm9, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmulps %xmm10, %xmm9, %xmm1
vmulps %xmm7, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm4
vmulps %xmm7, %xmm11, %xmm2
vmulps %xmm10, %xmm8, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss (%rsi), %xmm0
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vbroadcastss 0x14(%rsi), %xmm15
vsubps %xmm0, %xmm5, %xmm2
vbroadcastss 0x18(%rsi), %xmm0
vmovaps 0xb0(%rsp), %xmm1
vsubps %xmm13, %xmm1, %xmm5
vsubps %xmm14, %xmm6, %xmm1
vmulps %xmm1, %xmm15, %xmm6
vmulps %xmm0, %xmm5, %xmm13
vsubps %xmm6, %xmm13, %xmm6
vbroadcastss 0x10(%rsi), %xmm13
vmulps %xmm0, %xmm2, %xmm14
vmovaps %xmm1, 0xc0(%rsp)
vmulps %xmm1, %xmm13, %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmovaps %xmm5, 0x20(%rsp)
vmulps %xmm5, %xmm13, %xmm14
vmovaps %xmm2, 0x30(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm14, %xmm2, %xmm2
vmovaps %xmm3, 0xa0(%rsp)
vmulps %xmm0, %xmm3, %xmm0
vmulps %xmm4, %xmm15, %xmm14
vaddps %xmm0, %xmm14, %xmm0
vmovaps 0x40(%rsp), %xmm14
vmulps %xmm13, %xmm14, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm2, %xmm12, %xmm12
vmulps %xmm1, %xmm11, %xmm11
vaddps %xmm12, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmovddup 0x1d18d2b(%rip), %xmm2 # xmm2 = mem[0,0]
vandps %xmm2, %xmm0, %xmm9
vxorps %xmm10, %xmm9, %xmm8
vmulps %xmm6, %xmm7, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vxorps %xmm1, %xmm9, %xmm7
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm8, %xmm1
vcmpnltps %xmm10, %xmm7, %xmm2
vandps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1d18c54(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm6
vcmpneqps %xmm0, %xmm10, %xmm0
vandps %xmm0, %xmm1, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x140(%rsp), %xmm10
jne 0x2082b3
incq %rax
cmpq 0xd0(%rsp), %rax
setb %cl
vxorps %xmm5, %xmm5, %xmm5
jne 0x207df0
jmp 0x208589
vmovaps %xmm4, %xmm15
vandps 0x140(%rsp), %xmm10, %xmm10
vmovaps 0xc0(%rsp), %xmm0
vmulps 0xa0(%rsp), %xmm0, %xmm0
vmulps 0x20(%rsp), %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x30(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%rsi), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%rsi), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm4
vtestps %xmm10, %xmm4
je 0x208296
vandps %xmm4, %xmm10, %xmm0
vmovaps %xmm8, 0x190(%rsp)
vmovaps %xmm7, 0x1a0(%rsp)
vmovaps %xmm3, 0x1b0(%rsp)
vmovaps %xmm6, 0x1c0(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm14, 0x220(%rsp)
vmovaps %xmm15, 0x230(%rsp)
vmovaps 0xa0(%rsp), %xmm1
vmovaps %xmm1, 0x240(%rsp)
vrcpps %xmm6, %xmm1
vmulps %xmm1, %xmm6, %xmm2
vbroadcastss 0x1ce439b(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x1b0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x210(%rsp)
vmulps 0x190(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x1f0(%rsp)
vmulps 0x1a0(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x200(%rsp)
vmovmskps %xmm0, %r15d
movq %rax, %rbx
bsfq %r15, %r12
movl 0x180(%rsp,%r12,4), %eax
movq 0x98(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rbp
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rbp)
je 0x208404
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x20841f
cmpq $0x0, 0x48(%rbp)
jne 0x20841f
xorl %eax, %eax
jmp 0x20840a
btcq %r12, %r15
movb $0x1, %al
testb %al, %al
je 0x20861a
testq %r15, %r15
movq %rbx, %rax
jne 0x2083bf
jmp 0x208296
vmovss 0x1f0(%rsp,%r12,4), %xmm0
vmovss 0x200(%rsp,%r12,4), %xmm1
movq 0x8(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0x170(%rsp,%r12,4), %edx
vmovss 0x220(%rsp,%r12,4), %xmm2
vmovss 0x230(%rsp,%r12,4), %xmm3
vmovss 0x240(%rsp,%r12,4), %xmm4
vmovss %xmm2, 0xe0(%rsp)
vmovss %xmm3, 0xe4(%rsp)
vmovss %xmm4, 0xe8(%rsp)
vmovss %xmm0, 0xec(%rsp)
vmovss %xmm1, 0xf0(%rsp)
movl %edx, 0xf4(%rsp)
movl %eax, 0xf8(%rsp)
movl (%rcx), %eax
movl %eax, 0xfc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x100(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x40(%rsp)
vmovss 0x210(%rsp,%r12,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%rbp), %rax
movq %rax, 0x118(%rsp)
movq %rcx, 0x120(%rsp)
movq %rsi, 0x128(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%rbp), %rax
testq %rax, %rax
je 0x208536
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x208569
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x208565
testb $0x2, (%r14)
jne 0x20854b
testb $0x40, 0x3e(%rbp)
je 0x208558
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x208569
xorl %eax, %eax
jmp 0x20857f
movq 0x10(%rsp), %rax
vmovss 0x40(%rsp), %xmm0
vmovss %xmm0, 0x20(%rax)
btcq %r12, %r15
movb $0x1, %al
movq 0x10(%rsp), %rsi
jmp 0x20840a
movq 0x90(%rsp), %rdi
vmovups 0x330(%rsp), %ymm6
vmovups 0x310(%rsp), %ymm7
vmovups 0x2f0(%rsp), %ymm8
movq 0x80(%rsp), %r8
vmovups 0x2d0(%rsp), %ymm9
vmovups 0x2b0(%rsp), %ymm10
movq 0x78(%rsp), %r9
vmovups 0x290(%rsp), %ymm11
movq 0x70(%rsp), %r10
movq 0x68(%rsp), %r11
movq 0x60(%rsp), %rbx
movq 0x58(%rsp), %r14
vmovups 0x270(%rsp), %ymm12
vmovups 0x250(%rsp), %ymm13
leaq 0x350(%rsp), %r15
movq 0x88(%rsp), %r12
xorl %ebp, %ebp
cmpl $0x3, %ebp
jne 0x207be0
jmp 0x207a38
testb $0x1, 0x7(%rsp)
vxorps %xmm5, %xmm5, %xmm5
movq 0x90(%rsp), %rdi
vmovups 0x330(%rsp), %ymm6
vmovups 0x310(%rsp), %ymm7
vmovups 0x2f0(%rsp), %ymm8
movq 0x80(%rsp), %r8
vmovups 0x2d0(%rsp), %ymm9
vmovups 0x2b0(%rsp), %ymm10
movq 0x78(%rsp), %r9
vmovups 0x290(%rsp), %ymm11
movq 0x70(%rsp), %r10
movq 0x68(%rsp), %r11
movq 0x60(%rsp), %rbx
movq 0x58(%rsp), %r14
vmovups 0x270(%rsp), %ymm12
vmovups 0x250(%rsp), %ymm13
leaq 0x350(%rsp), %r15
movq 0x88(%rsp), %r12
movl $0x0, %ebp
je 0x20860c
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
jmp 0x20860c
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMvMBIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/// Occlusion (shadow-ray) query for one ray against an N-wide BVH: answers
/// "is anything hit?" rather than "what is hit first", so traversal aborts
/// on the first primitive intersection and records occlusion by setting
/// ray.tfar = -inf. Uses an explicit stack of NodeRef entries; template
/// parameters choose the branching factor N, permitted node types, robust
/// traversal math, and the leaf-level primitive intersector.
///
/// @param This     intersector collection; This->ptr is the BVH to traverse
/// @param ray      ray to test; tfar becomes neg_inf when occluded
/// @param context  query context passed through to the primitive intersector
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (negative tfar marks occlusion) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers, clamping tnear/tfar to non-negative */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop; 'pop:' labels the loop body for the goto in the inner loop */
while (true) pop:
{
/* pop next node — empty stack means we are done */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: walk down inner nodes until a leaf or a miss */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* a false return means cur was not an inner node: handle it as a leaf below */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
/* first hit suffices for occlusion: mark the ray and abort traversal */
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack (populated by intersectors deferring subtrees) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1568, %rsp # imm = 0x1568
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x2092de
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x2092de
leaq 0x3c8(%rsp), %r8
movq 0x70(%rax), %rax
movq %rax, -0x8(%r8)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d187b1(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce88c8(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1ce3fe6(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d18825(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d177c6(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss (%rsi), %ymm7
vbroadcastss 0x4(%rsi), %ymm5
vmovups %ymm5, 0x3a0(%rsp)
vbroadcastss 0x8(%rsi), %ymm5
vmovups %ymm5, 0x380(%rsp)
vbroadcastss 0x1d1779a(%rip), %xmm5 # 0x1f1ff14
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vmulps %xmm5, %xmm3, %xmm5
setb %r9b
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x360(%rsp)
vmovshdup %xmm4, %xmm6 # xmm6 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[1,1,1,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x340(%rsp)
vshufpd $0x1, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x320(%rsp)
vshufps $0x0, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x300(%rsp)
shll $0x5, %r9d
xorl %r10d, %r10d
vucomiss %xmm2, %xmm6
vshufps $0x55, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x2e0(%rsp)
vshufps $0xaa, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[2,2,2,2]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x2c0(%rsp)
setb %r10b
shll $0x5, %r10d
orq $0x40, %r10
xorl %r11d, %r11d
vucomiss %xmm2, %xmm3
setb %r11b
shll $0x5, %r11d
orq $0x80, %r11
movq %r9, %r14
xorq $0x20, %r14
movq %r10, %r15
xorq $0x20, %r15
movq %r11, %r13
xorq $0x20, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovdqu %ymm1, 0x2a0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovups %ymm0, 0x280(%rsp)
leaq 0x1f47705(%rip), %rax # 0x214ff80
vmovdqa 0xf0(%rax), %xmm0
vmovdqa %xmm0, 0x110(%rsp)
leaq 0x3c0(%rsp), %rbp
vmovups %ymm7, 0x120(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x68(%rsp)
movq %r13, 0x60(%rsp)
cmpq %rbp, %r8
je 0x2092de
movq -0x8(%r8), %r12
addq $-0x8, %r8
testb $0x8, %r12b
jne 0x2089ee
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r9), %ymm0, %ymm1
vaddps 0x40(%rax,%r9), %ymm1, %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps 0x360(%rsp), %ymm1, %ymm1
vmulps 0x100(%rax,%r10), %ymm0, %ymm2
vaddps 0x40(%rax,%r10), %ymm2, %ymm2
vmovups 0x2a0(%rsp), %ymm3
vmaxps %ymm1, %ymm3, %ymm1
vmovups 0x3a0(%rsp), %ymm4
vsubps %ymm4, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmulps 0x340(%rsp), %ymm2, %ymm2
vaddps 0x40(%rax,%r11), %ymm3, %ymm3
vmovups 0x380(%rsp), %ymm5
vsubps %ymm5, %ymm3, %ymm3
vmulps 0x320(%rsp), %ymm3, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r14), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r14), %ymm3, %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x300(%rsp), %ymm2, %ymm2
vmovups 0x280(%rsp), %ymm3
vminps %ymm2, %ymm3, %ymm2
vmulps 0x100(%rax,%r15), %ymm0, %ymm3
vaddps 0x40(%rax,%r15), %ymm3, %ymm3
vsubps %ymm4, %ymm3, %ymm3
vmulps 0x2e0(%rsp), %ymm3, %ymm3
vmulps 0x100(%rax,%r13), %ymm0, %ymm4
vaddps 0x40(%rax,%r13), %ymm4, %ymm4
vsubps %ymm5, %ymm4, %ymm4
vmulps 0x2c0(%rsp), %ymm4, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x208a36
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %ebx
testb $0x8, %r12b
jne 0x208a32
testq %rbx, %rbx
je 0x208a63
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdi
xorl %ecx, %ecx
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdi
jne 0x208a68
movq %rax, %r12
testl %ecx, %ecx
je 0x2088bd
jmp 0x208ab0
pushq $0x6
jmp 0x208a65
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x2089de
pushq $0x4
popq %rcx
jmp 0x208a28
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x208aa6
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x208a77
movq %rcx, %r12
xorl %ecx, %ecx
jmp 0x208a28
cmpl $0x6, %ecx
jne 0x2092d5
movl %r12d, %edi
andl $0xf, %edi
xorl %ecx, %ecx
addq $-0x8, %rdi
setne %al
je 0x2092d5
andq $-0x10, %r12
xorl %r14d, %r14d
movb %al, 0xe(%rsp)
imulq $0x140, %r14, %rax # imm = 0x140
vbroadcastss 0x1c(%rsi), %xmm0
vmulps 0x90(%r12,%rax), %xmm0, %xmm1
vaddps (%r12,%rax), %xmm1, %xmm1
vmulps 0xa0(%r12,%rax), %xmm0, %xmm2
vaddps 0x10(%r12,%rax), %xmm2, %xmm2
vmulps 0xb0(%r12,%rax), %xmm0, %xmm3
vaddps 0x20(%r12,%rax), %xmm3, %xmm11
vmulps 0xc0(%r12,%rax), %xmm0, %xmm4
vaddps 0x30(%r12,%rax), %xmm4, %xmm4
vmulps 0xd0(%r12,%rax), %xmm0, %xmm5
vaddps 0x40(%r12,%rax), %xmm5, %xmm5
vmulps 0xe0(%r12,%rax), %xmm0, %xmm6
vaddps 0x50(%r12,%rax), %xmm6, %xmm12
vmulps 0xf0(%r12,%rax), %xmm0, %xmm6
vaddps 0x60(%r12,%rax), %xmm6, %xmm8
vmulps 0x100(%r12,%rax), %xmm0, %xmm6
vaddps 0x70(%r12,%rax), %xmm6, %xmm9
vmulps 0x110(%r12,%rax), %xmm0, %xmm0
movq %rax, 0x10(%rsp)
vaddps 0x80(%r12,%rax), %xmm0, %xmm0
vbroadcastss (%rsi), %xmm10
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm10, %xmm1, %xmm7
vsubps %xmm13, %xmm2, %xmm3
vsubps %xmm14, %xmm11, %xmm1
vsubps %xmm10, %xmm4, %xmm2
vmovaps %xmm2, 0x30(%rsp)
vsubps %xmm13, %xmm5, %xmm11
vsubps %xmm14, %xmm12, %xmm5
vsubps %xmm10, %xmm8, %xmm12
vsubps %xmm13, %xmm9, %xmm6
vsubps %xmm14, %xmm0, %xmm4
vmovaps %xmm4, 0x20(%rsp)
vsubps %xmm7, %xmm12, %xmm14
vsubps %xmm3, %xmm6, %xmm15
vsubps %xmm1, %xmm4, %xmm0
vaddps %xmm3, %xmm6, %xmm2
vaddps %xmm1, %xmm4, %xmm4
vmulps %xmm2, %xmm0, %xmm8
vmulps %xmm4, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm7, %xmm12, %xmm8
vmulps %xmm4, %xmm14, %xmm4
vmovaps %xmm0, 0x190(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm4, %xmm9, %xmm4
vmovaps %xmm15, 0x1a0(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x1b0(%rsp)
vmulps %xmm2, %xmm14, %xmm0
vsubps %xmm8, %xmm0, %xmm0
vbroadcastss 0x18(%rsi), %xmm10
vmulps %xmm0, %xmm10, %xmm0
vbroadcastss 0x14(%rsi), %xmm15
vmulps %xmm4, %xmm15, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vbroadcastss 0x10(%rsi), %xmm2
vmovaps %xmm2, 0x80(%rsp)
vmulps %xmm2, %xmm13, %xmm4
vaddps %xmm0, %xmm4, %xmm9
vmovaps %xmm11, %xmm4
vsubps %xmm11, %xmm3, %xmm2
vmovaps %xmm5, %xmm8
vsubps %xmm5, %xmm1, %xmm11
vmovaps %xmm3, 0x90(%rsp)
vaddps %xmm4, %xmm3, %xmm0
vmovaps %xmm4, %xmm5
vmovaps %xmm1, 0x1c0(%rsp)
vaddps %xmm1, %xmm8, %xmm4
vmovaps %xmm8, %xmm1
vmulps %xmm0, %xmm11, %xmm14
vmulps %xmm4, %xmm2, %xmm3
vsubps %xmm14, %xmm3, %xmm3
vmovaps 0x30(%rsp), %xmm14
vsubps %xmm14, %xmm7, %xmm13
vmulps %xmm4, %xmm13, %xmm4
vmovaps %xmm7, 0xa0(%rsp)
vaddps %xmm7, %xmm14, %xmm7
vmulps %xmm7, %xmm11, %xmm8
vsubps %xmm4, %xmm8, %xmm4
vmovaps %xmm2, 0x180(%rsp)
vmulps %xmm7, %xmm2, %xmm7
vmovaps %xmm13, 0x170(%rsp)
vmulps %xmm0, %xmm13, %xmm0
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vmulps %xmm4, %xmm15, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vmovaps 0x80(%rsp), %xmm13
vmulps %xmm3, %xmm13, %xmm3
vaddps %xmm0, %xmm3, %xmm0
vsubps %xmm12, %xmm14, %xmm8
vaddps %xmm12, %xmm14, %xmm3
vsubps %xmm6, %xmm5, %xmm12
vaddps %xmm6, %xmm5, %xmm2
vmovaps 0x20(%rsp), %xmm5
vsubps %xmm5, %xmm1, %xmm4
vaddps %xmm5, %xmm1, %xmm1
vmulps %xmm2, %xmm4, %xmm5
vmulps %xmm1, %xmm12, %xmm7
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm1, %xmm8, %xmm1
vmulps %xmm3, %xmm4, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmovups 0x120(%rsp), %ymm7
vmulps %xmm3, %xmm12, %xmm3
vmulps %xmm2, %xmm8, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm10, 0x20(%rsp)
vmulps %xmm2, %xmm10, %xmm2
vmovaps %xmm15, 0x30(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm5, %xmm13, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm0, %xmm9, %xmm2
vaddps %xmm2, %xmm1, %xmm13
vminps %xmm0, %xmm9, %xmm2
vminps %xmm1, %xmm2, %xmm2
vbroadcastss 0x1d18146(%rip), %xmm3 # 0x1f20ec4
vmovaps %xmm3, %xmm10
vandps %xmm3, %xmm13, %xmm5
vbroadcastss 0x1d1813d(%rip), %xmm3 # 0x1f20ecc
vmovaps %xmm5, 0x140(%rsp)
vmulps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d1811b(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm3, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm9, 0x160(%rsp)
vmovaps %xmm0, 0x150(%rsp)
vmaxps %xmm0, %xmm9, %xmm5
vmaxps %xmm1, %xmm5, %xmm1
vcmpleps %xmm3, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm0
leaq 0xf(%rsp), %rax
movq %rax, 0x200(%rsp)
vtestps 0x110(%rsp), %xmm0
je 0x209285
vmovaps 0x190(%rsp), %xmm7
vmovaps 0x180(%rsp), %xmm15
vmulps %xmm7, %xmm15, %xmm1
vmovaps 0x1a0(%rsp), %xmm9
vmulps %xmm11, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm12, %xmm11, %xmm3
vmulps %xmm4, %xmm15, %xmm5
vsubps %xmm3, %xmm5, %xmm5
vandps %xmm1, %xmm10, %xmm1
vandps %xmm3, %xmm10, %xmm3
vcmpltps %xmm3, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm5
vmovaps 0x170(%rsp), %xmm14
vmulps %xmm4, %xmm14, %xmm1
vmulps %xmm7, %xmm14, %xmm2
vmovaps 0x1b0(%rsp), %xmm7
vmulps %xmm7, %xmm11, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm8, %xmm11, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm3, %xmm10, %xmm3
vandps %xmm1, %xmm10, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmovaps %xmm0, %xmm4
vmulps %xmm8, %xmm15, %xmm0
vmulps %xmm7, %xmm15, %xmm2
vmulps %xmm14, %xmm9, %xmm3
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm0, %xmm6, %xmm6
vandps %xmm3, %xmm10, %xmm3
vandps %xmm0, %xmm10, %xmm0
vcmpltps %xmm0, %xmm3, %xmm0
vblendvps %xmm0, %xmm2, %xmm6, %xmm0
vmulps 0x20(%rsp), %xmm0, %xmm2
vmulps 0x30(%rsp), %xmm1, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmulps 0x80(%rsp), %xmm5, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm2, %xmm3
vmulps 0x1c0(%rsp), %xmm0, %xmm2
vmulps 0x90(%rsp), %xmm1, %xmm6
vaddps %xmm2, %xmm6, %xmm2
vmulps 0xa0(%rsp), %xmm5, %xmm6
vaddps %xmm2, %xmm6, %xmm2
vaddps %xmm2, %xmm2, %xmm2
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm7
vbroadcastss 0x1ce3815(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm2, %xmm2
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm2, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm2, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovups 0x120(%rsp), %ymm7
vcmpneqps 0x1ce2ad5(%rip), %xmm3, %xmm3 # 0x1eeba10
vandps %xmm6, %xmm3, %xmm6
vandps 0x110(%rsp), %xmm4, %xmm3
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm3, %xmm6
je 0x209285
addq %r12, 0x10(%rsp)
vandps %xmm3, %xmm6, %xmm3
vmovaps 0x160(%rsp), %xmm6
vmovaps %xmm6, 0x1d0(%rsp)
vmovaps 0x150(%rsp), %xmm4
vmovaps %xmm4, 0x1e0(%rsp)
vmovaps %xmm13, 0x1f0(%rsp)
movq %rax, 0x200(%rsp)
vmovaps %xmm3, 0x210(%rsp)
vmovaps %xmm2, 0x240(%rsp)
vmovaps %xmm5, 0x250(%rsp)
vmovaps %xmm1, 0x260(%rsp)
vmovaps %xmm0, 0x270(%rsp)
movq (%rdx), %rax
movq %rax, 0x80(%rsp)
vrcpps %xmm13, %xmm0
vmulps %xmm0, %xmm13, %xmm1
vbroadcastss 0x1ce372f(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1ce7fee(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x140(%rsp), %xmm5
vcmpnltps %xmm1, %xmm5, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x220(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovmskps %xmm3, %ebp
movq %r14, 0x78(%rsp)
bsfq %rbp, %r15
movq 0x10(%rsp), %rax
movl 0x120(%rax,%r15,4), %eax
movq 0x80(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r13)
je 0x209084
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
movl $0x0, %ecx
jne 0x20909e
cmpq $0x0, 0x48(%r13)
jne 0x20909e
xorl %eax, %eax
movq 0x78(%rsp), %r14
jmp 0x20908c
btcq %r15, %rbp
movb $0x1, %al
xorl %ecx, %ecx
testb %al, %al
je 0x2092ad
testq %rbp, %rbp
jne 0x209037
jmp 0x209285
movq %rdi, 0x30(%rsp)
vmovss 0x220(%rsp,%r15,4), %xmm0
vmovss 0x230(%rsp,%r15,4), %xmm1
movq 0x8(%rdx), %rcx
movq 0x10(%rsp), %rdi
movl 0x130(%rdi,%r15,4), %edi
vmovss 0x250(%rsp,%r15,4), %xmm2
vmovss 0x260(%rsp,%r15,4), %xmm3
vmovss 0x270(%rsp,%r15,4), %xmm4
vmovss %xmm2, 0xb0(%rsp)
vmovss %xmm3, 0xb4(%rsp)
vmovss %xmm4, 0xb8(%rsp)
vmovss %xmm0, 0xbc(%rsp)
vmovss %xmm1, 0xc0(%rsp)
movl %edi, 0xc4(%rsp)
movl %eax, 0xc8(%rsp)
movl (%rcx), %eax
movl %eax, 0xcc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xd0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x240(%rsp,%r15,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xe0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xe8(%rsp)
movq %rcx, 0xf0(%rsp)
movq %rsi, 0xf8(%rsp)
leaq 0xb0(%rsp), %rax
movq %rax, 0x100(%rsp)
movl $0x1, 0x108(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r8, 0x58(%rsp)
movq %r9, 0x50(%rsp)
movq %r10, 0x48(%rsp)
movq %r11, 0x40(%rsp)
je 0x20920b
leaq 0xe0(%rsp), %rdi
movq %rdx, 0xa0(%rsp)
movq %rsi, 0x90(%rsp)
vzeroupper
callq *%rax
movq 0x40(%rsp), %r11
movq 0x48(%rsp), %r10
movq 0x50(%rsp), %r9
vmovups 0x120(%rsp), %ymm7
movq 0x58(%rsp), %r8
movq 0x90(%rsp), %rsi
movq 0xa0(%rsp), %rdx
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x209268
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x209264
testb $0x2, (%r14)
jne 0x209221
testb $0x40, 0x3e(%r13)
je 0x209257
leaq 0xe0(%rsp), %rdi
movq %rdx, %r14
movq %rsi, %r13
vzeroupper
callq *%rax
movq 0x40(%rsp), %r11
movq 0x48(%rsp), %r10
movq 0x50(%rsp), %r9
vmovups 0x120(%rsp), %ymm7
movq 0x58(%rsp), %r8
movq %r13, %rsi
movq %r14, %rdx
movq 0xe0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x209268
xorl %eax, %eax
jmp 0x209279
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %r15, %rbp
movb $0x1, %al
xorl %ecx, %ecx
movq 0x30(%rsp), %rdi
jmp 0x20907d
incq %r14
cmpq %rdi, %r14
setb %al
leaq 0x3c0(%rsp), %rbp
jne 0x208ad5
movq 0x70(%rsp), %r14
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
jmp 0x2092d5
testb $0x1, 0xe(%rsp)
movq 0x70(%rsp), %r14
movq 0x68(%rsp), %r15
movq 0x60(%rsp), %r13
leaq 0x3c0(%rsp), %rbp
je 0x2092d5
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rcx
cmpl $0x3, %ecx
jne 0x2088ac
addq $0x1568, %rsp # imm = 0x1568
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMiMBIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
                                                                         Ray& __restrict__ ray,
                                                                         RayQueryContext* __restrict__ context)
{
  /* Occlusion (shadow-ray) query over an N-wide BVH: traversal stops at the
     first occluding primitive, which is signalled by setting ray.tfar = -inf.
     This:    intersector collection; This->ptr holds the BVH.
     ray:     single ray; tfar is both the query length and the output flag.
     context: per-query context forwarded to the primitive intersector. */
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;
  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;
  /* early out for already occluded rays (a negative tfar marks a prior occlusion hit) */
  if (unlikely(ray.tfar < 0.0f))
    return;
  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);
  /* stack state */
  NodeRef stack[stackSize];    // stack of nodes that still need to get traversed
  NodeRef* stackPtr = stack+1; // current stack pointer; starts one past stack[0], which is seeded with the root below
  NodeRef* stackEnd = stack+stackSize;
  stack[0] = bvh->root;
  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif
  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
  /* load the ray into SIMD registers (tnear/tfar clamped to be non-negative) */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;
  /* pop loop; the 'pop' label lets the inner loop restart an iteration via goto */
  while (true) pop:
  {
    /* pop next node; traversal is finished once the stack is empty */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = (NodeRef)*stackPtr;
    /* downtraversal loop: descend until 'cur' is a leaf */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(shadow.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; } // 'cur' is not an inner node: fall through to leaf handling
      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;
      /* select next child and push other children */
      nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
    }
    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(shadow.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
      ray.tfar = neg_inf; // mark the ray occluded and terminate traversal
      break;
    }
    /* push lazy node onto stack (a node handle the primitive intersector may have produced for deferred traversal) */
    if (unlikely(lazy_node)) {
      *stackPtr = (NodeRef)lazy_node;
      stackPtr++;
    }
  }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x15a8, %rsp # imm = 0x15A8
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x20a1a0
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x20a1a0
leaq 0x408(%rsp), %r8
movq 0x70(%rax), %rax
movq %rax, -0x8(%r8)
vxorps %xmm6, %xmm6, %xmm6
vmaxss 0xc(%rsi), %xmm6, %xmm1
vmovaps 0x10(%rsi), %xmm2
vbroadcastss 0x1d17b77(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1ce7c8e(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1ce33ac(%rip), %xmm4 # 0x1eec714
vdivps %xmm2, %xmm4, %xmm2
vbroadcastss 0x1d17beb(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d16b8c(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d16b83(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss (%rsi), %ymm7
vbroadcastss 0x4(%rsi), %ymm4
vmovups %ymm4, 0x3e0(%rsp)
vbroadcastss 0x8(%rsi), %ymm4
vmovups %ymm4, 0x3c0(%rsp)
xorl %r9d, %r9d
vucomiss %xmm6, %xmm3
setb %r9b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x3a0(%rsp)
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm5
vmovups %ymm5, 0x380(%rsp)
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x360(%rsp)
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x340(%rsp)
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovups %ymm3, 0x320(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vmovups %ymm2, 0x300(%rsp)
shll $0x5, %r9d
xorl %r10d, %r10d
vucomiss %xmm6, %xmm4
setb %r10b
shll $0x5, %r10d
orq $0x40, %r10
xorl %r11d, %r11d
vucomiss %xmm6, %xmm5
setb %r11b
shll $0x5, %r11d
orq $0x80, %r11
movq %r9, %r14
xorq $0x20, %r14
movq %r10, %r15
xorq $0x20, %r15
movq %r11, %r13
xorq $0x20, %r13
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovdqu %ymm1, 0x2e0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovups %ymm0, 0x2c0(%rsp)
leaq 0x1f46acb(%rip), %rax # 0x214ff80
vmovdqa 0xf0(%rax), %xmm0
vmovdqa %xmm0, 0x150(%rsp)
movq %rdx, 0x48(%rsp)
movq %rsi, 0x40(%rsp)
vmovups %ymm7, 0x160(%rsp)
movq %r9, 0xb0(%rsp)
movq %r10, 0xa8(%rsp)
movq %r11, 0xa0(%rsp)
movq %r14, 0x98(%rsp)
movq %r15, 0x90(%rsp)
movq %r13, 0x88(%rsp)
leaq 0x400(%rsp), %rax
cmpq %rax, %r8
je 0x20a1a0
movq -0x8(%r8), %r12
addq $-0x8, %r8
testb $0x8, %r12b
jne 0x209653
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r9), %ymm0, %ymm1
vaddps 0x40(%rax,%r9), %ymm1, %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps 0x3a0(%rsp), %ymm1, %ymm1
vmulps 0x100(%rax,%r10), %ymm0, %ymm2
vaddps 0x40(%rax,%r10), %ymm2, %ymm2
vmovups 0x2e0(%rsp), %ymm3
vmaxps %ymm1, %ymm3, %ymm1
vmovups 0x3e0(%rsp), %ymm4
vsubps %ymm4, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmulps 0x380(%rsp), %ymm2, %ymm2
vaddps 0x40(%rax,%r11), %ymm3, %ymm3
vmovups 0x3c0(%rsp), %ymm5
vsubps %ymm5, %ymm3, %ymm3
vmulps 0x360(%rsp), %ymm3, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r14), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r14), %ymm3, %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x340(%rsp), %ymm2, %ymm2
vmovups 0x2c0(%rsp), %ymm3
vminps %ymm2, %ymm3, %ymm2
vmulps 0x100(%rax,%r15), %ymm0, %ymm3
vaddps 0x40(%rax,%r15), %ymm3, %ymm3
vsubps %ymm4, %ymm3, %ymm3
vmulps 0x320(%rsp), %ymm3, %ymm3
vmulps 0x100(%rax,%r13), %ymm0, %ymm4
vaddps 0x40(%rax,%r13), %ymm4, %ymm4
vsubps %ymm5, %ymm4, %ymm4
vmulps 0x300(%rsp), %ymm4, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x20969b
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %ebx
testb $0x8, %r12b
jne 0x209697
testq %rbx, %rbx
je 0x2096c8
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdi
xorl %ebp, %ebp
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdi
jne 0x2096cd
movq %rax, %r12
testl %ebp, %ebp
je 0x209522
jmp 0x209713
pushq $0x6
jmp 0x2096ca
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x209643
pushq $0x4
popq %rbp
jmp 0x20968d
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x20970b
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x2096dc
movq %rcx, %r12
jmp 0x20968d
cmpl $0x6, %ebp
jne 0x20a197
movl %r12d, %eax
andl $0xf, %eax
xorl %ebp, %ebp
addq $-0x8, %rax
setne %cl
je 0x20a197
movq %r8, 0xb8(%rsp)
andq $-0x10, %r12
movq (%rdx), %r8
xorl %edi, %edi
movq %rax, 0xe0(%rsp)
movq %r8, 0x50(%rsp)
movb %cl, 0xe(%rsp)
movq %rdi, 0xe8(%rsp)
imulq $0x50, %rdi, %rdi
vmovss 0x1c(%rsi), %xmm0
movl 0x30(%r12,%rdi), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %rax
vmovss 0x28(%rax), %xmm1
vmovss 0x2c(%rax), %xmm2
vmovss 0x30(%rax), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm6
vroundss $0x9, %xmm6, %xmm6, %xmm0
vaddss 0x1ce722b(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmaxss %xmm0, %xmm1, %xmm2
vcvttss2si %xmm2, %ecx
movslq %ecx, %rcx
movq 0xe0(%rax), %rdx
imulq $0x38, %rcx, %rcx
movl (%r12,%rdi), %r13d
movl 0x4(%r12,%rdi), %r15d
movq (%rdx,%rcx), %rax
movq 0x38(%rdx,%rcx), %rcx
vmovups (%rax,%r13,4), %xmm4
movl 0x10(%r12,%rdi), %esi
vmovups (%rax,%rsi,4), %xmm5
movl 0x20(%r12,%rdi), %edx
movq %rdx, 0x30(%rsp)
vmovups (%rax,%rdx,4), %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmovups (%rax,%r15,4), %xmm7
movl 0x14(%r12,%rdi), %edx
vmovups (%rax,%rdx,4), %xmm8
movl 0x24(%r12,%rdi), %r8d
movq %r8, 0x70(%rsp)
vmovups (%rax,%r8,4), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movl 0x8(%r12,%rdi), %r14d
vmovups (%rax,%r14,4), %xmm9
movl 0x18(%r12,%rdi), %r10d
vmovups (%rax,%r10,4), %xmm11
movl 0x28(%r12,%rdi), %r8d
vmovups (%rax,%r8,4), %xmm10
movl 0xc(%r12,%rdi), %ebp
vmovups (%rax,%rbp,4), %xmm1
movl 0x1c(%r12,%rdi), %r11d
vmovups (%rax,%r11,4), %xmm14
movl 0x2c(%r12,%rdi), %r9d
vmovups (%rax,%r9,4), %xmm12
vmovups (%rcx,%r13,4), %xmm15
vmovups (%rcx,%rsi,4), %xmm13
vsubss %xmm2, %xmm6, %xmm6
vunpcklps %xmm9, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm9[0],xmm4[1],xmm9[1]
vunpckhps %xmm9, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm9[2],xmm4[3],xmm9[3]
vunpcklps %xmm1, %xmm7, %xmm0 # xmm0 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
vunpckhps %xmm1, %xmm7, %xmm1 # xmm1 = xmm7[2],xmm1[2],xmm7[3],xmm1[3]
vmovups (%rcx,%r15,4), %xmm3
vunpcklps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vmovaps %xmm1, 0xc0(%rsp)
vunpcklps %xmm0, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm7 # xmm7 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vunpcklps %xmm11, %xmm5, %xmm0 # xmm0 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpckhps %xmm11, %xmm5, %xmm1 # xmm1 = xmm5[2],xmm11[2],xmm5[3],xmm11[3]
vunpcklps %xmm14, %xmm8, %xmm2 # xmm2 = xmm8[0],xmm14[0],xmm8[1],xmm14[1]
vunpckhps %xmm14, %xmm8, %xmm5 # xmm5 = xmm8[2],xmm14[2],xmm8[3],xmm14[3]
vmovups (%rcx,%r14,4), %xmm11
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vmovaps %xmm1, 0x60(%rsp)
vunpcklps %xmm2, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm5 # xmm5 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps 0x20(%rsp), %xmm1
vunpcklps %xmm10, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
vunpckhps %xmm10, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm10[2],xmm1[3],xmm10[3]
vmovaps 0x10(%rsp), %xmm4
vunpcklps %xmm12, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm12[0],xmm4[1],xmm12[1]
vunpckhps %xmm12, %xmm4, %xmm10 # xmm10 = xmm4[2],xmm12[2],xmm4[3],xmm12[3]
vmovups (%rcx,%rbp,4), %xmm12
vunpcklps %xmm10, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm10[0],xmm1[1],xmm10[1]
vmovaps %xmm1, 0x20(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0xd0(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x10(%rsp)
vunpcklps %xmm11, %xmm15, %xmm10 # xmm10 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm0 # xmm0 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm12, %xmm3, %xmm11 # xmm11 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vmovups (%rcx,%r10,4), %xmm12
vunpcklps %xmm3, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm11, %xmm10, %xmm3 # xmm3 = xmm10[0],xmm11[0],xmm10[1],xmm11[1]
vunpckhps %xmm11, %xmm10, %xmm1 # xmm1 = xmm10[2],xmm11[2],xmm10[3],xmm11[3]
vunpcklps %xmm12, %xmm13, %xmm11 # xmm11 = xmm13[0],xmm12[0],xmm13[1],xmm12[1]
vunpckhps %xmm12, %xmm13, %xmm10 # xmm10 = xmm13[2],xmm12[2],xmm13[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vmovups (%rcx,%r11,4), %xmm13
vunpcklps %xmm13, %xmm12, %xmm14 # xmm14 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
vunpcklps %xmm14, %xmm11, %xmm0 # xmm0 = xmm11[0],xmm14[0],xmm11[1],xmm14[1]
vunpckhps %xmm14, %xmm11, %xmm2 # xmm2 = xmm11[2],xmm14[2],xmm11[3],xmm14[3]
movq 0x30(%rsp), %rax
vmovups (%rcx,%rax,4), %xmm11
vmovups (%rcx,%r8,4), %xmm12
movq 0x50(%rsp), %r8
movq 0x40(%rsp), %rsi
movq 0x48(%rsp), %rdx
vunpcklps %xmm12, %xmm11, %xmm14 # xmm14 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpckhps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
movq 0x70(%rsp), %rax
vmovups (%rcx,%rax,4), %xmm12
vmovups (%rcx,%r9,4), %xmm13
vunpcklps %xmm13, %xmm12, %xmm15 # xmm15 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpcklps %xmm15, %xmm14, %xmm13 # xmm13 = xmm14[0],xmm15[0],xmm14[1],xmm15[1]
vunpckhps %xmm15, %xmm14, %xmm14 # xmm14 = xmm14[2],xmm15[2],xmm14[3],xmm15[3]
vshufps $0x0, %xmm6, %xmm6, %xmm15 # xmm15 = xmm6[0,0,0,0]
vmovss 0x1ce2d53(%rip), %xmm12 # 0x1eec714
vsubss %xmm6, %xmm12, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm12 # xmm12 = xmm6[0,0,0,0]
vmulps %xmm3, %xmm15, %xmm3
vmulps %xmm9, %xmm12, %xmm6
vaddps %xmm3, %xmm6, %xmm6
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm7, %xmm12, %xmm3
vaddps %xmm1, %xmm3, %xmm7
vmulps %xmm4, %xmm15, %xmm1
vmulps 0xc0(%rsp), %xmm12, %xmm3
vaddps %xmm1, %xmm3, %xmm4
vmulps %xmm0, %xmm15, %xmm0
vmulps %xmm8, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm8
vmulps %xmm2, %xmm15, %xmm0
vmulps %xmm5, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm5
vmovaps 0x30(%r12,%rdi), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x40(%r12,%rdi), %xmm0
movb 0xe(%rsp), %dil
vmulps %xmm10, %xmm15, %xmm1
vmulps 0x60(%rsp), %xmm12, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm13, %xmm15, %xmm2
vmulps %xmm14, %xmm15, %xmm3
vmulps %xmm11, %xmm15, %xmm9
vmulps 0xd0(%rsp), %xmm12, %xmm10
vaddps %xmm2, %xmm10, %xmm2
vmulps 0x10(%rsp), %xmm12, %xmm10
vaddps %xmm3, %xmm10, %xmm15
vmulps 0x20(%rsp), %xmm12, %xmm3
vaddps %xmm3, %xmm9, %xmm9
vmovaps %xmm0, 0x1f0(%rsp)
vbroadcastss (%rsi), %xmm12
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm12, %xmm6, %xmm10
vsubps %xmm13, %xmm7, %xmm3
vsubps %xmm14, %xmm4, %xmm7
vsubps %xmm12, %xmm8, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vsubps %xmm13, %xmm5, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vsubps %xmm14, %xmm1, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vsubps %xmm12, %xmm2, %xmm12
vsubps %xmm13, %xmm15, %xmm2
vmovaps %xmm2, 0x60(%rsp)
vsubps %xmm14, %xmm9, %xmm1
vmovaps %xmm1, 0x70(%rsp)
vsubps %xmm10, %xmm12, %xmm14
vsubps %xmm3, %xmm2, %xmm15
vsubps %xmm7, %xmm1, %xmm0
vaddps %xmm3, %xmm2, %xmm4
vaddps %xmm7, %xmm1, %xmm5
vmulps %xmm4, %xmm0, %xmm8
vmulps %xmm5, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm10, %xmm12, %xmm8
vmulps %xmm5, %xmm14, %xmm5
vmovaps %xmm0, 0x1b0(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm5, %xmm9, %xmm5
vmovaps %xmm15, 0x1c0(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x1d0(%rsp)
vmulps %xmm4, %xmm14, %xmm4
vsubps %xmm8, %xmm4, %xmm4
vbroadcastss 0x18(%rsi), %xmm6
vmulps %xmm4, %xmm6, %xmm4
vbroadcastss 0x14(%rsi), %xmm15
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vbroadcastss 0x10(%rsi), %xmm11
vmulps %xmm13, %xmm11, %xmm5
vaddps %xmm4, %xmm5, %xmm9
vmovaps 0x10(%rsp), %xmm2
vsubps %xmm2, %xmm3, %xmm0
vmovaps 0x20(%rsp), %xmm13
vsubps %xmm13, %xmm7, %xmm14
vmovaps %xmm3, 0xc0(%rsp)
vaddps %xmm2, %xmm3, %xmm4
vmovaps %xmm7, 0x1e0(%rsp)
vaddps %xmm7, %xmm13, %xmm5
vmulps %xmm4, %xmm14, %xmm7
vmulps %xmm5, %xmm0, %xmm3
vsubps %xmm7, %xmm3, %xmm3
vmovaps 0x30(%rsp), %xmm8
vsubps %xmm8, %xmm10, %xmm1
vmulps %xmm5, %xmm1, %xmm5
vmovaps %xmm10, 0xd0(%rsp)
vaddps %xmm8, %xmm10, %xmm7
vmovaps %xmm8, %xmm10
vmovaps %xmm14, 0x190(%rsp)
vmulps %xmm7, %xmm14, %xmm8
vmovaps %xmm1, %xmm14
vsubps %xmm5, %xmm8, %xmm5
vmovaps %xmm0, 0x1a0(%rsp)
vmulps %xmm7, %xmm0, %xmm7
vmulps %xmm4, %xmm1, %xmm4
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm3, %xmm11, %xmm3
vaddps %xmm4, %xmm3, %xmm4
vsubps %xmm12, %xmm10, %xmm5
vaddps %xmm12, %xmm10, %xmm3
vmovaps 0x60(%rsp), %xmm0
vsubps %xmm0, %xmm2, %xmm12
vaddps %xmm0, %xmm2, %xmm2
vmovaps 0x70(%rsp), %xmm0
vsubps %xmm0, %xmm13, %xmm8
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm2, %xmm8, %xmm1
vmulps %xmm0, %xmm12, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vmulps %xmm3, %xmm8, %xmm7
vsubps %xmm0, %xmm7, %xmm0
vmovups 0x160(%rsp), %ymm7
vmulps %xmm3, %xmm12, %xmm3
vmulps %xmm2, %xmm5, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm6, 0x30(%rsp)
vmulps %xmm2, %xmm6, %xmm2
vmovaps %xmm15, 0x70(%rsp)
vmulps %xmm0, %xmm15, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm11, 0x60(%rsp)
vmulps %xmm1, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm4, %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm11
vminps %xmm4, %xmm9, %xmm1
vminps %xmm0, %xmm1, %xmm1
vbroadcastss 0x1d17247(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm11, %xmm3
vbroadcastss 0x1d17242(%rip), %xmm2 # 0x1f20ecc
vmovaps %xmm3, 0x10(%rsp)
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1d17223(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm2, %xmm3
vcmpnltps %xmm3, %xmm1, %xmm1
vmovaps %xmm9, 0x20(%rsp)
vmaxps %xmm4, %xmm9, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm2, %xmm0, %xmm0
vorps %xmm0, %xmm1, %xmm0
movb $0x0, 0xf(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x240(%rsp)
vtestps 0x150(%rsp), %xmm0
je 0x20a0f4
vmovaps 0x1b0(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm10
vmovaps %xmm0, 0x180(%rsp)
vmulps %xmm10, %xmm9, %xmm0
vmovaps 0x1c0(%rsp), %xmm15
vmovaps 0x190(%rsp), %xmm6
vmulps %xmm6, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm6, %xmm12, %xmm2
vmulps %xmm8, %xmm10, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d17195(%rip), %xmm7 # 0x1f20ec4
vandps %xmm7, %xmm0, %xmm0
vandps %xmm7, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm0
vmulps %xmm8, %xmm14, %xmm1
vmulps %xmm14, %xmm9, %xmm2
vmovaps 0x1d0(%rsp), %xmm9
vmulps %xmm6, %xmm9, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm5, %xmm6, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm7, %xmm3, %xmm3
vandps %xmm7, %xmm1, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmulps %xmm5, %xmm10, %xmm2
vmulps %xmm10, %xmm9, %xmm3
vmulps %xmm14, %xmm15, %xmm5
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm7, %xmm5, %xmm5
vandps %xmm7, %xmm2, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps 0x30(%rsp), %xmm2, %xmm3
vmulps 0x70(%rsp), %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vmulps 0x60(%rsp), %xmm0, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x1e0(%rsp), %xmm2, %xmm3
vmulps 0xc0(%rsp), %xmm1, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps 0xd0(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1ce2918(%rip), %xmm9 # 0x1eec714
vsubps %xmm7, %xmm9, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vmovups 0x160(%rsp), %ymm7
vcmpneqps 0x1ce1bd8(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x180(%rsp), %xmm5
vandps 0x150(%rsp), %xmm5, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x20a0f4
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x20(%rsp), %xmm8
vmovaps %xmm8, 0x210(%rsp)
vmovaps %xmm4, 0x220(%rsp)
vmovaps %xmm11, 0x230(%rsp)
movq %rax, 0x240(%rsp)
vmovaps %xmm5, 0x250(%rsp)
vmovaps %xmm3, 0x280(%rsp)
vmovaps %xmm0, 0x290(%rsp)
vmovaps %xmm1, 0x2a0(%rsp)
vmovaps %xmm2, 0x2b0(%rsp)
vrcpps %xmm11, %xmm0
vmulps %xmm0, %xmm11, %xmm1
vbroadcastss 0x1ce2845(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1ce7104(%rip), %xmm1 # 0x1ef0fe8
vmovaps 0x10(%rsp), %xmm3
vcmpnltps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm8, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x260(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x270(%rsp)
vmovmskps %xmm5, %r13d
bsfq %r13, %rbp
movl 0x200(%rsp,%rbp,4), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %r15
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r15)
je 0x209f4e
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x209f68
cmpq $0x0, 0x48(%r15)
jne 0x209f68
xorl %eax, %eax
jmp 0x209f54
btcq %rbp, %r13
movb $0x1, %al
xorl %ebp, %ebp
testb %al, %al
je 0x20a14f
testq %r13, %r13
jne 0x209f19
jmp 0x20a0f4
vmovss 0x260(%rsp,%rbp,4), %xmm0
vmovss 0x270(%rsp,%rbp,4), %xmm1
movq 0x8(%rdx), %rcx
movl 0x1f0(%rsp,%rbp,4), %edi
vmovss 0x290(%rsp,%rbp,4), %xmm2
vmovss 0x2a0(%rsp,%rbp,4), %xmm3
vmovss 0x2b0(%rsp,%rbp,4), %xmm4
vmovss %xmm2, 0xf0(%rsp)
vmovss %xmm3, 0xf4(%rsp)
vmovss %xmm4, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl %edi, 0x104(%rsp)
movl %eax, 0x108(%rsp)
movl (%rcx), %eax
movl %eax, 0x10c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x110(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x30(%rsp)
vmovss 0x280(%rsp,%rbp,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x5c(%rsp)
leaq 0x5c(%rsp), %rax
movq %rax, 0x120(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x128(%rsp)
movq %rcx, 0x130(%rsp)
movq %rsi, 0x138(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0x140(%rsp)
movl $0x1, 0x148(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
je 0x20a08b
leaq 0x120(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x50(%rsp), %r8
vmovups 0x160(%rsp), %ymm7
movq 0x40(%rsp), %rsi
movq 0x48(%rsp), %rdx
movq 0x120(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20a0d7
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x20a0d3
testb $0x2, (%r14)
jne 0x20a0a1
testb $0x40, 0x3e(%r15)
je 0x20a0c6
leaq 0x120(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x50(%rsp), %r8
vmovups 0x160(%rsp), %ymm7
movq 0x40(%rsp), %rsi
movq 0x48(%rsp), %rdx
movq 0x120(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20a0d7
xorl %eax, %eax
jmp 0x20a0e8
vmovss 0x30(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %rbp, %r13
movb $0x1, %al
xorl %ebp, %ebp
movb 0xe(%rsp), %dil
jmp 0x209f56
movq 0xe8(%rsp), %rdi
incq %rdi
movq 0xe0(%rsp), %rax
cmpq %rax, %rdi
setb %cl
jne 0x20974f
movq 0xb8(%rsp), %r8
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r10
movq 0xa0(%rsp), %r11
movq 0x98(%rsp), %r14
movq 0x90(%rsp), %r15
movq 0x88(%rsp), %r13
xorl %ebp, %ebp
jmp 0x20a197
testb $0x1, %dil
movq 0xb8(%rsp), %r8
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r10
movq 0xa0(%rsp), %r11
movq 0x98(%rsp), %r14
movq 0x90(%rsp), %r15
movq 0x88(%rsp), %r13
je 0x20a197
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rbp
cmpl $0x3, %ebp
jne 0x209509
addq $0x15a8, %rsp # imm = 0x15A8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::QuadMvIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
                                                                         Ray& __restrict__ ray,
                                                                         RayQueryContext* __restrict__ context)
{
  /* Occlusion (shadow-ray) query over an N-wide BVH: traversal stops at the
     first occluding primitive, which is signalled by setting ray.tfar = -inf.
     This:    intersector collection; This->ptr holds the BVH.
     ray:     single ray; tfar is both the query length and the output flag.
     context: per-query context forwarded to the primitive intersector. */
  const BVH* __restrict__ bvh = (const BVH*)This->ptr;
  /* we may traverse an empty BVH in case all geometry was invalid */
  if (bvh->root == BVH::emptyNode)
    return;
  /* early out for already occluded rays (a negative tfar marks a prior occlusion hit) */
  if (unlikely(ray.tfar < 0.0f))
    return;
  /* perform per ray precalculations required by the primitive intersector */
  Precalculations pre(ray, bvh);
  /* stack state */
  NodeRef stack[stackSize];    // stack of nodes that still need to get traversed
  NodeRef* stackPtr = stack+1; // current stack pointer; starts one past stack[0], which is seeded with the root below
  NodeRef* stackEnd = stack+stackSize;
  stack[0] = bvh->root;
  /* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
  if (!ray.valid()) return;
#endif
  /* verify correct input */
  assert(ray.valid());
  assert(ray.tnear() >= 0.0f);
  assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
  /* load the ray into SIMD registers (tnear/tfar clamped to be non-negative) */
  TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
  /* initialize the node traverser */
  BVHNNodeTraverser1Hit<N, types> nodeTraverser;
  /* pop loop; the 'pop' label lets the inner loop restart an iteration via goto */
  while (true) pop:
  {
    /* pop next node; traversal is finished once the stack is empty */
    if (unlikely(stackPtr == stack)) break;
    stackPtr--;
    NodeRef cur = (NodeRef)*stackPtr;
    /* downtraversal loop: descend until 'cur' is a leaf */
    while (true)
    {
      /* intersect node */
      size_t mask; vfloat<N> tNear;
      STAT3(shadow.trav_nodes,1,1,1);
      bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
      if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; } // 'cur' is not an inner node: fall through to leaf handling
      /* if no child is hit, pop next node */
      if (unlikely(mask == 0))
        goto pop;
      /* select next child and push other children */
      nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
    }
    /* this is a leaf node */
    assert(cur != BVH::emptyNode);
    STAT3(shadow.trav_leaves,1,1,1);
    size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
    size_t lazy_node = 0;
    if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
      ray.tfar = neg_inf; // mark the ray occluded and terminate traversal
      break;
    }
    /* push lazy node onto stack (a node handle the primitive intersector may have produced for deferred traversal) */
    if (unlikely(lazy_node)) {
      *stackPtr = (NodeRef)lazy_node;
      stackPtr++;
    }
  }
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1640, %rsp # imm = 0x1640
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x20a1ea
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x20a1d8
leaq 0x488(%rsp), %rdi
vmovaps 0x10(%r14), %xmm3
vmaxss 0xc(%r14), %xmm2, %xmm1
vbroadcastss 0x1d16cab(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce6dc2(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
movq 0x70(%rax), %rax
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1ce24ce(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %ymm6
vbroadcastss 0x4(%r14), %ymm7
vbroadcastss 0x8(%r14), %ymm8
vaddps %xmm3, %xmm4, %xmm3
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
movq %rax, -0x8(%rdi)
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %rcx
xorq $0x20, %rcx
movq %r10, %r15
xorq $0x20, %r15
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm5
leaq 0x1f45c85(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x260(%rsp)
vperm2f128 $0x2, (%rax), %ymm0, %ymm1 # ymm1 = mem[0,1],ymm0[0,1]
leaq 0x480(%rsp), %rbx
vbroadcastss 0x1ce66a8(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1ce23e7(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x2a0(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps %ymm6, 0x240(%rsp)
vmovaps %ymm7, 0x220(%rsp)
vmovaps %ymm8, 0x200(%rsp)
vmovaps %ymm9, 0x1e0(%rsp)
vmovaps %ymm10, 0x1c0(%rsp)
vmovaps %ymm3, 0x1a0(%rsp)
movq %r15, 0x38(%rsp)
vmovaps %ymm4, 0x180(%rsp)
vmovaps %ymm5, 0x160(%rsp)
cmpq %rbx, %rdi
je 0x20a1d8
movq -0x8(%rdi), %r13
addq $-0x8, %rdi
testb $0x8, %r13b
jne 0x20a426
vmovaps 0x40(%r13,%r8), %ymm0
vsubps %ymm6, %ymm0, %ymm0
vmulps %ymm0, %ymm9, %ymm0
vmovaps 0x40(%r13,%r9), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm10, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r13,%r10), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm3, %ymm1
vmaxps %ymm4, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r13,%r11), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmovaps 0x40(%r13,%rcx), %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vminps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%r13,%r15), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r12d
testb $0x8, %r13b
jne 0x20a46c
testq %r12, %r12
je 0x20a470
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rsi
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rsi
jne 0x20a475
movq %rax, %r13
xorl %esi, %esi
testl %esi, %esi
je 0x20a3a3
jmp 0x20a4d5
pushq $0x6
jmp 0x20a472
pushq $0x4
popq %rsi
jmp 0x20a462
movq %rdx, 0xc0(%rsp)
movq %rbx, %rdx
movq %rcx, %rbx
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rsi, %rax
je 0x20a4c2
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x20a492
movq %rcx, %r13
movq %rbx, %rcx
movq %rdx, %rbx
movq 0xc0(%rsp), %rdx
jmp 0x20a460
cmpl $0x6, %esi
jne 0x20ab4a
movl %r13d, %eax
andl $0xf, %eax
xorl %esi, %esi
addq $-0x8, %rax
movq %rax, 0x98(%rsp)
setne %bl
je 0x20ab42
andq $-0x10, %r13
xorl %r15d, %r15d
imulq $0xe0, %r15, %rax
vbroadcastf128 0xd0(%r13,%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vbroadcastf128 0xc0(%r13,%rax), %ymm1 # ymm1 = mem[0,1,0,1]
vmovaps %ymm1, 0x2e0(%rsp)
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps (%r13,%rax), %xmm0
vmovaps 0x10(%r13,%rax), %xmm1
vmovaps 0x20(%r13,%rax), %xmm2
vinsertf128 $0x1, 0x60(%r13,%rax), %ymm0, %ymm3
vinsertf128 $0x1, 0x70(%r13,%rax), %ymm1, %ymm5
vinsertf128 $0x1, 0x80(%r13,%rax), %ymm2, %ymm7
vbroadcastf128 0x30(%r13,%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vbroadcastf128 0x40(%r13,%rax), %ymm1 # ymm1 = mem[0,1,0,1]
vbroadcastf128 0x50(%r13,%rax), %ymm2 # ymm2 = mem[0,1,0,1]
vbroadcastf128 0x90(%r13,%rax), %ymm9 # ymm9 = mem[0,1,0,1]
vbroadcastf128 0xa0(%r13,%rax), %ymm10 # ymm10 = mem[0,1,0,1]
vbroadcastf128 0xb0(%r13,%rax), %ymm11 # ymm11 = mem[0,1,0,1]
vsubps %ymm0, %ymm3, %ymm4
vsubps %ymm1, %ymm5, %ymm12
vmovaps %ymm12, 0x40(%rsp)
vsubps %ymm2, %ymm7, %ymm8
vsubps %ymm3, %ymm9, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm11, %ymm11
vmulps %ymm11, %ymm12, %ymm0
vmulps %ymm10, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmulps %ymm9, %ymm8, %ymm0
vmovaps %ymm4, 0xc0(%rsp)
vmulps %ymm4, %ymm11, %ymm2
vsubps %ymm0, %ymm2, %ymm6
vmulps %ymm4, %ymm10, %ymm0
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm0, %ymm12, %ymm4
vbroadcastss (%r14), %ymm12
vbroadcastss 0x4(%r14), %ymm13
vbroadcastss 0x8(%r14), %ymm14
vbroadcastss 0x14(%r14), %ymm15
vsubps %ymm12, %ymm3, %ymm3
vbroadcastss 0x18(%r14), %ymm12
vsubps %ymm13, %ymm5, %ymm5
vsubps %ymm14, %ymm7, %ymm7
vmulps %ymm7, %ymm15, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x10(%r14), %ymm14
vmulps %ymm3, %ymm12, %ymm0
vmulps %ymm7, %ymm14, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm5, %ymm14, %ymm1
vmulps %ymm3, %ymm15, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm4, 0xa0(%rsp)
vmulps %ymm4, %ymm12, %ymm2
vmulps %ymm6, %ymm15, %ymm12
vmovaps %ymm6, %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmovaps 0x140(%rsp), %ymm4
vmulps %ymm4, %ymm14, %ymm12
vmovaps %ymm4, %ymm14
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm11, %ymm11
vmulps %ymm0, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm1, %ymm8, %ymm1
vmulps 0x40(%rsp), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d16823(%rip), %ymm1 # 0x1f20ec0
vandps %ymm1, %ymm2, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0xc0(%rsp), %ymm13, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %ymm10, %ymm6, %ymm0
vcmpnltps %ymm10, %ymm8, %ymm1
vandps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d167ef(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm2, %ymm4
vcmpneqps %ymm2, %ymm10, %ymm1
vandps %ymm1, %ymm0, %ymm0
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm10
vtestps 0x260(%rsp), %ymm10
jne 0x20a714
incq %r15
cmpq 0x98(%rsp), %r15
setb %bl
jne 0x20a502
jmp 0x20aaf5
vandps 0x260(%rsp), %ymm10, %ymm10
vmulps 0xa0(%rsp), %ymm7, %ymm0
vmulps %ymm5, %ymm15, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps %ymm3, %ymm14, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm3
vbroadcastss 0xc(%r14), %ymm0
vmulps %ymm0, %ymm4, %ymm0
vcmpltps %ymm3, %ymm0, %ymm0
vbroadcastss 0x20(%r14), %ymm1
vmulps %ymm1, %ymm4, %ymm1
vcmpleps %ymm1, %ymm3, %ymm1
vandps %ymm0, %ymm1, %ymm5
vtestps %ymm10, %ymm5
je 0x20a6fb
vandps %ymm5, %ymm10, %ymm0
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm8, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
vmovaps %ymm4, 0x360(%rsp)
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps 0x320(%rsp), %ymm1
vsubps %ymm1, %ymm4, %ymm2
vmovaps 0x2a0(%rsp), %ymm3
vblendvps %ymm3, %ymm2, %ymm0, %ymm2
vmovaps %ymm2, 0x300(%rsp)
vsubps %ymm0, %ymm4, %ymm0
vblendvps %ymm3, %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x280(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm1
vmovaps %ymm1, 0x420(%rsp)
vmulps %ymm3, %ymm15, %ymm1
vmovaps %ymm1, 0x440(%rsp)
vrcpps %ymm4, %ymm1
vmulps 0xa0(%rsp), %ymm3, %ymm3
vmovaps %ymm3, 0x460(%rsp)
vmulps %ymm1, %ymm4, %ymm3
vbroadcastss 0x1ce1ef9(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vmulps 0x340(%rsp), %ymm1, %ymm3
movq (%rdx), %rax
movq %rax, 0x90(%rsp)
vmovaps %ymm3, 0x400(%rsp)
vmulps %ymm1, %ymm2, %ymm2
vmovaps %ymm2, 0x3c0(%rsp)
vmulps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovmskps %ymm0, %eax
movq %rcx, 0x20(%rsp)
movq %rax, 0x40(%rsp)
bsfq %rax, %rcx
movq %rcx, 0xa0(%rsp)
movl 0x2e0(%rsp,%rcx,4), %eax
movq 0x90(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x18(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x140(%rsp)
testl %ecx, 0x34(%rax)
je 0x20a8d9
movq 0x10(%rdx), %rax
movq %rax, 0x30(%rsp)
cmpq $0x0, 0x10(%rax)
movq 0x20(%rsp), %rcx
jne 0x20a911
movq 0x140(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x20a911
xorl %eax, %eax
jmp 0x20a8f6
movq 0xa0(%rsp), %rax
movq 0x40(%rsp), %rcx
btcq %rax, %rcx
movq %rcx, 0x40(%rsp)
movb $0x1, %al
movq 0x20(%rsp), %rcx
testb %al, %al
je 0x20ab58
movq 0x40(%rsp), %rax
testq %rax, %rax
jne 0x20a870
jmp 0x20a6fb
movq %r11, 0x68(%rsp)
movq %r10, 0x70(%rsp)
movq %r9, 0x78(%rsp)
movq %r8, 0x80(%rsp)
movq %rdi, 0x88(%rsp)
movq 0xa0(%rsp), %rsi
vmovss 0x3c0(%rsp,%rsi,4), %xmm0
vmovss 0x3e0(%rsp,%rsi,4), %xmm1
movq %rdx, 0xc0(%rsp)
movq 0x8(%rdx), %rcx
movl 0x2c0(%rsp,%rsi,4), %edx
vmovss 0x420(%rsp,%rsi,4), %xmm2
vmovss 0x440(%rsp,%rsi,4), %xmm3
vmovss 0x460(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xe0(%rsp)
vmovss %xmm3, 0xe4(%rsp)
vmovss %xmm4, 0xe8(%rsp)
vmovss %xmm0, 0xec(%rsp)
vmovss %xmm1, 0xf0(%rsp)
movl %edx, 0xf4(%rsp)
movq 0x18(%rsp), %rax
movl %eax, 0xf8(%rsp)
movl (%rcx), %eax
movl %eax, 0xfc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x100(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x18(%rsp)
vmovss 0x400(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x2c(%rsp)
leaq 0x2c(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x140(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0x118(%rsp)
movq %rcx, 0x120(%rsp)
movq %r14, 0x128(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x20aa5a
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20aa9e
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x20aa9a
movq 0x30(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x20aa80
movq 0x140(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x20aa8d
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20aa9e
xorl %eax, %eax
jmp 0x20aac2
vmovss 0x18(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq 0x40(%rsp), %rax
movq 0xa0(%rsp), %rcx
btcq %rcx, %rax
movq %rax, 0x40(%rsp)
movb $0x1, %al
movq 0xc0(%rsp), %rdx
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r8
movq 0x78(%rsp), %r9
movq 0x70(%rsp), %r10
movq 0x68(%rsp), %r11
movq 0x20(%rsp), %rcx
xorl %esi, %esi
jmp 0x20a8f6
vmovaps 0x240(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm7
vmovaps 0x200(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm10
vmovaps 0x1a0(%rsp), %ymm3
movq 0x38(%rsp), %r15
vmovaps 0x180(%rsp), %ymm4
vmovaps 0x160(%rsp), %ymm5
leaq 0x480(%rsp), %rbx
cmpl $0x3, %esi
jne 0x20a392
jmp 0x20a1d8
testb $0x1, %bl
vmovaps 0x240(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm7
vmovaps 0x200(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm10
vmovaps 0x1a0(%rsp), %ymm3
movq 0x38(%rsp), %r15
vmovaps 0x180(%rsp), %ymm4
vmovaps 0x160(%rsp), %ymm5
leaq 0x480(%rsp), %rbx
je 0x20ab4a
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rsi
jmp 0x20ab4a
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1640, %rsp # imm = 0x1640
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x20abf4
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x20abe2
movq %rdx, %r8
leaq 0x488(%rsp), %r9
vmovaps 0x10(%r14), %xmm3
vmaxss 0xc(%r14), %xmm2, %xmm1
vbroadcastss 0x1d1629e(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce63b5(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
movq 0x70(%rax), %rax
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1ce1ac1(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %ymm6
vbroadcastss 0x4(%r14), %ymm7
vbroadcastss 0x8(%r14), %ymm8
vaddps %xmm3, %xmm4, %xmm3
xorl %r10d, %r10d
vucomiss %xmm2, %xmm3
movq %rax, -0x8(%r9)
setb %r10b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
shll $0x5, %r10d
xorl %r11d, %r11d
vucomiss %xmm2, %xmm4
setb %r11b
shll $0x5, %r11d
orq $0x40, %r11
xorl %esi, %esi
vucomiss %xmm2, %xmm5
setb %sil
shll $0x5, %esi
orq $0x80, %rsi
movq %r10, %rdi
xorq $0x20, %rdi
movq %r11, %r15
xorq $0x20, %r15
movq %rsi, %rcx
xorq $0x20, %rcx
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm5
leaq 0x1f4527a(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x260(%rsp)
vperm2f128 $0x2, (%rax), %ymm0, %ymm1 # ymm1 = mem[0,1],ymm0[0,1]
leaq 0x480(%rsp), %rbx
vbroadcastss 0x1ce5c9d(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1ce19dc(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x2a0(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vmovaps %ymm6, 0x240(%rsp)
vmovaps %ymm7, 0x220(%rsp)
vmovaps %ymm8, 0x200(%rsp)
vmovaps %ymm9, 0x1e0(%rsp)
vmovaps %ymm10, 0x1c0(%rsp)
vmovaps %ymm3, 0x1a0(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
movq %r15, 0x20(%rsp)
movq %rcx, 0x18(%rsp)
vmovaps %ymm4, 0x180(%rsp)
vmovaps %ymm5, 0x160(%rsp)
cmpq %rbx, %r9
je 0x20abe2
movq -0x8(%r9), %r13
addq $-0x8, %r9
testb $0x8, %r13b
jne 0x20ae40
vmovaps 0x40(%r13,%r10), %ymm0
vsubps %ymm6, %ymm0, %ymm0
vmulps %ymm0, %ymm9, %ymm0
vmovaps 0x40(%r13,%r11), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm10, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r13,%rsi), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm3, %ymm1
vmaxps %ymm4, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r13,%rdi), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmovaps 0x40(%r13,%r15), %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vminps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%r13,%rcx), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r12d
testb $0x8, %r13b
jne 0x20ae86
testq %r12, %r12
je 0x20ae8a
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rdx
jne 0x20ae8f
movq %rax, %r13
xorl %eax, %eax
testl %eax, %eax
je 0x20adbd
jmp 0x20aee1
pushq $0x6
jmp 0x20ae8c
pushq $0x4
popq %rax
jmp 0x20ae7c
movq %rcx, %rbx
movq %rax, (%r9)
addq $0x8, %r9
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x20aed1
movq %rcx, (%r9)
addq $0x8, %r9
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x20aea1
movq %rcx, %r13
movq %rbx, %rcx
leaq 0x480(%rsp), %rbx
jmp 0x20ae7a
cmpl $0x6, %eax
jne 0x20b648
movl %r13d, %edx
andl $0xf, %edx
xorl %eax, %eax
addq $-0x8, %rdx
movq %rdx, 0x78(%rsp)
setne %bl
je 0x20b640
andq $-0x10, %r13
movq (%r8), %rax
movq %rax, 0x38(%rsp)
xorl %r15d, %r15d
imulq $0x60, %r15, %rax
prefetcht0 (%r13,%rax)
prefetcht0 0x40(%r13,%rax)
movl 0x40(%r13,%rax), %edx
movq 0x38(%rsp), %rcx
movq 0x228(%rcx), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%r13,%rax), %esi
movl 0x4(%r13,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm4
movl 0x10(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x20(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x30(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x44(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm7
movl 0x14(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x24(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm5
movl 0x34(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x48(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm10
movl 0x18(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm11
movl 0x28(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm9
movl 0x38(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x4c(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm12
movl 0x1c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm13
movl 0x2c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm14
movl 0x3c(%r13,%rax), %edx
vunpcklps %xmm10, %xmm4, %xmm15 # xmm15 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm12, %xmm7, %xmm10 # xmm10 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
vunpckhps %xmm12, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vunpcklps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vmovaps %ymm4, 0x40(%rsp)
vunpcklps %xmm10, %xmm15, %xmm7 # xmm7 = xmm15[0],xmm10[0],xmm15[1],xmm10[1]
vunpckhps %xmm10, %xmm15, %xmm10 # xmm10 = xmm15[2],xmm10[2],xmm15[3],xmm10[3]
vunpcklps %xmm11, %xmm2, %xmm15 # xmm15 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
vunpckhps %xmm11, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm11[2],xmm2[3],xmm11[3]
vunpcklps %xmm13, %xmm6, %xmm11 # xmm11 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
vunpckhps %xmm13, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
vunpcklps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpcklps %xmm11, %xmm15, %xmm6 # xmm6 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm11 # xmm11 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm9, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
vunpckhps %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
vunpcklps %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
vunpckhps %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm14[2],xmm5[3],xmm14[3]
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vunpcklps %xmm9, %xmm13, %xmm5 # xmm5 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
vunpckhps %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
vunpcklps %xmm8, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm12, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm13, %xmm12 # xmm12 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
vunpckhps %xmm8, %xmm13, %xmm8 # xmm8 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
vbroadcastf128 0x40(%r13,%rax), %ymm3 # ymm3 = mem[0,1,0,1]
vmovaps %ymm3, 0x2e0(%rsp)
vbroadcastf128 0x50(%r13,%rax), %ymm3 # ymm3 = mem[0,1,0,1]
vmovaps %ymm3, 0x2c0(%rsp)
vinsertf128 $0x1, %xmm5, %ymm7, %ymm4
vinsertf128 $0x1, %xmm9, %ymm10, %ymm5
vmovaps 0x40(%rsp), %ymm3
vinsertf128 $0x1, %xmm1, %ymm3, %ymm7
vinsertf128 $0x1, %xmm6, %ymm6, %ymm1
vinsertf128 $0x1, %xmm11, %ymm11, %ymm6
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm12, %ymm12, %ymm9
vinsertf128 $0x1, %xmm8, %ymm8, %ymm10
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vsubps %ymm1, %ymm4, %ymm3
vsubps %ymm6, %ymm5, %ymm12
vmovaps %ymm12, 0x100(%rsp)
vsubps %ymm2, %ymm7, %ymm8
vsubps %ymm4, %ymm9, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm0, %ymm11
vmulps %ymm11, %ymm12, %ymm0
vmulps %ymm10, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x140(%rsp)
vmulps %ymm9, %ymm8, %ymm0
vmovaps %ymm3, 0x40(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vsubps %ymm0, %ymm2, %ymm6
vmulps %ymm3, %ymm10, %ymm0
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm0, %ymm12, %ymm3
vbroadcastss (%r14), %ymm12
vbroadcastss 0x4(%r14), %ymm13
vbroadcastss 0x8(%r14), %ymm14
vbroadcastss 0x14(%r14), %ymm15
vsubps %ymm12, %ymm4, %ymm2
vbroadcastss 0x18(%r14), %ymm12
vsubps %ymm13, %ymm5, %ymm5
vsubps %ymm14, %ymm7, %ymm7
vmulps %ymm7, %ymm15, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x10(%r14), %ymm14
vmulps %ymm2, %ymm12, %ymm0
vmulps %ymm7, %ymm14, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm5, %ymm14, %ymm1
vmovaps %ymm2, 0x120(%rsp)
vmulps %ymm2, %ymm15, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm3, 0xe0(%rsp)
vmulps %ymm3, %ymm12, %ymm2
vmulps %ymm6, %ymm15, %ymm12
vmovaps %ymm6, %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmovaps 0x140(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm12
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm11, %ymm11
vmulps %ymm0, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm1, %ymm8, %ymm1
vmulps 0x100(%rsp), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d15cd0(%rip), %ymm1 # 0x1f20ec0
vandps %ymm1, %ymm2, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0x40(%rsp), %ymm13, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %ymm10, %ymm6, %ymm0
vcmpnltps %ymm10, %ymm8, %ymm1
vandps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d15c9f(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm2, %ymm4
vcmpneqps %ymm2, %ymm10, %ymm1
vandps %ymm1, %ymm0, %ymm0
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm10
vtestps 0x260(%rsp), %ymm10
jne 0x20b261
incq %r15
cmpq 0x78(%rsp), %r15
setb %bl
jne 0x20af13
jmp 0x20b5d8
vmovaps %ymm3, %ymm14
vandps 0x260(%rsp), %ymm10, %ymm10
vmulps 0xe0(%rsp), %ymm7, %ymm0
vmulps %ymm5, %ymm15, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x120(%rsp), %ymm3, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm3
vbroadcastss 0xc(%r14), %ymm0
vmulps %ymm0, %ymm4, %ymm0
vcmpltps %ymm3, %ymm0, %ymm0
vbroadcastss 0x20(%r14), %ymm1
vmulps %ymm1, %ymm4, %ymm1
vcmpleps %ymm1, %ymm3, %ymm1
vandps %ymm0, %ymm1, %ymm5
vtestps %ymm10, %ymm5
je 0x20b24b
vandps %ymm5, %ymm10, %ymm0
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm8, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
vmovaps %ymm4, 0x360(%rsp)
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps 0x320(%rsp), %ymm1
vsubps %ymm1, %ymm4, %ymm2
vmovaps 0x2a0(%rsp), %ymm3
vblendvps %ymm3, %ymm2, %ymm0, %ymm2
vmovaps %ymm2, 0x300(%rsp)
vsubps %ymm0, %ymm4, %ymm0
vblendvps %ymm3, %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovaps 0x280(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm1
vmovaps %ymm1, 0x420(%rsp)
vmulps %ymm3, %ymm15, %ymm1
vmovaps %ymm1, 0x440(%rsp)
vrcpps %ymm4, %ymm1
vmulps 0xe0(%rsp), %ymm3, %ymm3
vmovaps %ymm3, 0x460(%rsp)
vmulps %ymm1, %ymm4, %ymm3
vbroadcastss 0x1ce13a3(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vmulps 0x340(%rsp), %ymm1, %ymm3
vmovaps %ymm3, 0x400(%rsp)
vmulps %ymm1, %ymm2, %ymm2
vmovaps %ymm2, 0x3c0(%rsp)
vmulps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovmskps %ymm0, %edx
bsfq %rdx, %rsi
movl 0x2e0(%rsp,%rsi,4), %eax
movq 0x38(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rdi
movl 0x24(%r14), %ecx
testl %ecx, 0x34(%rdi)
je 0x20b3f0
movq 0x10(%r8), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x20b408
cmpq $0x0, 0x48(%rdi)
jne 0x20b408
xorl %eax, %eax
jmp 0x20b3f6
btcq %rsi, %rdx
movb $0x1, %al
testb %al, %al
je 0x20b656
testq %rdx, %rdx
jne 0x20b3b6
jmp 0x20b24b
movq %rcx, 0x100(%rsp)
movq %rdx, 0x40(%rsp)
movq %r11, 0x70(%rsp)
movq %r10, 0x120(%rsp)
movq %r9, 0xe0(%rsp)
vmovss 0x3c0(%rsp,%rsi,4), %xmm0
vmovss 0x3e0(%rsp,%rsi,4), %xmm1
movq %r8, 0x140(%rsp)
movq 0x8(%r8), %rcx
movl 0x2c0(%rsp,%rsi,4), %edx
vmovss 0x420(%rsp,%rsi,4), %xmm2
vmovss 0x440(%rsp,%rsi,4), %xmm3
vmovss 0x460(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0x80(%rsp)
vmovss %xmm3, 0x84(%rsp)
vmovss %xmm4, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovss %xmm1, 0x90(%rsp)
movl %edx, 0x94(%rsp)
movl %eax, 0x98(%rsp)
movl (%rcx), %eax
movl %eax, 0x9c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xa0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x10(%rsp)
movq %rsi, 0x68(%rsp)
vmovss 0x400(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x14(%rsp)
leaq 0x14(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%rdi), %rax
movq %rax, 0xb8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq %rdi, 0x60(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
je 0x20b549
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20b590
movq 0x100(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x20b58c
movq 0x100(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x20b572
movq 0x60(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x20b57f
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20b590
xorl %eax, %eax
jmp 0x20b5b1
vmovss 0x10(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq 0x40(%rsp), %rax
movq 0x68(%rsp), %rcx
btcq %rcx, %rax
movq %rax, 0x40(%rsp)
movb $0x1, %al
movq 0x140(%rsp), %r8
movq 0xe0(%rsp), %r9
movq 0x120(%rsp), %r10
movq 0x70(%rsp), %r11
movq 0x40(%rsp), %rdx
jmp 0x20b3f6
vmovaps 0x240(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm7
vmovaps 0x200(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm10
vmovaps 0x1a0(%rsp), %ymm3
movq 0x30(%rsp), %rsi
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r15
movq 0x18(%rsp), %rcx
vmovaps 0x180(%rsp), %ymm4
vmovaps 0x160(%rsp), %ymm5
leaq 0x480(%rsp), %rbx
xorl %eax, %eax
jmp 0x20b648
leaq 0x480(%rsp), %rbx
cmpl $0x3, %eax
jne 0x20adac
jmp 0x20abe2
testb $0x1, %bl
vmovaps 0x240(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm7
vmovaps 0x200(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm10
vmovaps 0x1a0(%rsp), %ymm3
movq 0x30(%rsp), %rsi
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r15
movq 0x18(%rsp), %rcx
vmovaps 0x180(%rsp), %ymm4
vmovaps 0x160(%rsp), %ymm5
leaq 0x480(%rsp), %rbx
movl $0x0, %eax
je 0x20b648
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rax
jmp 0x20b648
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, true, embree::avx::ArrayIntersector1<embree::avx::QuadMvIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x17c0, %rsp # imm = 0x17C0
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x20c41d
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x20c41d
leaq 0x608(%rsp), %rcx
movq 0x70(%rax), %rax
movq %rcx, 0x10(%rsp)
movq %rax, -0x8(%rcx)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d1578f(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce58a6(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1ce0fc4(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d15803(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d147a4(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d1479b(%rip), %xmm5 # 0x1f1ff14
vbroadcastss (%rsi), %ymm7
vbroadcastss 0x4(%rsi), %ymm8
vbroadcastss 0x8(%rsi), %ymm9
vmulps %xmm5, %xmm3, %xmm3
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
vshufps $0x0, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm11
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm12
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm14
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x5, %r9d
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x5, %r10d
orq $0x40, %r10
xorl %r11d, %r11d
vucomiss %xmm2, %xmm6
vinsertf128 $0x1, %xmm3, %ymm3, %ymm5
setb %r11b
shll $0x5, %r11d
orq $0x80, %r11
movq %r9, %r14
xorq $0x20, %r14
movq %r10, %r15
xorq $0x20, %r15
movq %r11, %r12
xorq $0x20, %r12
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm6
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm15
leaq 0x1f4473d(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm1
vbroadcastss 0x1ce0eba(%rip), %ymm2 # 0x1eec714
vbroadcastss 0x1ce5169(%rip), %ymm0 # 0x1ef09cc
vmovaps %ymm1, 0x2e0(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps %ymm7, 0x260(%rsp)
vmovaps %ymm8, 0x240(%rsp)
vmovaps %ymm9, 0x220(%rsp)
vmovaps %ymm10, 0x200(%rsp)
vmovaps %ymm11, 0x1e0(%rsp)
vmovaps %ymm12, 0x1c0(%rsp)
vmovaps %ymm13, 0x1a0(%rsp)
vmovaps %ymm14, 0x180(%rsp)
vmovaps %ymm5, 0x160(%rsp)
movq %r14, 0x50(%rsp)
movq %r15, 0x48(%rsp)
movq %r12, 0x40(%rsp)
vmovaps %ymm6, 0x140(%rsp)
vmovaps %ymm15, 0x120(%rsp)
leaq 0x600(%rsp), %rax
cmpq %rax, 0x10(%rsp)
je 0x20c41d
movq 0x10(%rsp), %rax
movq -0x8(%rax), %rbx
addq $-0x8, %rax
movq %rax, 0x10(%rsp)
testb $0x8, %bl
jne 0x20b99b
vmovaps 0x40(%rbx,%r9), %ymm0
vsubps %ymm7, %ymm0, %ymm0
vmulps %ymm0, %ymm10, %ymm0
vmovaps 0x40(%rbx,%r10), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm11, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r11), %ymm1
vsubps %ymm9, %ymm1, %ymm1
vmulps %ymm1, %ymm12, %ymm1
vmaxps %ymm6, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r14), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm13, %ymm1
vmovaps 0x40(%rbx,%r15), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm14, %ymm2
vminps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%rbx,%r12), %ymm2
vsubps %ymm9, %ymm2, %ymm2
vmulps %ymm2, %ymm5, %ymm2
vminps %ymm15, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r13d
testb $0x8, %bl
jne 0x20b9de
testq %r13, %r13
je 0x20b9e2
andq $-0x10, %rbx
bsfq %r13, %rax
leaq -0x1(%r13), %rdi
xorl %ecx, %ecx
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdi
jne 0x20b9e7
movq %rax, %rbx
testl %ecx, %ecx
je 0x20b912
jmp 0x20ba40
pushq $0x6
jmp 0x20b9e4
pushq $0x4
popq %rcx
jmp 0x20b9d4
movq 0x10(%rsp), %rcx
movq %rax, (%rcx)
addq $0x8, %rcx
movq %rcx, 0x10(%rsp)
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x20ba39
movq 0x10(%rsp), %rdi
movq %rcx, (%rdi)
addq $0x8, %rdi
movq %rdi, 0x10(%rsp)
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x20ba00
movq %rcx, %rbx
xorl %ecx, %ecx
jmp 0x20b9d4
cmpl $0x6, %ecx
jne 0x20c414
movl %ebx, %edi
andl $0xf, %edi
xorl %ecx, %ecx
addq $-0x8, %rdi
setne %al
je 0x20c414
andq $-0x10, %rbx
xorl %r8d, %r8d
movq %r13, 0x18(%rsp)
movb %al, 0xf(%rsp)
movq %r8, 0x58(%rsp)
imulq $0xe0, %r8, %rax
vbroadcastf128 0xd0(%rbx,%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vbroadcastf128 0xc0(%rbx,%rax), %ymm1 # ymm1 = mem[0,1,0,1]
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm0, 0x400(%rsp)
vmovaps (%rbx,%rax), %xmm0
vmovaps 0x10(%rbx,%rax), %xmm1
vmovaps 0x20(%rbx,%rax), %xmm2
vinsertf128 $0x1, 0x60(%rbx,%rax), %ymm0, %ymm0
vinsertf128 $0x1, 0x70(%rbx,%rax), %ymm1, %ymm3
vinsertf128 $0x1, 0x80(%rbx,%rax), %ymm2, %ymm2
vbroadcastf128 0x30(%rbx,%rax), %ymm4 # ymm4 = mem[0,1,0,1]
vbroadcastf128 0x40(%rbx,%rax), %ymm5 # ymm5 = mem[0,1,0,1]
vbroadcastf128 0x50(%rbx,%rax), %ymm14 # ymm14 = mem[0,1,0,1]
vbroadcastf128 0x90(%rbx,%rax), %ymm8 # ymm8 = mem[0,1,0,1]
vbroadcastf128 0xa0(%rbx,%rax), %ymm9 # ymm9 = mem[0,1,0,1]
vbroadcastf128 0xb0(%rbx,%rax), %ymm10 # ymm10 = mem[0,1,0,1]
vbroadcastss (%rsi), %ymm11
vbroadcastss 0x4(%rsi), %ymm12
vbroadcastss 0x8(%rsi), %ymm13
vsubps %ymm11, %ymm0, %ymm1
vsubps %ymm12, %ymm3, %ymm15
vsubps %ymm13, %ymm2, %ymm3
vsubps %ymm11, %ymm4, %ymm0
vmovaps %ymm0, 0x100(%rsp)
vsubps %ymm12, %ymm5, %ymm7
vsubps %ymm13, %ymm14, %ymm14
vsubps %ymm11, %ymm8, %ymm2
vsubps %ymm12, %ymm9, %ymm0
vsubps %ymm13, %ymm10, %ymm5
vmovaps %ymm5, 0x60(%rsp)
vsubps %ymm1, %ymm2, %ymm11
vmovaps %ymm0, 0xe0(%rsp)
vsubps %ymm15, %ymm0, %ymm12
vsubps %ymm3, %ymm5, %ymm6
vaddps %ymm0, %ymm15, %ymm4
vaddps %ymm3, %ymm5, %ymm5
vmulps %ymm4, %ymm6, %ymm8
vmulps %ymm5, %ymm12, %ymm9
vsubps %ymm8, %ymm9, %ymm9
vaddps %ymm1, %ymm2, %ymm8
vmulps %ymm5, %ymm11, %ymm5
vmovaps %ymm6, 0x380(%rsp)
vmulps %ymm6, %ymm8, %ymm10
vsubps %ymm5, %ymm10, %ymm5
vmovaps %ymm12, 0x3a0(%rsp)
vmulps %ymm8, %ymm12, %ymm8
vmovaps %ymm11, 0x3c0(%rsp)
vmulps %ymm4, %ymm11, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vbroadcastss 0x14(%rsi), %ymm10
vbroadcastss 0x18(%rsi), %ymm6
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm10, %ymm5
vbroadcastss 0x10(%rsi), %ymm13
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm9, %ymm13, %ymm5
vaddps %ymm4, %ymm5, %ymm11
vsubps %ymm7, %ymm15, %ymm9
vsubps %ymm14, %ymm3, %ymm12
vmovaps %ymm15, 0x280(%rsp)
vaddps %ymm7, %ymm15, %ymm4
vmovaps %ymm3, 0x3e0(%rsp)
vaddps %ymm3, %ymm14, %ymm5
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm5, %ymm9, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vmovaps 0x100(%rsp), %ymm0
vsubps %ymm0, %ymm1, %ymm15
vmulps %ymm5, %ymm15, %ymm5
vmovaps %ymm1, 0x2a0(%rsp)
vaddps %ymm0, %ymm1, %ymm8
vmovaps %ymm12, 0x360(%rsp)
vmulps %ymm8, %ymm12, %ymm12
vsubps %ymm5, %ymm12, %ymm5
vmovaps %ymm9, %ymm12
vmulps %ymm8, %ymm9, %ymm8
vmulps %ymm4, %ymm15, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm10, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm13, %ymm3
vaddps %ymm4, %ymm3, %ymm4
vsubps %ymm2, %ymm0, %ymm5
vaddps %ymm2, %ymm0, %ymm2
vmovaps 0xe0(%rsp), %ymm0
vsubps %ymm0, %ymm7, %ymm9
vaddps %ymm0, %ymm7, %ymm1
vmovaps 0x60(%rsp), %ymm3
vsubps %ymm3, %ymm14, %ymm0
vaddps %ymm3, %ymm14, %ymm3
vmulps %ymm1, %ymm0, %ymm7
vmulps %ymm3, %ymm9, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm3, %ymm5, %ymm3
vmulps %ymm2, %ymm0, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm9, %ymm2
vmulps %ymm1, %ymm5, %ymm1
vsubps %ymm2, %ymm1, %ymm1
vmovaps %ymm6, 0xe0(%rsp)
vmulps %ymm1, %ymm6, %ymm1
vmovaps %ymm10, 0x100(%rsp)
vmulps %ymm3, %ymm10, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm13, 0x60(%rsp)
vmulps %ymm7, %ymm13, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm4, %ymm11, %ymm2
vaddps %ymm2, %ymm1, %ymm7
vminps %ymm4, %ymm11, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d151d7(%rip), %ymm3 # 0x1f20ec4
vandps %ymm3, %ymm7, %ymm8
vbroadcastss 0x1d151d2(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm8, 0x300(%rsp)
vmulps %ymm3, %ymm8, %ymm3
vbroadcastss 0x1d151b0(%rip), %ymm8 # 0x1f20ec0
vxorps %ymm3, %ymm8, %ymm8
vcmpnltps %ymm8, %ymm2, %ymm2
vmovaps %ymm11, 0x340(%rsp)
vmovaps %ymm4, 0x320(%rsp)
vmaxps %ymm4, %ymm11, %ymm8
vmaxps %ymm1, %ymm8, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm1
vtestps %ymm1, %ymm1
je 0x20c38f
vmovaps 0x380(%rsp), %ymm6
vmovaps %ymm12, %ymm13
vmulps %ymm6, %ymm12, %ymm2
vmovaps 0x3a0(%rsp), %ymm4
vmovaps 0x360(%rsp), %ymm14
vmulps %ymm4, %ymm14, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm9, %ymm14, %ymm8
vmulps %ymm0, %ymm12, %ymm12
vsubps %ymm8, %ymm12, %ymm12
vbroadcastss 0x1d15139(%rip), %ymm10 # 0x1f20ec4
vandps %ymm2, %ymm10, %ymm2
vandps %ymm10, %ymm8, %ymm8
vcmpltps %ymm8, %ymm2, %ymm2
vblendvps %ymm2, %ymm3, %ymm12, %ymm12
vmulps %ymm0, %ymm15, %ymm2
vmulps %ymm6, %ymm15, %ymm3
vmovaps 0x3c0(%rsp), %ymm11
vmulps %ymm14, %ymm11, %ymm6
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm5, %ymm14, %ymm8
vsubps %ymm2, %ymm8, %ymm8
vandps %ymm6, %ymm10, %ymm6
vandps %ymm2, %ymm10, %ymm2
vcmpltps %ymm2, %ymm6, %ymm2
vmovaps %ymm9, %ymm0
vblendvps %ymm2, %ymm3, %ymm8, %ymm9
vmulps %ymm5, %ymm13, %ymm2
vmulps %ymm13, %ymm11, %ymm3
vmulps %ymm4, %ymm15, %ymm5
vmulps %ymm0, %ymm15, %ymm0
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm5, %ymm10, %ymm5
vandps %ymm2, %ymm10, %ymm2
vcmpltps %ymm2, %ymm5, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm0
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmulps 0xe0(%rsp), %ymm0, %ymm2
vmulps 0x100(%rsp), %ymm9, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x60(%rsp), %ymm12, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm2, %ymm3
vmulps 0x3e0(%rsp), %ymm0, %ymm2
vmulps 0x280(%rsp), %ymm9, %ymm5
vaddps %ymm2, %ymm5, %ymm2
vmulps 0x2a0(%rsp), %ymm12, %ymm5
vrcpps %ymm3, %ymm6
vaddps %ymm2, %ymm5, %ymm2
vaddps %ymm2, %ymm2, %ymm2
vmulps %ymm6, %ymm3, %ymm5
vbroadcastss 0x1ce08a9(%rip), %ymm8 # 0x1eec714
vsubps %ymm5, %ymm8, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%rsi), %ymm6
vmulps %ymm5, %ymm2, %ymm2
vcmpleps %ymm2, %ymm6, %ymm5
vbroadcastss 0x20(%rsi), %ymm6
vcmpleps %ymm6, %ymm2, %ymm6
vandps %ymm5, %ymm6, %ymm5
vcmpneqps 0x1d15062(%rip), %ymm3, %ymm3 # 0x1f20f00
vandps %ymm3, %ymm5, %ymm3
vextractf128 $0x1, %ymm3, %xmm5
vpackssdw %xmm5, %xmm3, %xmm3
vpand %xmm1, %xmm3, %xmm1
vpmovsxwd %xmm1, %xmm3
vpshufd $0xee, %xmm1, %xmm5 # xmm5 = xmm1[2,3,2,3]
vpmovsxwd %xmm5, %xmm5
vinsertf128 $0x1, %xmm5, %ymm3, %ymm3
vtestps %ymm3, %ymm3
je 0x20c38f
vmovaps 0x340(%rsp), %ymm8
vmovaps %ymm8, 0x440(%rsp)
vmovaps 0x320(%rsp), %ymm4
vmovaps %ymm4, 0x460(%rsp)
vmovaps %ymm7, 0x480(%rsp)
vmovaps %ymm12, 0x4a0(%rsp)
vmovaps %ymm9, 0x4c0(%rsp)
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm2, 0x560(%rsp)
vmovaps 0x2e0(%rsp), %ymm6
vmovaps %ymm6, 0x5e0(%rsp)
movq (%rdx), %r8
vrcpps %ymm7, %ymm2
vmulps %ymm2, %ymm7, %ymm3
vbroadcastss 0x1ce07c4(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1ce5083(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x300(%rsp), %ymm7
vcmpnltps %ymm3, %ymm7, %ymm3
vandps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm8, %ymm3
vminps %ymm5, %ymm3, %ymm3
vmulps %ymm2, %ymm4, %ymm2
vminps %ymm5, %ymm2, %ymm2
vsubps %ymm3, %ymm5, %ymm4
vsubps %ymm2, %ymm5, %ymm5
vblendvps %ymm6, %ymm4, %ymm2, %ymm2
vblendvps %ymm6, %ymm5, %ymm3, %ymm3
vmovaps %ymm2, 0x540(%rsp)
vmovaps %ymm3, 0x520(%rsp)
vmovaps 0x2c0(%rsp), %ymm4
vmulps %ymm4, %ymm12, %ymm2
vmulps %ymm4, %ymm9, %ymm3
vmulps %ymm0, %ymm4, %ymm0
vmovaps %ymm2, 0x580(%rsp)
vmovaps %ymm3, 0x5a0(%rsp)
vmovaps %ymm0, 0x5c0(%rsp)
vpsllw $0xf, %xmm1, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r12d
vmovaps 0x260(%rsp), %ymm7
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x220(%rsp), %ymm9
vmovaps 0x200(%rsp), %ymm10
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x160(%rsp), %ymm5
vmovaps 0x140(%rsp), %ymm6
vmovaps 0x120(%rsp), %ymm15
bsfq %r12, %r15
movl 0x420(%rsp,%r15,4), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r13)
je 0x20c091
movq 0x10(%rdx), %r14
cmpq $0x0, 0x10(%r14)
movl $0x0, %ecx
jne 0x20c0b0
cmpq $0x0, 0x48(%r13)
jne 0x20c0b0
xorl %eax, %eax
movq 0x18(%rsp), %r13
jmp 0x20c09e
btcq %r15, %r12
movb $0x1, %al
movq 0x18(%rsp), %r13
xorl %ecx, %ecx
testb %al, %al
je 0x20c3f4
testq %r12, %r12
jne 0x20c051
jmp 0x20c367
movq %rdi, 0x100(%rsp)
vmovss 0x520(%rsp,%r15,4), %xmm0
vmovd 0x540(%rsp,%r15,4), %xmm1
movq 0x8(%rdx), %rcx
movl 0x400(%rsp,%r15,4), %edi
vmovss 0x580(%rsp,%r15,4), %xmm2
vmovss 0x5a0(%rsp,%r15,4), %xmm3
vmovss 0x5c0(%rsp,%r15,4), %xmm4
vmovss %xmm2, 0x80(%rsp)
vmovss %xmm3, 0x84(%rsp)
vmovss %xmm4, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovd %xmm1, 0x90(%rsp)
movl %edi, 0x94(%rsp)
movl %eax, 0x98(%rsp)
movl (%rcx), %eax
movl %eax, 0x9c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xa0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0xe0(%rsp)
vmovd 0x560(%rsp,%r15,4), %xmm0
vmovd %xmm0, 0x20(%rsi)
orl $-0x1, 0x24(%rsp)
leaq 0x24(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xb8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %rsi, 0xc8(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r9, 0x38(%rsp)
movq %r10, 0x30(%rsp)
movq %r11, 0x28(%rsp)
movq %r8, 0x60(%rsp)
je 0x20c280
leaq 0xb0(%rsp), %rdi
movq %rdx, 0x2a0(%rsp)
movq %rsi, 0x280(%rsp)
vzeroupper
callq *%rax
movq 0x60(%rsp), %r8
vmovaps 0x120(%rsp), %ymm15
vmovaps 0x140(%rsp), %ymm6
vmovaps 0x160(%rsp), %ymm5
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r10
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x200(%rsp), %ymm10
movq 0x38(%rsp), %r9
vmovaps 0x220(%rsp), %ymm9
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x260(%rsp), %ymm7
movq 0x280(%rsp), %rsi
movq 0x2a0(%rsp), %rdx
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20c33f
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x20c33b
testb $0x2, (%r14)
jne 0x20c29e
testb $0x40, 0x3e(%r13)
je 0x20c32e
leaq 0xb0(%rsp), %rdi
movq %rdx, %r14
movq %rsi, %r13
vzeroupper
callq *%rax
movq 0x60(%rsp), %r8
vmovaps 0x120(%rsp), %ymm15
vmovaps 0x140(%rsp), %ymm6
vmovaps 0x160(%rsp), %ymm5
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r10
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x200(%rsp), %ymm10
movq 0x38(%rsp), %r9
vmovaps 0x220(%rsp), %ymm9
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x260(%rsp), %ymm7
movq %r13, %rsi
movq %r14, %rdx
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20c33f
xorl %eax, %eax
jmp 0x20c353
vmovd 0xe0(%rsp), %xmm0
vmovd %xmm0, 0x20(%rsi)
btcq %r15, %r12
movb $0x1, %al
movq 0x18(%rsp), %r13
xorl %ecx, %ecx
movq 0x100(%rsp), %rdi
jmp 0x20c09e
movq 0x50(%rsp), %r14
movq 0x48(%rsp), %r15
movq 0x40(%rsp), %r12
movq 0x58(%rsp), %r8
incq %r8
cmpq %rdi, %r8
setb %al
jne 0x20ba69
jmp 0x20c414
vmovaps 0x260(%rsp), %ymm7
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x220(%rsp), %ymm9
vmovaps 0x200(%rsp), %ymm10
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x160(%rsp), %ymm5
vmovaps 0x140(%rsp), %ymm6
vmovaps 0x120(%rsp), %ymm15
jmp 0x20c376
testb $0x1, 0xf(%rsp)
movq 0x50(%rsp), %r14
movq 0x48(%rsp), %r15
movq 0x40(%rsp), %r12
je 0x20c414
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rcx
cmpl $0x3, %ecx
jne 0x20b8ed
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, true, embree::avx::ArrayIntersector1<embree::avx::QuadMiIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1800, %rsp # imm = 0x1800
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x20d2d1
movq %rsi, %r9
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x20d2d1
movq %rdx, %r8
leaq 0x648(%rsp), %r10
movq 0x70(%rax), %rax
movq %rax, -0x8(%r10)
vmovaps 0x10(%r9), %xmm3
vmaxss 0xc(%r9), %xmm2, %xmm1
vbroadcastss 0x1d14a30(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce4b47(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vbroadcastss 0x1ce0265(%rip), %xmm5 # 0x1eec714
vdivps %xmm3, %xmm5, %xmm3
vbroadcastss 0x1d14aa4(%rip), %xmm5 # 0x1f20f60
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vbroadcastss 0x1d13a45(%rip), %xmm4 # 0x1f1ff10
vmulps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1d13a3c(%rip), %xmm5 # 0x1f1ff14
vbroadcastss (%r9), %ymm7
vbroadcastss 0x4(%r9), %ymm8
vbroadcastss 0x8(%r9), %ymm9
vmulps %xmm5, %xmm3, %xmm3
xorl %r11d, %r11d
vucomiss %xmm2, %xmm4
setb %r11b
vshufps $0x0, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm11
vshufpd $0x1, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,0]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm12
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm13
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm14
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x5, %r11d
xorl %r14d, %r14d
vucomiss %xmm2, %xmm5
setb %r14b
shll $0x5, %r14d
orq $0x40, %r14
xorl %r15d, %r15d
vucomiss %xmm2, %xmm6
vinsertf128 $0x1, %xmm3, %ymm3, %ymm5
setb %r15b
shll $0x5, %r15d
orq $0x80, %r15
movq %r11, %rsi
xorq $0x20, %rsi
movq %r14, %rdi
xorq $0x20, %rdi
movq %r15, %rax
xorq $0x20, %rax
movq %rax, 0x78(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm6
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm15
leaq 0x1f439d9(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm1
vbroadcastss 0x1ce0156(%rip), %ymm2 # 0x1eec714
vbroadcastss 0x1ce4405(%rip), %ymm0 # 0x1ef09cc
vmovaps %ymm1, 0x2e0(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vmovaps %ymm7, 0x280(%rsp)
vmovaps %ymm8, 0x260(%rsp)
vmovaps %ymm9, 0x240(%rsp)
vmovaps %ymm10, 0x220(%rsp)
vmovaps %ymm11, 0x200(%rsp)
vmovaps %ymm12, 0x1e0(%rsp)
vmovaps %ymm13, 0x1c0(%rsp)
vmovaps %ymm14, 0x1a0(%rsp)
movq %r14, 0x60(%rsp)
movq %r15, 0x58(%rsp)
vmovaps %ymm5, 0x180(%rsp)
movq %rsi, 0x28(%rsp)
movq %rdi, 0x20(%rsp)
vmovaps %ymm6, 0x160(%rsp)
vmovaps %ymm15, 0x140(%rsp)
leaq 0x640(%rsp), %rax
cmpq %rax, %r10
je 0x20d2d1
movq -0x8(%r10), %rbx
addq $-0x8, %r10
testb $0x8, %bl
jne 0x20c6fa
vmovaps 0x40(%rbx,%r11), %ymm0
vsubps %ymm7, %ymm0, %ymm0
vmulps %ymm0, %ymm10, %ymm0
vmovaps 0x40(%rbx,%r14), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm11, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%r15), %ymm1
vsubps %ymm9, %ymm1, %ymm1
vmulps %ymm1, %ymm12, %ymm1
vmaxps %ymm6, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rbx,%rsi), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm13, %ymm1
vmovaps 0x40(%rbx,%rdi), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm14, %ymm2
vminps %ymm2, %ymm1, %ymm1
movq 0x78(%rsp), %rax
vmovaps 0x40(%rbx,%rax), %ymm2
vsubps %ymm9, %ymm2, %ymm2
vmulps %ymm2, %ymm5, %ymm2
vminps %ymm15, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r12d
testb $0x8, %bl
jne 0x20c740
testq %r12, %r12
je 0x20c744
andq $-0x10, %rbx
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %r13d, %r13d
movq (%rbx,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r12, %rdx
jne 0x20c74a
movq %rax, %rbx
testl %r13d, %r13d
je 0x20c66f
jmp 0x20c78d
pushq $0x6
jmp 0x20c746
pushq $0x4
popq %r13
jmp 0x20c735
movq %rax, (%r10)
addq $0x8, %r10
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x20c788
movq %rcx, (%r10)
addq $0x8, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x20c759
movq %rcx, %rbx
jmp 0x20c735
cmpl $0x6, %r13d
jne 0x20d2c7
movl %ebx, %eax
andl $0xf, %eax
xorl %r13d, %r13d
addq $-0x8, %rax
setne %cl
je 0x20d2c7
movb %cl, 0x1f(%rsp)
andq $-0x10, %rbx
movq (%r8), %rdx
xorl %ecx, %ecx
movq %r12, 0x30(%rsp)
movq %rax, 0x68(%rsp)
movq %rdx, 0x38(%rsp)
movq %rcx, 0x70(%rsp)
imulq $0x60, %rcx, %rax
prefetcht0 (%rbx,%rax)
prefetcht0 0x40(%rbx,%rax)
movq %rdx, %rcx
movl 0x40(%rbx,%rax), %edx
movq 0x228(%rcx), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%rbx,%rax), %esi
movl 0x4(%rbx,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm4
movl 0x10(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x20(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x30(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x44(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm7
movl 0x14(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x24(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm5
movl 0x34(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x48(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm10
movl 0x18(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm11
movl 0x28(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm9
movl 0x38(%rbx,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x4c(%rbx,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm12
movl 0x1c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm13
movl 0x2c(%rbx,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm14
movl 0x3c(%rbx,%rax), %edx
vunpcklps %xmm10, %xmm4, %xmm15 # xmm15 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm12, %xmm7, %xmm10 # xmm10 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
vunpckhps %xmm12, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vunpcklps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vunpcklps %xmm10, %xmm15, %xmm7 # xmm7 = xmm15[0],xmm10[0],xmm15[1],xmm10[1]
vunpckhps %xmm10, %xmm15, %xmm10 # xmm10 = xmm15[2],xmm10[2],xmm15[3],xmm10[3]
vunpcklps %xmm11, %xmm2, %xmm15 # xmm15 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
vunpckhps %xmm11, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm11[2],xmm2[3],xmm11[3]
vunpcklps %xmm13, %xmm6, %xmm11 # xmm11 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
vunpckhps %xmm13, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
vunpcklps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpcklps %xmm11, %xmm15, %xmm6 # xmm6 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm11 # xmm11 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm9, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
vunpckhps %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
vunpcklps %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
vunpckhps %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm14[2],xmm5[3],xmm14[3]
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vunpcklps %xmm9, %xmm13, %xmm5 # xmm5 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
vunpckhps %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
vunpcklps %xmm8, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm12, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vunpcklps %xmm3, %xmm0, %xmm14 # xmm14 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm13, %xmm3 # xmm3 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
vbroadcastf128 0x40(%rbx,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vunpckhps %xmm8, %xmm13, %xmm8 # xmm8 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
vmovaps %ymm12, 0x460(%rsp)
vbroadcastf128 0x50(%rbx,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vmovaps %ymm12, 0x440(%rsp)
vinsertf128 $0x1, %xmm5, %ymm7, %ymm15
vinsertf128 $0x1, %xmm9, %ymm10, %ymm7
vinsertf128 $0x1, %xmm1, %ymm4, %ymm0
vinsertf128 $0x1, %xmm6, %ymm6, %ymm4
vinsertf128 $0x1, %xmm11, %ymm11, %ymm5
vinsertf128 $0x1, %xmm2, %ymm2, %ymm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vinsertf128 $0x1, %xmm8, %ymm8, %ymm10
vinsertf128 $0x1, %xmm14, %ymm14, %ymm3
vbroadcastss (%r9), %ymm12
vbroadcastss 0x4(%r9), %ymm13
vbroadcastss 0x8(%r9), %ymm14
vsubps %ymm12, %ymm15, %ymm2
vsubps %ymm13, %ymm7, %ymm15
vsubps %ymm14, %ymm0, %ymm0
vsubps %ymm12, %ymm4, %ymm4
vmovaps %ymm4, 0x120(%rsp)
vsubps %ymm13, %ymm5, %ymm11
vsubps %ymm14, %ymm1, %ymm1
vmovaps %ymm1, 0x2a0(%rsp)
vsubps %ymm12, %ymm9, %ymm4
vsubps %ymm13, %ymm10, %ymm1
vmovaps %ymm1, 0x100(%rsp)
vsubps %ymm14, %ymm3, %ymm14
vsubps %ymm2, %ymm4, %ymm3
vmovaps %ymm4, %ymm8
vmovaps %ymm4, 0xe0(%rsp)
vsubps %ymm15, %ymm1, %ymm12
vsubps %ymm0, %ymm14, %ymm6
vaddps %ymm1, %ymm15, %ymm4
vaddps %ymm0, %ymm14, %ymm5
vmulps %ymm4, %ymm6, %ymm7
vmulps %ymm5, %ymm12, %ymm9
vsubps %ymm7, %ymm9, %ymm1
vaddps %ymm2, %ymm8, %ymm7
vmulps %ymm5, %ymm3, %ymm5
vmovaps %ymm6, 0x380(%rsp)
vmulps %ymm7, %ymm6, %ymm10
vsubps %ymm5, %ymm10, %ymm5
vmovaps %ymm12, 0x3a0(%rsp)
vmulps %ymm7, %ymm12, %ymm7
vmovaps %ymm3, 0x3c0(%rsp)
vmulps %ymm4, %ymm3, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vbroadcastss 0x14(%r9), %ymm13
vbroadcastss 0x18(%r9), %ymm9
vmulps %ymm4, %ymm9, %ymm4
vmulps %ymm5, %ymm13, %ymm5
vbroadcastss 0x10(%r9), %ymm6
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm1, %ymm6, %ymm5
vaddps %ymm4, %ymm5, %ymm10
vmovaps %ymm11, %ymm8
vsubps %ymm11, %ymm15, %ymm11
vmovaps 0x2a0(%rsp), %ymm3
vsubps %ymm3, %ymm0, %ymm12
vmovaps %ymm15, 0x400(%rsp)
vaddps %ymm8, %ymm15, %ymm4
vmovaps %ymm0, 0x3e0(%rsp)
vaddps %ymm3, %ymm0, %ymm5
vmovaps %ymm3, %ymm0
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm5, %ymm11, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vmovaps 0x120(%rsp), %ymm1
vsubps %ymm1, %ymm2, %ymm15
vmulps %ymm5, %ymm15, %ymm5
vmovaps %ymm2, 0x420(%rsp)
vaddps %ymm1, %ymm2, %ymm7
vmovaps %ymm12, 0x360(%rsp)
vmulps %ymm7, %ymm12, %ymm12
vsubps %ymm5, %ymm12, %ymm5
vmovaps %ymm11, %ymm12
vmulps %ymm7, %ymm11, %ymm7
vmulps %ymm4, %ymm15, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vmulps %ymm4, %ymm9, %ymm4
vmulps %ymm5, %ymm13, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm6, %ymm3
vaddps %ymm4, %ymm3, %ymm4
vmovaps 0xe0(%rsp), %ymm2
vsubps %ymm2, %ymm1, %ymm7
vaddps %ymm2, %ymm1, %ymm2
vmovaps 0x100(%rsp), %ymm1
vsubps %ymm1, %ymm8, %ymm11
vaddps %ymm1, %ymm8, %ymm1
vsubps %ymm14, %ymm0, %ymm5
vaddps %ymm0, %ymm14, %ymm3
vmovaps %ymm7, %ymm0
vmulps %ymm1, %ymm5, %ymm7
vmulps %ymm3, %ymm11, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm3, %ymm0, %ymm3
vmulps %ymm2, %ymm5, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm11, %ymm2
vmulps %ymm1, %ymm0, %ymm1
vsubps %ymm2, %ymm1, %ymm1
vmovaps %ymm9, 0xe0(%rsp)
vmulps %ymm1, %ymm9, %ymm1
vmovaps %ymm13, 0x120(%rsp)
vmulps %ymm3, %ymm13, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm6, 0x100(%rsp)
vmulps %ymm7, %ymm6, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm4, %ymm10, %ymm2
vaddps %ymm2, %ymm1, %ymm6
vminps %ymm4, %ymm10, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d1433d(%rip), %ymm3 # 0x1f20ec4
vmovaps %ymm6, 0x300(%rsp)
vandps %ymm3, %ymm6, %ymm6
vbroadcastss 0x1d1432f(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm6, 0x320(%rsp)
vmulps %ymm3, %ymm6, %ymm3
vbroadcastss 0x1d1430d(%rip), %ymm7 # 0x1f20ec0
vxorps %ymm7, %ymm3, %ymm7
vcmpnltps %ymm7, %ymm2, %ymm2
vmovaps %ymm10, 0x2a0(%rsp)
vmovaps %ymm4, 0x340(%rsp)
vmaxps %ymm4, %ymm10, %ymm7
vmaxps %ymm1, %ymm7, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm1
vtestps %ymm1, %ymm1
je 0x20d233
vmovaps 0x380(%rsp), %ymm6
vmovaps %ymm11, %ymm8
vmovaps %ymm12, %ymm11
vmulps %ymm6, %ymm12, %ymm2
vmovaps 0x3a0(%rsp), %ymm13
vmovaps 0x360(%rsp), %ymm9
vmulps %ymm9, %ymm13, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm8, %ymm9, %ymm7
vmulps %ymm5, %ymm12, %ymm12
vsubps %ymm7, %ymm12, %ymm12
vbroadcastss 0x1d14292(%rip), %ymm4 # 0x1f20ec4
vandps %ymm4, %ymm2, %ymm2
vandps %ymm4, %ymm7, %ymm7
vcmpltps %ymm7, %ymm2, %ymm2
vblendvps %ymm2, %ymm3, %ymm12, %ymm14
vmulps %ymm5, %ymm15, %ymm2
vmulps %ymm6, %ymm15, %ymm3
vmovaps 0x3c0(%rsp), %ymm10
vmulps %ymm9, %ymm10, %ymm6
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm0, %ymm9, %ymm7
vsubps %ymm2, %ymm7, %ymm7
vandps %ymm4, %ymm6, %ymm6
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm6, %ymm2
vblendvps %ymm2, %ymm3, %ymm7, %ymm9
vmulps %ymm0, %ymm11, %ymm2
vmulps %ymm11, %ymm10, %ymm3
vmulps %ymm15, %ymm13, %ymm5
vmulps %ymm8, %ymm15, %ymm0
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm4, %ymm5, %ymm5
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm5, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm0
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmulps 0xe0(%rsp), %ymm0, %ymm2
vmulps 0x120(%rsp), %ymm9, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x100(%rsp), %ymm14, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm2, %ymm3
vmulps 0x3e0(%rsp), %ymm0, %ymm2
vmulps 0x400(%rsp), %ymm9, %ymm5
vaddps %ymm2, %ymm5, %ymm2
vmulps 0x420(%rsp), %ymm14, %ymm5
vrcpps %ymm3, %ymm6
vaddps %ymm2, %ymm5, %ymm2
vaddps %ymm2, %ymm2, %ymm2
vmulps %ymm6, %ymm3, %ymm5
vbroadcastss 0x1cdfa03(%rip), %ymm7 # 0x1eec714
vsubps %ymm5, %ymm7, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%r9), %ymm6
vmulps %ymm5, %ymm2, %ymm2
vcmpleps %ymm2, %ymm6, %ymm5
vbroadcastss 0x20(%r9), %ymm6
vcmpleps %ymm6, %ymm2, %ymm6
vandps %ymm5, %ymm6, %ymm5
vcmpneqps 0x1d141bc(%rip), %ymm3, %ymm3 # 0x1f20f00
vandps %ymm3, %ymm5, %ymm3
vextractf128 $0x1, %ymm3, %xmm5
vpackssdw %xmm5, %xmm3, %xmm3
vpand %xmm1, %xmm3, %xmm1
vpmovsxwd %xmm1, %xmm3
vpshufd $0xee, %xmm1, %xmm5 # xmm5 = xmm1[2,3,2,3]
vpmovsxwd %xmm5, %xmm5
vinsertf128 $0x1, %xmm5, %ymm3, %ymm3
vtestps %ymm3, %ymm3
je 0x20d233
vmovaps 0x2a0(%rsp), %ymm4
vmovaps %ymm4, 0x480(%rsp)
vmovaps 0x340(%rsp), %ymm7
vmovaps %ymm7, 0x4a0(%rsp)
vmovaps 0x300(%rsp), %ymm5
vmovaps %ymm5, 0x4c0(%rsp)
vmovaps %ymm14, 0x4e0(%rsp)
vmovaps %ymm9, 0x500(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps %ymm3, 0x540(%rsp)
vmovaps %ymm2, 0x5a0(%rsp)
vmovaps 0x2e0(%rsp), %ymm6
vmovaps %ymm6, 0x620(%rsp)
vrcpps %ymm5, %ymm2
vmulps %ymm2, %ymm5, %ymm3
vbroadcastss 0x1cdf918(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1ce41d7(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x320(%rsp), %ymm8
vcmpnltps %ymm3, %ymm8, %ymm3
vandps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm4, %ymm3
vminps %ymm5, %ymm3, %ymm3
vmulps %ymm2, %ymm7, %ymm2
vminps %ymm5, %ymm2, %ymm2
vsubps %ymm3, %ymm5, %ymm4
vsubps %ymm2, %ymm5, %ymm5
vblendvps %ymm6, %ymm4, %ymm2, %ymm2
vblendvps %ymm6, %ymm5, %ymm3, %ymm3
vmovaps %ymm2, 0x580(%rsp)
vmovaps %ymm3, 0x560(%rsp)
vmovaps 0x2c0(%rsp), %ymm4
vmulps %ymm4, %ymm14, %ymm2
vmulps %ymm4, %ymm9, %ymm3
vmulps %ymm0, %ymm4, %ymm0
vmovaps %ymm2, 0x5c0(%rsp)
vmovaps %ymm3, 0x5e0(%rsp)
vmovaps %ymm0, 0x600(%rsp)
vpsllw $0xf, %xmm1, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
vmovaps 0x280(%rsp), %ymm7
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x240(%rsp), %ymm9
vmovaps 0x220(%rsp), %ymm10
vmovaps 0x200(%rsp), %ymm11
vmovaps 0x1e0(%rsp), %ymm12
vmovaps 0x1c0(%rsp), %ymm13
vmovaps 0x1a0(%rsp), %ymm14
vmovaps 0x180(%rsp), %ymm5
movq 0x28(%rsp), %rsi
movq 0x20(%rsp), %rdi
vmovaps 0x160(%rsp), %ymm6
vmovaps 0x140(%rsp), %ymm15
movq 0x38(%rsp), %rdx
bsfq %r15, %r12
movl 0x460(%rsp,%r12,4), %eax
movq 0x1e8(%rdx), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%r9), %ecx
testl %ecx, 0x34(%r13)
je 0x20cf43
movq 0x10(%r8), %r14
cmpq $0x0, 0x10(%r14)
jne 0x20cf63
cmpq $0x0, 0x48(%r13)
jne 0x20cf63
xorl %eax, %eax
jmp 0x20cf49
btcq %r12, %r15
movb $0x1, %al
movq 0x30(%rsp), %r12
xorl %r13d, %r13d
testb %al, %al
je 0x20d2aa
testq %r15, %r15
jne 0x20cf0c
jmp 0x20d209
vmovss 0x560(%rsp,%r12,4), %xmm0
vmovd 0x580(%rsp,%r12,4), %xmm1
movq 0x8(%r8), %rcx
movl 0x440(%rsp,%r12,4), %edx
vmovss 0x5c0(%rsp,%r12,4), %xmm2
vmovss 0x5e0(%rsp,%r12,4), %xmm3
vmovss 0x600(%rsp,%r12,4), %xmm4
vmovss %xmm2, 0x80(%rsp)
vmovss %xmm3, 0x84(%rsp)
vmovss %xmm4, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovd %xmm1, 0x90(%rsp)
movl %edx, 0x94(%rsp)
movl %eax, 0x98(%rsp)
movl (%rcx), %eax
movl %eax, 0x9c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xa0(%rsp)
vmovss 0x20(%r9), %xmm0
vmovss %xmm0, 0x120(%rsp)
vmovd 0x5a0(%rsp,%r12,4), %xmm0
vmovd %xmm0, 0x20(%r9)
orl $-0x1, 0x44(%rsp)
leaq 0x44(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xb8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %r9, 0xc8(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r10, 0x50(%rsp)
movq %r11, 0x48(%rsp)
je 0x20d123
leaq 0xb0(%rsp), %rdi
movq %r8, 0x100(%rsp)
movq %r9, 0xe0(%rsp)
vzeroupper
callq *%rax
vmovaps 0x140(%rsp), %ymm15
vmovaps 0x160(%rsp), %ymm6
movq 0x20(%rsp), %rdi
movq 0x28(%rsp), %rsi
vmovaps 0x180(%rsp), %ymm5
vmovaps 0x1a0(%rsp), %ymm14
vmovaps 0x1c0(%rsp), %ymm13
vmovaps 0x1e0(%rsp), %ymm12
vmovaps 0x200(%rsp), %ymm11
vmovaps 0x220(%rsp), %ymm10
movq 0x48(%rsp), %r11
vmovaps 0x240(%rsp), %ymm9
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x280(%rsp), %ymm7
movq 0x50(%rsp), %r10
movq 0xe0(%rsp), %r9
movq 0x100(%rsp), %r8
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20d1e2
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x20d1de
testb $0x2, (%r14)
jne 0x20d141
testb $0x40, 0x3e(%r13)
je 0x20d1d1
leaq 0xb0(%rsp), %rdi
movq %r8, %r14
movq %r9, %r13
vzeroupper
callq *%rax
vmovaps 0x140(%rsp), %ymm15
vmovaps 0x160(%rsp), %ymm6
movq 0x20(%rsp), %rdi
movq 0x28(%rsp), %rsi
vmovaps 0x180(%rsp), %ymm5
vmovaps 0x1a0(%rsp), %ymm14
vmovaps 0x1c0(%rsp), %ymm13
vmovaps 0x1e0(%rsp), %ymm12
vmovaps 0x200(%rsp), %ymm11
vmovaps 0x220(%rsp), %ymm10
movq 0x48(%rsp), %r11
vmovaps 0x240(%rsp), %ymm9
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x280(%rsp), %ymm7
movq 0x50(%rsp), %r10
movq %r13, %r9
movq %r14, %r8
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20d1e2
xorl %eax, %eax
jmp 0x20d1f7
vmovd 0x120(%rsp), %xmm0
vmovd %xmm0, 0x20(%r9)
btcq %r12, %r15
movb $0x1, %al
movq 0x30(%rsp), %r12
xorl %r13d, %r13d
movq 0x38(%rsp), %rdx
jmp 0x20cf51
movq 0x60(%rsp), %r14
movq 0x58(%rsp), %r15
movq 0x70(%rsp), %rcx
incq %rcx
movq 0x68(%rsp), %rax
cmpq %rax, %rcx
setb 0x1f(%rsp)
jne 0x20c7c8
jmp 0x20d2c7
vmovaps 0x280(%rsp), %ymm7
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x240(%rsp), %ymm9
vmovaps 0x220(%rsp), %ymm10
vmovaps 0x200(%rsp), %ymm11
vmovaps 0x1e0(%rsp), %ymm12
vmovaps 0x1c0(%rsp), %ymm13
vmovaps 0x1a0(%rsp), %ymm14
vmovaps 0x180(%rsp), %ymm5
movq 0x28(%rsp), %rsi
movq 0x20(%rsp), %rdi
vmovaps 0x160(%rsp), %ymm6
vmovaps 0x140(%rsp), %ymm15
movq 0x38(%rsp), %rdx
jmp 0x20d213
testb $0x1, 0x1f(%rsp)
movq 0x60(%rsp), %r14
movq 0x58(%rsp), %r15
je 0x20d2c7
movl $0xff800000, 0x20(%r9) # imm = 0xFF800000
pushq $0x3
popq %r13
cmpl $0x3, %r13d
jne 0x20c656
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiMBIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/* Shadow-ray (occlusion) query: depth-first BVH traversal that stops at the
   FIRST occluding primitive (any-hit, not closest-hit). On occlusion the
   ray is flagged by setting ray.tfar = -inf — the same negative-tfar
   sentinel that the early-out test below checks — otherwise the ray is
   left unmodified. Indentation was stripped by the listing tool. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (tfar < 0 is the occluded sentinel) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer; starts at stack+1 because stack[0] is pre-seeded with the root below
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop — the 'pop:' label on the loop body lets the inner
   downtraversal loop restart this outer loop via 'goto pop' */
while (true) pop:
{
/* pop next node; an empty stack means traversal is complete */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: descends until 'cur' is a leaf (intersect fails) */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children (any-hit order) */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf; // mark ray occluded; any hit terminates the whole traversal
break;
}
/* push lazy node onto stack (deferred subtree produced by the intersector) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1760, %rsp # imm = 0x1760
movq %rdx, 0x20(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x20d31d
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x20d30b
leaq 0x5a8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm2
vmaxss 0xc(%rsi), %xmm5, %xmm1
vbroadcastss 0x1d13b75(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1ce3c8c(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1cdf39c(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm10
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm11
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %rbx
xorq $0x20, %rbx
movq %r10, %r15
xorq $0x20, %r15
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm12
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm13
leaq 0x1f42b57(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm0 # ymm0 = mem[0,1,0,1]
vmovaps %ymm0, 0x380(%rsp)
vperm2f128 $0x2, (%rax), %ymm0, %ymm1 # ymm1 = mem[0,1],ymm0[0,1]
leaq 0x5a0(%rsp), %r12
vbroadcastss 0x1ce357a(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1cdf2b9(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x3c0(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
movq %rsi, 0x28(%rsp)
vmovaps %ymm6, 0x360(%rsp)
vmovaps %ymm7, 0x340(%rsp)
vmovaps %ymm8, 0x320(%rsp)
movq %r8, 0x60(%rsp)
vmovaps %ymm9, 0x300(%rsp)
vmovaps %ymm10, 0x2e0(%rsp)
vmovaps %ymm11, 0x2c0(%rsp)
movq %r9, 0x58(%rsp)
movq %r10, 0x50(%rsp)
movq %r11, 0x48(%rsp)
movq %rbx, 0x40(%rsp)
movq %r15, 0x38(%rsp)
vmovaps %ymm12, 0x2a0(%rsp)
vmovaps %ymm13, 0x280(%rsp)
cmpq %r12, %rdi
je 0x20d30b
movq -0x8(%rdi), %r14
addq $-0x8, %rdi
testb $0x8, %r14b
jne 0x20d5df
movq %r14, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r8), %ymm0, %ymm1
vaddps 0x40(%rax,%r8), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r9), %ymm0, %ymm2
vaddps 0x40(%rax,%r9), %ymm2, %ymm2
vmaxps %ymm1, %ymm12, %ymm1
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x100(%rax,%r10), %ymm0, %ymm3
vmulps %ymm2, %ymm10, %ymm2
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r11), %ymm3, %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vminps %ymm2, %ymm13, %ymm2
vmulps 0x100(%rax,%rbx), %ymm0, %ymm3
vaddps 0x40(%rax,%rbx), %ymm3, %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm10, %ymm3
vmulps 0x100(%rax,%r15), %ymm0, %ymm4
vaddps 0x40(%rax,%r15), %ymm4, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm11, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r14d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x20d627
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r13d
testb $0x8, %r14b
jne 0x20d623
testq %r13, %r13
je 0x20d654
andq $-0x10, %r14
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
xorl %ecx, %ecx
movq (%r14,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdx
jne 0x20d659
movq %rax, %r14
testl %ecx, %ecx
je 0x20d4ef
jmp 0x20d6a1
pushq $0x6
jmp 0x20d656
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x20d5ce
pushq $0x4
popq %rcx
jmp 0x20d619
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r14,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x20d697
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x20d668
movq %rcx, %r14
xorl %ecx, %ecx
jmp 0x20d619
cmpl $0x6, %ecx
jne 0x20e142
movl %r14d, %eax
andl $0xf, %eax
xorl %ecx, %ecx
addq $-0x8, %rax
movq %rax, 0xa8(%rsp)
setne %dl
je 0x20e142
movq %r13, 0x68(%rsp)
movq %rdi, 0x70(%rsp)
andq $-0x10, %r14
movq 0x20(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x78(%rsp)
xorl %eax, %eax
movb %dl, 0x1f(%rsp)
movq %rax, 0xd8(%rsp)
imulq $0x60, %rax, %rax
vmovss 0x1c(%rsi), %xmm0
movl 0x40(%r14,%rax), %ecx
movq 0x78(%rsp), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rsi
vmovss 0x28(%rsi), %xmm1
vmovss 0x2c(%rsi), %xmm2
vmovss 0x30(%rsi), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x1ce3291(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vmaxss %xmm1, %xmm5, %xmm3
vcvttss2si %xmm3, %ecx
movslq %ecx, %rcx
movq 0xe0(%rsi), %r15
imulq $0x38, %rcx, %r9
movq (%r15,%r9), %rsi
movl (%r14,%rax), %ebx
movl 0x4(%r14,%rax), %r8d
movq %r8, 0xc0(%rsp)
vmovups (%rsi,%rbx,4), %xmm2
movl 0x10(%r14,%rax), %edi
movq %rdi, 0x160(%rsp)
vmovups (%rsi,%rdi,4), %xmm1
movl 0x20(%r14,%rax), %edi
movq %rdi, 0x140(%rsp)
vmovups (%rsi,%rdi,4), %xmm7
movl 0x30(%r14,%rax), %edi
movq %rdi, 0x80(%rsp)
vmovups (%rsi,%rdi,4), %xmm8
movl 0x44(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %rsi
movq %rsi, 0xb0(%rsp)
movq (%rsi,%r9), %rsi
vmovups (%rsi,%r8,4), %xmm4
movl 0x14(%r14,%rax), %edi
movq %rdi, 0xd0(%rsp)
vmovups (%rsi,%rdi,4), %xmm5
movl 0x24(%r14,%rax), %edi
movq %rdi, 0x180(%rsp)
vmovups (%rsi,%rdi,4), %xmm9
movl 0x34(%r14,%rax), %edi
movq %rdi, 0x1a0(%rsp)
vmovups (%rsi,%rdi,4), %xmm10
movl 0x48(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %r8
movq (%r8,%r9), %rsi
movl 0x8(%r14,%rax), %edi
vmovups (%rsi,%rdi,4), %xmm13
movl 0x18(%r14,%rax), %r13d
vmovups (%rsi,%r13,4), %xmm6
movl 0x28(%r14,%rax), %r10d
movq %r10, 0xb8(%rsp)
vmovups (%rsi,%r10,4), %xmm11
movl 0x38(%r14,%rax), %r10d
movq %r10, 0xc8(%rsp)
vmovups (%rsi,%r10,4), %xmm12
movl 0x4c(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rdx
vsubss %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x270(%rsp)
movq 0xe0(%rdx), %rsi
movq (%rsi,%r9), %r12
movl 0xc(%r14,%rax), %edx
vmovups (%r12,%rdx,4), %xmm0
movl 0x1c(%r14,%rax), %r11d
vmovups (%r12,%r11,4), %xmm3
vunpcklps %xmm13, %xmm2, %xmm14 # xmm14 = xmm2[0],xmm13[0],xmm2[1],xmm13[1]
vunpckhps %xmm13, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm13[2],xmm2[3],xmm13[3]
vunpcklps %xmm0, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
vunpckhps %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
movl 0x2c(%r14,%rax), %r9d
vmovups (%r12,%r9,4), %xmm4
vunpcklps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vmovaps %xmm0, 0x250(%rsp)
vunpcklps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
vmovaps %xmm0, 0x210(%rsp)
vunpckhps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
vmovaps %xmm0, 0x230(%rsp)
vunpcklps %xmm6, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpckhps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
vunpcklps %xmm3, %xmm5, %xmm2 # xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
vunpckhps %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
movl 0x3c(%r14,%rax), %r10d
vmovups (%r12,%r10,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x260(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x220(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x240(%rsp)
vunpcklps %xmm11, %xmm7, %xmm0 # xmm0 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
vunpckhps %xmm11, %xmm7, %xmm1 # xmm1 = xmm7[2],xmm11[2],xmm7[3],xmm11[3]
vunpcklps %xmm4, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpckhps %xmm4, %xmm9, %xmm3 # xmm3 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
incl %ecx
movslq %ecx, %rcx
imulq $0x38, %rcx, %r12
movq (%r15,%r12), %rcx
vmovups (%rcx,%rbx,4), %xmm4
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x200(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x1c0(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x1e0(%rsp)
vunpcklps %xmm12, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
vunpckhps %xmm12, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
vunpcklps %xmm5, %xmm10, %xmm2 # xmm2 = xmm10[0],xmm5[0],xmm10[1],xmm5[1]
vunpckhps %xmm5, %xmm10, %xmm3 # xmm3 = xmm10[2],xmm5[2],xmm10[3],xmm5[3]
movq (%r8,%r12), %r8
vmovups (%r8,%rdi,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x1f0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x1d0(%rsp)
movq 0xb0(%rsp), %rdi
movq (%rdi,%r12), %rdi
vunpcklps %xmm5, %xmm4, %xmm0 # xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0xc0(%rsp), %rbx
vmovups (%rdi,%rbx,4), %xmm2
movq (%rsi,%r12), %rsi
vmovups (%rsi,%rdx,4), %xmm3
vunpcklps %xmm3, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vunpcklps %xmm2, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpcklps %xmm4, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
vunpckhps %xmm4, %xmm0, %xmm8 # xmm8 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
movq 0x160(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
vmovups (%r8,%r13,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0xd0(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%rsi,%r11,4), %xmm2
vunpcklps %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpckhps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vunpcklps %xmm1, %xmm0, %xmm14 # xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vunpckhps %xmm4, %xmm3, %xmm7 # xmm7 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
movq 0x140(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0xb8(%rsp), %rdx
vmovups (%r8,%rdx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x180(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%rsi,%r9,4), %xmm3
vunpcklps %xmm3, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vunpcklps %xmm1, %xmm0, %xmm15 # xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm9 # xmm9 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0x80(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0xc8(%rsp), %rcx
vmovups (%r8,%rcx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x1a0(%rsp), %rcx
vmovups (%rdi,%rcx,4), %xmm1
vmovups (%rsi,%r10,4), %xmm4
movq 0x28(%rsp), %rsi
vunpcklps %xmm4, %xmm1, %xmm11 # xmm11 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpckhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
vunpcklps %xmm1, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm11, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3]
vmovaps 0x270(%rsp), %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[0,0,0,0]
vmovss 0x1cdec52(%rip), %xmm1 # 0x1eec714
vsubss %xmm11, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm10
vmulps 0x210(%rsp), %xmm1, %xmm11
vaddps %xmm10, %xmm11, %xmm11
vmulps %xmm0, %xmm8, %xmm8
vmulps 0x230(%rsp), %xmm1, %xmm10
vaddps %xmm8, %xmm10, %xmm10
vmulps %xmm0, %xmm13, %xmm8
vmulps 0x250(%rsp), %xmm1, %xmm13
vaddps %xmm8, %xmm13, %xmm13
vmulps %xmm2, %xmm0, %xmm2
vmulps 0x220(%rsp), %xmm1, %xmm8
vaddps %xmm2, %xmm8, %xmm8
vmulps %xmm7, %xmm0, %xmm2
vmulps 0x240(%rsp), %xmm1, %xmm7
vaddps %xmm2, %xmm7, %xmm2
vmulps %xmm0, %xmm14, %xmm7
vmulps 0x260(%rsp), %xmm1, %xmm14
vaddps %xmm7, %xmm14, %xmm14
vmulps %xmm3, %xmm0, %xmm3
vmulps 0x1c0(%rsp), %xmm1, %xmm7
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm0, %xmm9, %xmm7
vmulps 0x1e0(%rsp), %xmm1, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vmulps %xmm0, %xmm15, %xmm9
vmulps 0x200(%rsp), %xmm1, %xmm15
vaddps %xmm9, %xmm15, %xmm9
vmulps %xmm4, %xmm0, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm0
vmulps %xmm1, %xmm12, %xmm6
vaddps %xmm4, %xmm6, %xmm15
vbroadcastf128 0x40(%r14,%rax), %ymm6 # ymm6 = mem[0,1,0,1]
vmovaps %ymm6, 0x400(%rsp)
vbroadcastf128 0x50(%r14,%rax), %ymm6 # ymm6 = mem[0,1,0,1]
movq 0xd8(%rsp), %rax
vmulps 0x1d0(%rsp), %xmm1, %xmm12
vaddps %xmm5, %xmm12, %xmm12
vmulps 0x1f0(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmovaps %ymm6, 0x3e0(%rsp)
vinsertf128 $0x1, %xmm3, %ymm11, %ymm4
vinsertf128 $0x1, %xmm7, %ymm10, %ymm5
vinsertf128 $0x1, %xmm9, %ymm13, %ymm7
vinsertf128 $0x1, %xmm8, %ymm8, %ymm1
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm14, %ymm14, %ymm8
vinsertf128 $0x1, %xmm15, %ymm15, %ymm9
vinsertf128 $0x1, %xmm12, %ymm12, %ymm10
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vsubps %ymm1, %ymm4, %ymm3
vsubps %ymm2, %ymm5, %ymm12
vmovaps %ymm12, 0x140(%rsp)
vsubps %ymm8, %ymm7, %ymm8
vsubps %ymm4, %ymm9, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm0, %ymm11
vmulps %ymm11, %ymm12, %ymm0
vmulps %ymm10, %ymm8, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmulps %ymm9, %ymm8, %ymm0
vmovaps %ymm3, 0x80(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vsubps %ymm0, %ymm2, %ymm6
vmulps %ymm3, %ymm10, %ymm0
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm0, %ymm12, %ymm3
vbroadcastss (%rsi), %ymm12
vbroadcastss 0x4(%rsi), %ymm13
vbroadcastss 0x8(%rsi), %ymm14
vbroadcastss 0x14(%rsi), %ymm15
vsubps %ymm12, %ymm4, %ymm2
vbroadcastss 0x18(%rsi), %ymm12
vsubps %ymm13, %ymm5, %ymm5
vsubps %ymm14, %ymm7, %ymm7
vmulps %ymm7, %ymm15, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x10(%rsi), %ymm14
vmulps %ymm2, %ymm12, %ymm0
vmulps %ymm7, %ymm14, %ymm1
vsubps %ymm0, %ymm1, %ymm0
vmulps %ymm5, %ymm14, %ymm1
vmovaps %ymm2, 0x180(%rsp)
vmulps %ymm2, %ymm15, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm3, 0x160(%rsp)
vmulps %ymm3, %ymm12, %ymm2
vmulps %ymm6, %ymm15, %ymm12
vmovaps %ymm6, %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmovaps 0x1a0(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm12
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm11, %ymm11
vmulps %ymm0, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm1, %ymm8, %ymm1
vmulps 0x140(%rsp), %ymm0, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d131b4(%rip), %ymm1 # 0x1f20ec0
vandps %ymm1, %ymm2, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0x80(%rsp), %ymm13, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %ymm10, %ymm6, %ymm0
vcmpnltps %ymm10, %ymm8, %ymm1
vandps %ymm1, %ymm0, %ymm0
vbroadcastss 0x1d13180(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm2, %ymm4
vcmpneqps %ymm2, %ymm10, %ymm1
vandps %ymm1, %ymm0, %ymm0
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm10
vtestps 0x380(%rsp), %ymm10
jne 0x20dd87
incq %rax
cmpq 0xa8(%rsp), %rax
setb %dl
vxorps %xmm5, %xmm5, %xmm5
jne 0x20d6e4
jmp 0x20e0c8
vmovaps %ymm3, %ymm14
vandps 0x380(%rsp), %ymm10, %ymm10
vmulps 0x160(%rsp), %ymm7, %ymm0
vmulps %ymm5, %ymm15, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x180(%rsp), %ymm3, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vxorps %ymm0, %ymm9, %ymm3
vbroadcastss 0xc(%rsi), %ymm0
vmulps %ymm0, %ymm4, %ymm0
vcmpltps %ymm3, %ymm0, %ymm0
vbroadcastss 0x20(%rsi), %ymm1
vmulps %ymm1, %ymm4, %ymm1
vcmpleps %ymm1, %ymm3, %ymm1
vandps %ymm0, %ymm1, %ymm5
vtestps %ymm10, %ymm5
je 0x20dd6a
vandps %ymm5, %ymm10, %ymm0
vmovaps %ymm6, 0x420(%rsp)
vmovaps %ymm8, 0x440(%rsp)
vmovaps %ymm3, 0x460(%rsp)
vmovaps %ymm4, 0x480(%rsp)
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps 0x440(%rsp), %ymm1
vsubps %ymm1, %ymm4, %ymm2
vmovaps 0x3c0(%rsp), %ymm3
vblendvps %ymm3, %ymm2, %ymm0, %ymm2
vmovaps %ymm2, 0x420(%rsp)
vsubps %ymm0, %ymm4, %ymm0
vblendvps %ymm3, %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x440(%rsp)
vmovaps 0x3a0(%rsp), %ymm3
vmulps %ymm3, %ymm14, %ymm1
vmovaps %ymm1, 0x540(%rsp)
vmulps %ymm3, %ymm15, %ymm1
vmovaps %ymm1, 0x560(%rsp)
vrcpps %ymm4, %ymm1
vmulps 0x160(%rsp), %ymm3, %ymm3
vmovaps %ymm3, 0x580(%rsp)
vmulps %ymm1, %ymm4, %ymm3
vbroadcastss 0x1cde87d(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm1, %ymm3
vaddps %ymm3, %ymm1, %ymm1
vmulps 0x460(%rsp), %ymm1, %ymm3
vmovaps %ymm3, 0x520(%rsp)
vmulps %ymm1, %ymm2, %ymm2
vmovaps %ymm2, 0x4e0(%rsp)
vmulps %ymm1, %ymm0, %ymm0
vmovaps %ymm0, 0x500(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovmskps %ymm0, %edx
movq %rax, %r13
bsfq %rdx, %rbx
movl 0x400(%rsp,%rbx,4), %eax
movq 0x78(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r12
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r12)
je 0x20df20
movq 0x20(%rsp), %rcx
movq 0x10(%rcx), %r15
cmpq $0x0, 0x10(%r15)
jne 0x20df3b
cmpq $0x0, 0x48(%r12)
jne 0x20df3b
xorl %eax, %eax
jmp 0x20df26
btcq %rbx, %rdx
movb $0x1, %al
testb %al, %al
je 0x20e150
testq %rdx, %rdx
movq %r13, %rax
jne 0x20dedc
jmp 0x20dd6a
movq %rdx, 0x80(%rsp)
vmovss 0x4e0(%rsp,%rbx,4), %xmm0
vmovss 0x500(%rsp,%rbx,4), %xmm1
movq 0x20(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0x3e0(%rsp,%rbx,4), %edx
vmovss 0x540(%rsp,%rbx,4), %xmm2
vmovss 0x560(%rsp,%rbx,4), %xmm3
vmovss 0x580(%rsp,%rbx,4), %xmm4
vmovss %xmm2, 0xe0(%rsp)
vmovss %xmm3, 0xe4(%rsp)
vmovss %xmm4, 0xe8(%rsp)
vmovss %xmm0, 0xec(%rsp)
vmovss %xmm1, 0xf0(%rsp)
movl %edx, 0xf4(%rsp)
movl %eax, 0xf8(%rsp)
movl (%rcx), %eax
movl %eax, 0xfc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x100(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x140(%rsp)
vmovss 0x520(%rsp,%rbx,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x34(%rsp)
leaq 0x34(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x118(%rsp)
movq %rcx, 0x120(%rsp)
movq %rsi, 0x128(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
je 0x20e058
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20e08d
movq 0x10(%r15), %rax
testq %rax, %rax
je 0x20e089
testb $0x2, (%r15)
jne 0x20e06f
testb $0x40, 0x3e(%r12)
je 0x20e07c
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20e08d
xorl %eax, %eax
jmp 0x20e0b6
movq 0x28(%rsp), %rax
vmovss 0x140(%rsp), %xmm0
vmovss %xmm0, 0x20(%rax)
movq 0x80(%rsp), %rax
btcq %rbx, %rax
movq %rax, 0x80(%rsp)
movb $0x1, %al
movq 0x28(%rsp), %rsi
movq 0x80(%rsp), %rdx
jmp 0x20df26
movq 0x70(%rsp), %rdi
vmovaps 0x360(%rsp), %ymm6
vmovaps 0x340(%rsp), %ymm7
vmovaps 0x320(%rsp), %ymm8
movq 0x60(%rsp), %r8
vmovaps 0x300(%rsp), %ymm9
vmovaps 0x2e0(%rsp), %ymm10
vmovaps 0x2c0(%rsp), %ymm11
movq 0x58(%rsp), %r9
movq 0x50(%rsp), %r10
movq 0x48(%rsp), %r11
movq 0x40(%rsp), %rbx
movq 0x38(%rsp), %r15
vmovaps 0x2a0(%rsp), %ymm12
vmovaps 0x280(%rsp), %ymm13
leaq 0x5a0(%rsp), %r12
movq 0x68(%rsp), %r13
xorl %ecx, %ecx
cmpl $0x3, %ecx
jne 0x20d4de
jmp 0x20d30b
testb $0x1, 0x1f(%rsp)
vxorps %xmm5, %xmm5, %xmm5
movq 0x70(%rsp), %rdi
vmovaps 0x360(%rsp), %ymm6
vmovaps 0x340(%rsp), %ymm7
vmovaps 0x320(%rsp), %ymm8
movq 0x60(%rsp), %r8
vmovaps 0x300(%rsp), %ymm9
vmovaps 0x2e0(%rsp), %ymm10
vmovaps 0x2c0(%rsp), %ymm11
movq 0x58(%rsp), %r9
movq 0x50(%rsp), %r10
movq 0x48(%rsp), %r11
movq 0x40(%rsp), %rbx
movq 0x38(%rsp), %r15
vmovaps 0x2a0(%rsp), %ymm12
vmovaps 0x280(%rsp), %ymm13
leaq 0x5a0(%rsp), %r12
movq 0x68(%rsp), %r13
movl $0x0, %ecx
je 0x20e142
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %rcx
jmp 0x20e142
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::QuadMiMBIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/* Shadow-ray (occlusion) query: depth-first BVH traversal that stops at the
   FIRST occluding primitive (any-hit, not closest-hit). On occlusion the
   ray is flagged by setting ray.tfar = -inf — the same negative-tfar
   sentinel that the early-out test below checks — otherwise the ray is
   left unmodified. Indentation was stripped by the listing tool. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays (tfar < 0 is the occluded sentinel) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer; starts at stack+1 because stack[0] is pre-seeded with the root below
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop — the 'pop:' label on the loop body lets the inner
   downtraversal loop restart this outer loop via 'goto pop' */
while (true) pop:
{
/* pop next node; an empty stack means traversal is complete */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: descends until 'cur' is a leaf (intersect fails) */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children (any-hit order) */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf; // mark ray occluded; any hit terminates the whole traversal
break;
}
/* push lazy node onto stack (deferred subtree produced by the intersector) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x18a0, %rsp # imm = 0x18A0
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x20f446
vmovss 0x20(%rsi), %xmm0
vxorps %xmm6, %xmm6, %xmm6
vucomiss %xmm0, %xmm6
ja 0x20f446
leaq 0x6e8(%rsp), %r8
movq 0x70(%rax), %rax
vmovaps 0x10(%rsi), %xmm2
vmaxss 0xc(%rsi), %xmm6, %xmm1
vbroadcastss 0x1d12c80(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1ce2d97(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1cde4b5(%rip), %xmm4 # 0x1eec714
vdivps %xmm2, %xmm4, %xmm2
vbroadcastss 0x1d12cf4(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq %rax, -0x8(%r8)
vbroadcastss 0x1d11c91(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d11c88(%rip), %xmm4 # 0x1f1ff14
vbroadcastss (%rsi), %ymm7
vbroadcastss 0x4(%rsi), %ymm8
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x8(%rsi), %ymm9
xorl %r9d, %r9d
vucomiss %xmm6, %xmm3
setb %r9b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovaps %ymm4, 0x4c0(%rsp)
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm11
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm12
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm13
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm14
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
shll $0x5, %r9d
xorl %r10d, %r10d
vucomiss %xmm6, %xmm4
vinsertf128 $0x1, %xmm2, %ymm2, %ymm15
setb %r10b
shll $0x5, %r10d
orq $0x40, %r10
xorl %r11d, %r11d
vucomiss %xmm6, %xmm5
setb %r11b
shll $0x5, %r11d
orq $0x80, %r11
movq %r9, %rbx
xorq $0x20, %rbx
movq %r10, %r15
xorq $0x20, %r15
movq %r11, %rax
xorq $0x20, %rax
movq %rax, 0xb8(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm5
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
leaq 0x1f41c10(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm1
vbroadcastss 0x1d12b3d(%rip), %ymm6 # 0x1f20ec4
vbroadcastss 0x1cde384(%rip), %ymm10 # 0x1eec714
vbroadcastss 0x1ce2633(%rip), %ymm0 # 0x1ef09cc
vmovaps %ymm1, 0x480(%rsp)
vblendvps %ymm1, %ymm0, %ymm10, %ymm0
vmovaps %ymm0, 0x460(%rsp)
movq %rdx, 0x8(%rsp)
movq %rsi, 0x20(%rsp)
vmovaps %ymm7, 0x240(%rsp)
vmovaps %ymm8, 0x220(%rsp)
vmovaps %ymm9, 0x200(%rsp)
movq %r9, 0x50(%rsp)
vmovaps %ymm11, 0x1e0(%rsp)
vmovaps %ymm12, 0x1c0(%rsp)
vmovaps %ymm13, 0x1a0(%rsp)
vmovaps %ymm14, 0x180(%rsp)
movq %r10, 0x48(%rsp)
vmovaps %ymm15, 0x160(%rsp)
movq %r11, 0x40(%rsp)
movq %rbx, 0x38(%rsp)
movq %r15, 0x30(%rsp)
vmovaps %ymm5, 0x140(%rsp)
leaq 0x6e0(%rsp), %rax
cmpq %rax, %r8
je 0x20f446
movq -0x8(%r8), %r14
addq $-0x8, %r8
testb $0x8, %r14b
jne 0x20e544
movq %r14, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r9), %ymm0, %ymm1
vaddps 0x40(%rax,%r9), %ymm1, %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps 0x4c0(%rsp), %ymm1, %ymm1
vmulps 0x100(%rax,%r10), %ymm0, %ymm2
vaddps 0x40(%rax,%r10), %ymm2, %ymm2
vmaxps %ymm1, %ymm5, %ymm1
vsubps %ymm8, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmulps %ymm2, %ymm11, %ymm2
vaddps 0x40(%rax,%r11), %ymm3, %ymm3
vsubps %ymm9, %ymm3, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%rbx), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%rbx), %ymm3, %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm13, %ymm2
vmovaps 0x4a0(%rsp), %ymm3
vminps %ymm2, %ymm3, %ymm2
vmulps 0x100(%rax,%r15), %ymm0, %ymm3
vaddps 0x40(%rax,%r15), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm14, %ymm3
movq 0xb8(%rsp), %rcx
vmulps 0x100(%rax,%rcx), %ymm0, %ymm4
vaddps 0x40(%rax,%rcx), %ymm4, %ymm4
vsubps %ymm9, %ymm4, %ymm4
vmulps %ymm4, %ymm15, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r14d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x20e58e
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r13d
testb $0x8, %r14b
jne 0x20e58a
testq %r13, %r13
je 0x20e5bb
andq $-0x10, %r14
bsfq %r13, %rax
leaq -0x1(%r13), %rdi
xorl %r12d, %r12d
movq (%r14,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdi
jne 0x20e5c1
movq %rax, %r14
testl %r12d, %r12d
je 0x20e43e
jmp 0x20e607
pushq $0x6
jmp 0x20e5bd
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x20e533
pushq $0x4
popq %r12
jmp 0x20e57f
movq %rax, (%r8)
addq $0x8, %r8
bsfq %rdi, %rcx
leaq -0x1(%rdi), %rax
movq (%r14,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdi, %rax
je 0x20e5ff
movq %rcx, (%r8)
addq $0x8, %r8
bsfq %rax, %rcx
leaq -0x1(%rax), %rdi
jmp 0x20e5d0
movq %rcx, %r14
jmp 0x20e57f
cmpl $0x6, %r12d
jne 0x20f43c
movl %r14d, %eax
andl $0xf, %eax
xorl %r12d, %r12d
addq $-0x8, %rax
setne %cl
je 0x20f43c
movq %r8, 0x58(%rsp)
andq $-0x10, %r14
movq (%rdx), %r8
xorl %edi, %edi
movq %r13, 0x18(%rsp)
movq %rax, 0x98(%rsp)
movq %r8, 0x10(%rsp)
movb %cl, 0x7(%rsp)
movq %rdi, 0xa0(%rsp)
imulq $0x60, %rdi, %rax
vmovss 0x1c(%rsi), %xmm0
movl 0x40(%r14,%rax), %ecx
movq 0x1e8(%r8), %rdx
movq (%rdx,%rcx,8), %rsi
vmovss 0x28(%rsi), %xmm1
vmovss 0x2c(%rsi), %xmm2
vmovss 0x30(%rsi), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x1ce2333(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm1, %xmm2, %xmm3
vcvttss2si %xmm3, %ecx
movslq %ecx, %rcx
movq 0xe0(%rsi), %r15
imulq $0x38, %rcx, %r9
movq (%r15,%r9), %rsi
movl (%r14,%rax), %r11d
movl 0x4(%r14,%rax), %r8d
movq %r8, 0x2e0(%rsp)
vmovups (%rsi,%r11,4), %xmm2
movl 0x10(%r14,%rax), %edi
movq %rdi, 0x3c0(%rsp)
vmovups (%rsi,%rdi,4), %xmm1
movl 0x20(%r14,%rax), %edi
movq %rdi, 0x120(%rsp)
vmovups (%rsi,%rdi,4), %xmm7
movl 0x30(%r14,%rax), %edi
movq %rdi, 0x60(%rsp)
vmovups (%rsi,%rdi,4), %xmm8
movl 0x44(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %rsi
movq %rsi, 0xb0(%rsp)
movq (%rsi,%r9), %rsi
vmovups (%rsi,%r8,4), %xmm4
movl 0x14(%r14,%rax), %edi
movq %rdi, 0x360(%rsp)
vmovups (%rsi,%rdi,4), %xmm5
movl 0x24(%r14,%rax), %edi
movq %rdi, 0x3a0(%rsp)
vmovups (%rsi,%rdi,4), %xmm10
movl 0x34(%r14,%rax), %edi
movq %rdi, 0x3e0(%rsp)
vmovups (%rsi,%rdi,4), %xmm9
movl 0x48(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rsi
movq 0xe0(%rsi), %r8
movq (%r8,%r9), %rsi
movl 0x8(%r14,%rax), %edi
vmovups (%rsi,%rdi,4), %xmm13
movl 0x18(%r14,%rax), %r10d
movq %r10, 0xa8(%rsp)
vmovups (%rsi,%r10,4), %xmm6
movl 0x28(%r14,%rax), %r10d
movq %r10, 0x260(%rsp)
vmovups (%rsi,%r10,4), %xmm11
movl 0x38(%r14,%rax), %r10d
movq %r10, 0x340(%rsp)
vmovups (%rsi,%r10,4), %xmm12
movl 0x4c(%r14,%rax), %esi
movq (%rdx,%rsi,8), %rdx
vsubss %xmm3, %xmm0, %xmm0
vmovaps %xmm0, 0x380(%rsp)
movq 0xe0(%rdx), %rsi
movq (%rsi,%r9), %rbx
movl 0xc(%r14,%rax), %edx
vmovups (%rbx,%rdx,4), %xmm0
movl 0x1c(%r14,%rax), %r12d
vmovups (%rbx,%r12,4), %xmm3
vunpcklps %xmm13, %xmm2, %xmm14 # xmm14 = xmm2[0],xmm13[0],xmm2[1],xmm13[1]
vunpckhps %xmm13, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm13[2],xmm2[3],xmm13[3]
vunpcklps %xmm0, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm0[0],xmm4[1],xmm0[1]
vunpckhps %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
movl 0x2c(%r14,%rax), %r9d
vmovups (%rbx,%r9,4), %xmm4
vunpcklps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vmovaps %xmm0, 0x300(%rsp)
vunpcklps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[0],xmm13[0],xmm14[1],xmm13[1]
vmovaps %xmm0, 0x450(%rsp)
vunpckhps %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[2],xmm13[2],xmm14[3],xmm13[3]
vmovaps %xmm0, 0x2a0(%rsp)
vunpcklps %xmm6, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpckhps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
vunpcklps %xmm3, %xmm5, %xmm2 # xmm2 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
vunpckhps %xmm3, %xmm5, %xmm3 # xmm3 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
movl 0x3c(%r14,%rax), %r10d
vmovups (%rbx,%r10,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x320(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x280(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x2c0(%rsp)
vunpcklps %xmm11, %xmm7, %xmm0 # xmm0 = xmm7[0],xmm11[0],xmm7[1],xmm11[1]
vunpckhps %xmm11, %xmm7, %xmm1 # xmm1 = xmm7[2],xmm11[2],xmm7[3],xmm11[3]
vunpcklps %xmm4, %xmm10, %xmm2 # xmm2 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
vunpckhps %xmm4, %xmm10, %xmm3 # xmm3 = xmm10[2],xmm4[2],xmm10[3],xmm4[3]
incl %ecx
movslq %ecx, %rcx
imulq $0x38, %rcx, %rbx
movq (%r15,%rbx), %rcx
vmovups (%rcx,%r11,4), %xmm4
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x440(%rsp)
vunpcklps %xmm2, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm1, 0x400(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x410(%rsp)
vunpcklps %xmm12, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
vunpckhps %xmm12, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm12[2],xmm8[3],xmm12[3]
vunpcklps %xmm5, %xmm9, %xmm2 # xmm2 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
vunpckhps %xmm5, %xmm9, %xmm3 # xmm3 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
movq (%r8,%rbx), %r8
vmovups (%r8,%rdi,4), %xmm5
vunpcklps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm1, 0x430(%rsp)
vunpcklps %xmm2, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x420(%rsp)
movq 0xb0(%rsp), %rdi
movq (%rdi,%rbx), %rdi
vunpcklps %xmm5, %xmm4, %xmm0 # xmm0 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0x2e0(%rsp), %r11
vmovups (%rdi,%r11,4), %xmm2
movq (%rsi,%rbx), %r11
vmovups (%r11,%rdx,4), %xmm3
vunpcklps %xmm3, %xmm2, %xmm4 # xmm4 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vunpckhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm3[2],xmm2[3],xmm3[3]
vunpcklps %xmm2, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpcklps %xmm4, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
vunpckhps %xmm4, %xmm0, %xmm7 # xmm7 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
movq 0x3c0(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0xa8(%rsp), %rdx
vmovups (%r8,%rdx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x360(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%r11,%r12,4), %xmm2
vunpcklps %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpckhps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vunpcklps %xmm1, %xmm0, %xmm14 # xmm14 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vunpckhps %xmm4, %xmm3, %xmm8 # xmm8 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
movq 0x120(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0x260(%rsp), %rdx
vmovups (%r8,%rdx,4), %xmm1
vunpcklps %xmm1, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x3a0(%rsp), %rdx
vmovups (%rdi,%rdx,4), %xmm1
vmovups (%r11,%r9,4), %xmm3
vunpcklps %xmm3, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vunpcklps %xmm1, %xmm0, %xmm15 # xmm15 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm9 # xmm9 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
movq 0x60(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
movq 0x340(%rsp), %rcx
vmovups (%r8,%rcx,4), %xmm1
movq 0x20(%rsp), %rsi
vunpcklps %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpckhps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
movq 0x3e0(%rsp), %rcx
vmovups (%rdi,%rcx,4), %xmm1
vmovups (%r11,%r10,4), %xmm4
vunpcklps %xmm4, %xmm1, %xmm11 # xmm11 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpckhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
vunpcklps %xmm1, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm11, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpckhps %xmm11, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm11[2],xmm5[3],xmm11[3]
vmovaps 0x380(%rsp), %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[0,0,0,0]
vmovss 0x1cddce3(%rip), %xmm1 # 0x1eec714
vsubss %xmm11, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm10
vmulps 0x450(%rsp), %xmm1, %xmm11
vaddps %xmm10, %xmm11, %xmm11
vmulps %xmm7, %xmm0, %xmm7
vmulps 0x2a0(%rsp), %xmm1, %xmm10
vaddps %xmm7, %xmm10, %xmm10
vmulps %xmm0, %xmm13, %xmm7
vmulps 0x300(%rsp), %xmm1, %xmm13
vaddps %xmm7, %xmm13, %xmm13
vmulps %xmm2, %xmm0, %xmm2
vmulps 0x280(%rsp), %xmm1, %xmm7
vaddps %xmm2, %xmm7, %xmm7
vmulps %xmm0, %xmm8, %xmm2
vmulps 0x2c0(%rsp), %xmm1, %xmm8
vaddps %xmm2, %xmm8, %xmm2
vmulps %xmm0, %xmm14, %xmm8
vmulps 0x320(%rsp), %xmm1, %xmm14
vaddps %xmm8, %xmm14, %xmm14
vmulps %xmm3, %xmm0, %xmm3
vmulps 0x400(%rsp), %xmm1, %xmm8
vaddps %xmm3, %xmm8, %xmm3
vmulps %xmm0, %xmm9, %xmm8
vmulps 0x410(%rsp), %xmm1, %xmm9
vaddps %xmm8, %xmm9, %xmm8
vmulps %xmm0, %xmm15, %xmm9
vmulps 0x440(%rsp), %xmm1, %xmm15
vaddps %xmm9, %xmm15, %xmm9
vmulps %xmm4, %xmm0, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm0
vmulps %xmm1, %xmm12, %xmm6
vbroadcastf128 0x40(%r14,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vaddps %xmm4, %xmm6, %xmm4
vmovaps %ymm12, 0x500(%rsp)
vbroadcastf128 0x50(%r14,%rax), %ymm6 # ymm6 = mem[0,1,0,1]
vmulps 0x420(%rsp), %xmm1, %xmm12
vaddps %xmm5, %xmm12, %xmm5
vmulps 0x430(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmovaps %ymm6, 0x4e0(%rsp)
vinsertf128 $0x1, %xmm3, %ymm11, %ymm1
vinsertf128 $0x1, %xmm8, %ymm10, %ymm3
vinsertf128 $0x1, %xmm9, %ymm13, %ymm6
vinsertf128 $0x1, %xmm7, %ymm7, %ymm7
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm14, %ymm14, %ymm14
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vinsertf128 $0x1, %xmm5, %ymm5, %ymm5
vinsertf128 $0x1, %xmm0, %ymm0, %ymm9
vbroadcastss (%rsi), %ymm10
vbroadcastss 0x4(%rsi), %ymm11
vbroadcastss 0x8(%rsi), %ymm12
vsubps %ymm10, %ymm1, %ymm1
vsubps %ymm11, %ymm3, %ymm3
vsubps %ymm12, %ymm6, %ymm15
vsubps %ymm10, %ymm7, %ymm13
vsubps %ymm11, %ymm2, %ymm8
vsubps %ymm12, %ymm14, %ymm14
vsubps %ymm10, %ymm4, %ymm2
vsubps %ymm11, %ymm5, %ymm4
vmovaps %ymm4, 0x120(%rsp)
vsubps %ymm12, %ymm9, %ymm6
vmovaps %ymm6, 0x60(%rsp)
vsubps %ymm1, %ymm2, %ymm11
vsubps %ymm3, %ymm4, %ymm12
vsubps %ymm15, %ymm6, %ymm0
vaddps %ymm3, %ymm4, %ymm4
vaddps %ymm6, %ymm15, %ymm5
vmulps %ymm4, %ymm0, %ymm7
vmulps %ymm5, %ymm12, %ymm9
vsubps %ymm7, %ymm9, %ymm9
vaddps %ymm1, %ymm2, %ymm7
vmulps %ymm5, %ymm11, %ymm5
vmovaps %ymm0, 0x320(%rsp)
vmulps %ymm7, %ymm0, %ymm10
vsubps %ymm5, %ymm10, %ymm5
vmovaps %ymm12, 0x360(%rsp)
vmulps %ymm7, %ymm12, %ymm7
vmovaps %ymm11, 0x380(%rsp)
vmulps %ymm4, %ymm11, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vbroadcastss 0x14(%rsi), %ymm11
vbroadcastss 0x18(%rsi), %ymm6
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm11, %ymm5
vbroadcastss 0x10(%rsi), %ymm0
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm0, %ymm9, %ymm5
vaddps %ymm4, %ymm5, %ymm10
vsubps %ymm8, %ymm3, %ymm9
vsubps %ymm14, %ymm15, %ymm12
vmovaps %ymm3, 0x3c0(%rsp)
vaddps %ymm3, %ymm8, %ymm4
vmovaps %ymm15, 0x3a0(%rsp)
vaddps %ymm14, %ymm15, %ymm5
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm5, %ymm9, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vsubps %ymm13, %ymm1, %ymm15
vmulps %ymm5, %ymm15, %ymm5
vmovaps %ymm1, 0x3e0(%rsp)
vaddps %ymm1, %ymm13, %ymm7
vmovaps %ymm12, 0x300(%rsp)
vmulps %ymm7, %ymm12, %ymm12
vsubps %ymm5, %ymm12, %ymm5
vmovaps %ymm9, %ymm12
vmulps %ymm7, %ymm9, %ymm7
vmulps %ymm4, %ymm15, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm11, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm0, %ymm3
vaddps %ymm4, %ymm3, %ymm4
vmovaps %ymm13, %ymm3
vsubps %ymm2, %ymm13, %ymm13
vaddps %ymm2, %ymm3, %ymm2
vmovaps 0x120(%rsp), %ymm1
vsubps %ymm1, %ymm8, %ymm9
vaddps %ymm1, %ymm8, %ymm1
vmovaps 0x60(%rsp), %ymm3
vsubps %ymm3, %ymm14, %ymm5
vaddps %ymm3, %ymm14, %ymm3
vmulps %ymm1, %ymm5, %ymm7
vmulps %ymm3, %ymm9, %ymm8
vsubps %ymm7, %ymm8, %ymm7
vmulps %ymm3, %ymm13, %ymm3
vmulps %ymm2, %ymm5, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm9, %ymm2
vmulps %ymm1, %ymm13, %ymm1
vsubps %ymm2, %ymm1, %ymm1
vmovaps %ymm6, 0x340(%rsp)
vmulps %ymm1, %ymm6, %ymm1
vmovaps %ymm11, 0x60(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm0, 0x120(%rsp)
vmulps %ymm7, %ymm0, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm4, %ymm10, %ymm2
vaddps %ymm2, %ymm1, %ymm0
vminps %ymm4, %ymm10, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d1218b(%rip), %ymm3 # 0x1f20ec4
vmovaps %ymm0, 0x280(%rsp)
vandps %ymm3, %ymm0, %ymm0
vbroadcastss 0x1d1217d(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm0, 0x2a0(%rsp)
vmulps %ymm3, %ymm0, %ymm3
vbroadcastss 0x1d1215b(%rip), %ymm7 # 0x1f20ec0
vxorps %ymm7, %ymm3, %ymm7
vcmpnltps %ymm7, %ymm2, %ymm2
vmovaps %ymm10, 0x2e0(%rsp)
vmovaps %ymm4, 0x2c0(%rsp)
vmaxps %ymm4, %ymm10, %ymm7
vmaxps %ymm1, %ymm7, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm0
vtestps %ymm0, %ymm0
je 0x20f35e
vmovaps %ymm0, 0x260(%rsp)
vmovaps 0x320(%rsp), %ymm0
vmovaps %ymm12, %ymm11
vmulps %ymm0, %ymm12, %ymm2
vmovaps 0x360(%rsp), %ymm1
vmovaps %ymm9, %ymm8
vmovaps 0x300(%rsp), %ymm9
vmulps %ymm1, %ymm9, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm8, %ymm9, %ymm7
vmulps %ymm5, %ymm12, %ymm12
vsubps %ymm7, %ymm12, %ymm12
vbroadcastss 0x1d120d8(%rip), %ymm4 # 0x1f20ec4
vandps %ymm4, %ymm2, %ymm2
vandps %ymm4, %ymm7, %ymm7
vcmpltps %ymm7, %ymm2, %ymm2
vblendvps %ymm2, %ymm3, %ymm12, %ymm14
vmulps %ymm5, %ymm15, %ymm2
vmulps %ymm0, %ymm15, %ymm3
vmovaps 0x380(%rsp), %ymm10
vmulps %ymm9, %ymm10, %ymm6
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm13, %ymm9, %ymm7
vsubps %ymm2, %ymm7, %ymm7
vandps %ymm4, %ymm6, %ymm6
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm6, %ymm2
vblendvps %ymm2, %ymm3, %ymm7, %ymm9
vmulps %ymm13, %ymm11, %ymm2
vmulps %ymm11, %ymm10, %ymm3
vmulps %ymm1, %ymm15, %ymm5
vmulps %ymm8, %ymm15, %ymm0
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm4, %ymm5, %ymm5
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm5, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm0
vmovdqa 0x260(%rsp), %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmulps 0x340(%rsp), %ymm0, %ymm2
vmulps 0x60(%rsp), %ymm9, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x120(%rsp), %ymm14, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm2, %ymm3
vmulps 0x3a0(%rsp), %ymm0, %ymm2
vmulps 0x3c0(%rsp), %ymm9, %ymm5
vaddps %ymm2, %ymm5, %ymm2
vmulps 0x3e0(%rsp), %ymm14, %ymm5
vrcpps %ymm3, %ymm6
vaddps %ymm2, %ymm5, %ymm2
vaddps %ymm2, %ymm2, %ymm2
vmulps %ymm6, %ymm3, %ymm5
vbroadcastss 0x1cdd842(%rip), %ymm4 # 0x1eec714
vsubps %ymm5, %ymm4, %ymm5
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0xc(%rsi), %ymm6
vmulps %ymm5, %ymm2, %ymm2
vcmpleps %ymm2, %ymm6, %ymm5
vbroadcastss 0x20(%rsi), %ymm6
vcmpleps %ymm6, %ymm2, %ymm6
vandps %ymm5, %ymm6, %ymm5
vcmpneqps 0x1d11ffb(%rip), %ymm3, %ymm3 # 0x1f20f00
vandps %ymm3, %ymm5, %ymm3
vextractf128 $0x1, %ymm3, %xmm5
vpackssdw %xmm5, %xmm3, %xmm3
vpand %xmm1, %xmm3, %xmm1
vpmovsxwd %xmm1, %xmm3
vpshufd $0xee, %xmm1, %xmm5 # xmm5 = xmm1[2,3,2,3]
vpmovsxwd %xmm5, %xmm5
vinsertf128 $0x1, %xmm5, %ymm3, %ymm3
vtestps %ymm3, %ymm3
je 0x20f35e
vmovaps 0x2e0(%rsp), %ymm4
vmovaps %ymm4, 0x520(%rsp)
vmovaps 0x2c0(%rsp), %ymm5
vmovaps %ymm5, 0x540(%rsp)
vmovaps 0x280(%rsp), %ymm7
vmovaps %ymm7, 0x560(%rsp)
vmovaps %ymm14, 0x580(%rsp)
vmovaps %ymm9, 0x5a0(%rsp)
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps %ymm3, 0x5e0(%rsp)
vmovaps %ymm2, 0x640(%rsp)
vmovaps 0x480(%rsp), %ymm6
vmovaps %ymm6, 0x6c0(%rsp)
vrcpps %ymm7, %ymm2
vmulps %ymm2, %ymm7, %ymm3
vbroadcastss 0x1cdd757(%rip), %ymm10 # 0x1eec714
vsubps %ymm3, %ymm10, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1ce2016(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x2a0(%rsp), %ymm7
vcmpnltps %ymm3, %ymm7, %ymm3
vandps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm4, %ymm3
vminps %ymm10, %ymm3, %ymm3
vmulps %ymm2, %ymm5, %ymm2
vminps %ymm10, %ymm2, %ymm2
vsubps %ymm3, %ymm10, %ymm4
vsubps %ymm2, %ymm10, %ymm5
vblendvps %ymm6, %ymm4, %ymm2, %ymm2
vblendvps %ymm6, %ymm5, %ymm3, %ymm3
vmovaps %ymm2, 0x620(%rsp)
vmovaps %ymm3, 0x600(%rsp)
vmovaps 0x460(%rsp), %ymm4
vmulps %ymm4, %ymm14, %ymm2
vmulps %ymm4, %ymm9, %ymm3
vmulps %ymm0, %ymm4, %ymm0
vmovaps %ymm2, 0x660(%rsp)
vmovaps %ymm3, 0x680(%rsp)
vmovaps %ymm0, 0x6a0(%rsp)
vpsllw $0xf, %xmm1, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
movq 0x8(%rsp), %rdx
vmovaps 0x240(%rsp), %ymm7
vmovaps 0x220(%rsp), %ymm8
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x160(%rsp), %ymm15
vmovaps 0x140(%rsp), %ymm5
vbroadcastss 0x1d11e08(%rip), %ymm6 # 0x1f20ec4
movq 0x10(%rsp), %r8
movb 0x7(%rsp), %dil
bsfq %r15, %r12
movl 0x500(%rsp,%r12,4), %eax
movq 0x1e8(%r8), %rcx
movq (%rcx,%rax,8), %r13
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r13)
je 0x20f0fc
movq 0x10(%rdx), %rbx
cmpq $0x0, 0x10(%rbx)
jne 0x20f11c
cmpq $0x0, 0x48(%r13)
jne 0x20f11c
xorl %eax, %eax
jmp 0x20f102
btcq %r12, %r15
movb $0x1, %al
movq 0x18(%rsp), %r13
xorl %r12d, %r12d
testb %al, %al
je 0x20f40d
testq %r15, %r15
jne 0x20f0c6
jmp 0x20f3cb
vmovss 0x600(%rsp,%r12,4), %xmm0
vmovd 0x620(%rsp,%r12,4), %xmm1
movq 0x8(%rdx), %rcx
movl 0x4e0(%rsp,%r12,4), %edi
vmovss 0x660(%rsp,%r12,4), %xmm2
vmovss 0x680(%rsp,%r12,4), %xmm3
vmovss 0x6a0(%rsp,%r12,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovd %xmm1, 0xd0(%rsp)
movl %edi, 0xd4(%rsp)
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x60(%rsp)
vmovd 0x640(%rsp,%r12,4), %xmm0
vmovd %xmm0, 0x20(%rsi)
orl $-0x1, 0x2c(%rsp)
leaq 0x2c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %rsi, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
je 0x20f292
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x10(%rsp), %r8
vmovaps 0x140(%rsp), %ymm5
vmovaps 0x160(%rsp), %ymm15
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x220(%rsp), %ymm8
vmovaps 0x240(%rsp), %ymm7
movq 0x20(%rsp), %rsi
movq 0x8(%rsp), %rdx
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20f329
movq 0x10(%rbx), %rax
testq %rax, %rax
je 0x20f325
testb $0x2, (%rbx)
jne 0x20f2ab
testb $0x40, 0x3e(%r13)
je 0x20f318
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x10(%rsp), %r8
vmovaps 0x140(%rsp), %ymm5
vmovaps 0x160(%rsp), %ymm15
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x220(%rsp), %ymm8
vmovaps 0x240(%rsp), %ymm7
movq 0x20(%rsp), %rsi
movq 0x8(%rsp), %rdx
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x20f329
xorl %eax, %eax
jmp 0x20f33a
vmovd 0x60(%rsp), %xmm0
vmovd %xmm0, 0x20(%rsi)
btcq %r12, %r15
movb $0x1, %al
vbroadcastss 0x1d11b81(%rip), %ymm6 # 0x1f20ec4
vbroadcastss 0x1cdd3c8(%rip), %ymm10 # 0x1eec714
movq 0x18(%rsp), %r13
xorl %r12d, %r12d
movb 0x7(%rsp), %dil
jmp 0x20f10a
movq 0x8(%rsp), %rdx
vmovaps 0x240(%rsp), %ymm7
vmovaps 0x220(%rsp), %ymm8
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1e0(%rsp), %ymm11
vmovaps 0x1c0(%rsp), %ymm12
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm14
vmovaps 0x160(%rsp), %ymm15
vmovaps 0x140(%rsp), %ymm5
vbroadcastss 0x1d11b07(%rip), %ymm6 # 0x1f20ec4
vbroadcastss 0x1cdd34e(%rip), %ymm10 # 0x1eec714
movq 0x10(%rsp), %r8
movq 0xa0(%rsp), %rdi
incq %rdi
movq 0x98(%rsp), %rax
cmpq %rax, %rdi
setb %cl
jne 0x20e647
movq 0x58(%rsp), %r8
movq 0x50(%rsp), %r9
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r11
movq 0x38(%rsp), %rbx
movq 0x30(%rsp), %r15
xorl %r12d, %r12d
jmp 0x20f43c
testb $0x1, %dil
movq 0x58(%rsp), %r8
movq 0x50(%rsp), %r9
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r11
movq 0x38(%rsp), %rbx
movq 0x30(%rsp), %r15
je 0x20f43c
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r12
cmpl $0x3, %r12d
jne 0x20e425
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
/* Occlusion (shadow-ray) query for a single ray against an N-wide BVH.
   On the first confirmed primitive hit the ray is marked occluded by
   setting ray.tfar to -inf and traversal stops early; no hit record is
   produced. Template parameters select the branching factor N, the node
   types supported (types), robust vs. fast traversal math (robust), and
   the per-leaf primitive intersector policy. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out: tfar < 0 is the "already occluded" sentinel (set to -inf below on a hit) */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state: explicit DFS stack of node references still to be traversed */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer (one past the last valid entry)
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers; tnear/tfar are clamped to >= 0 */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop -- the `pop:` label is the target of the goto below when a
   node intersection yields no hit children */
while (true) pop:
{
/* pop next node; empty stack means traversal is complete with no occlusion */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop: descend through inner nodes until a leaf is reached */
while (true)
{
/* intersect the ray with all N children of the current node at once */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* intersect() returning false signals `cur` is a leaf -> fall through to leaf handling */
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children; for occlusion any-hit
   ordering suffices (no need to visit the nearest child first) */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
/* any primitive hit occludes the ray: flag it with tfar = -inf and stop */
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack (node built on demand by the leaf intersector) */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1528, %rsp # imm = 0x1528
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x210125
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x210125
movq %rdx, %r8
leaq 0x388(%rsp), %r10
movq 0x70(%rax), %rax
movq %rax, -0x8(%r10)
vmaxss 0xc(%rsi), %xmm1, %xmm2
vmovaps 0x10(%rsi), %xmm3
vbroadcastss 0x1d11a14(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce1b2b(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1cdd23b(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %ymm5
vmovups %ymm5, 0x360(%rsp)
vbroadcastss 0x4(%rsi), %ymm5
vmovups %ymm5, 0x340(%rsp)
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss 0x8(%rsi), %ymm4
vmovups %ymm4, 0x320(%rsp)
xorl %r11d, %r11d
vucomiss %xmm1, %xmm3
setb %r11b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovups %ymm4, 0x300(%rsp)
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm5
vmovups %ymm5, 0x2e0(%rsp)
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmovdqu %ymm3, 0x2c0(%rsp)
xorl %eax, %eax
vucomiss %xmm1, %xmm4
setb %al
xorl %ecx, %ecx
vucomiss %xmm1, %xmm5
setb %cl
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovups %ymm1, 0x2a0(%rsp)
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovups %ymm0, 0x280(%rsp)
shll $0x3, %r11d
movq %r11, %rdi
xorq $0x8, %rdi
leal 0x10(,%rax,8), %r9d
movq %r9, %rbx
xorq $0x8, %rbx
leal 0x20(,%rcx,8), %r14d
movq %r14, %r15
xorq $0x8, %r15
leaq 0x1f409b5(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x100(%rsp)
movq %rdi, 0x88(%rsp)
movq %r9, 0x80(%rsp)
movq %rbx, 0x78(%rsp)
movq %r14, 0x70(%rsp)
movq %r15, 0x68(%rsp)
leaq 0x380(%rsp), %rax
cmpq %rax, %r10
je 0x210125
movq -0x8(%r10), %r13
addq $-0x8, %r10
testb $0x8, %r13b
jne 0x20f832
movq %r13, %rcx
andq $-0x10, %rcx
leaq 0x40(%rcx), %rax
testq %rcx, %rcx
cmoveq %rcx, %rax
vmovq (%rax), %xmm0
vmovq 0x4(%rax), %xmm1
vmovq 0x8(%rax), %xmm2
vmovq 0xc(%rax), %xmm3
vpminub %xmm2, %xmm0, %xmm2
vpcmpeqb %xmm2, %xmm0, %xmm0
vpminub %xmm3, %xmm1, %xmm2
vpcmpeqb %xmm2, %xmm1, %xmm1
vbroadcastss 0x30(%rax), %ymm3
vbroadcastss 0x3c(%rax), %ymm4
vmovq (%rax,%r11), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vmovq 0x4(%rax,%r11), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vinsertf128 $0x1, %xmm5, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm2, %ymm4, %ymm2
vaddps %ymm2, %ymm3, %ymm2
vmovq (%rax,%rdi), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vmovq 0x4(%rax,%rdi), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vinsertf128 $0x1, %xmm6, %ymm5, %ymm5
vcvtdq2ps %ymm5, %ymm5
vmulps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x34(%rax), %ymm5
vbroadcastss 0x40(%rax), %ymm6
vmovq (%rax,%r9), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vmovq 0x4(%rax,%r9), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmovq (%rax,%rbx), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x4(%rax,%rbx), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm6, %ymm6
vaddps %ymm6, %ymm5, %ymm5
vmovq (%rax,%r14), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vmovq 0x4(%rax,%r14), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vmovq (%rax,%r15), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x4(%rax,%r15), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vbroadcastss 0x44(%rax), %ymm8
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm6, %ymm8, %ymm6
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm8, %ymm7
vbroadcastss 0x38(%rax), %ymm8
vaddps %ymm6, %ymm8, %ymm6
vaddps %ymm7, %ymm8, %ymm7
vmovups 0x360(%rsp), %ymm8
vsubps %ymm8, %ymm2, %ymm2
vmovups 0x300(%rsp), %ymm11
vmulps %ymm2, %ymm11, %ymm2
vmovups 0x340(%rsp), %ymm9
vsubps %ymm9, %ymm4, %ymm4
vmovups 0x2e0(%rsp), %ymm12
vmulps %ymm4, %ymm12, %ymm4
vmaxps %ymm4, %ymm2, %ymm2
vmovups 0x320(%rsp), %ymm10
vsubps %ymm10, %ymm6, %ymm4
vmovups 0x2c0(%rsp), %ymm6
vmulps %ymm4, %ymm6, %ymm4
vmaxps 0x2a0(%rsp), %ymm4, %ymm4
vmaxps %ymm4, %ymm2, %ymm2
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vsubps %ymm9, %ymm5, %ymm4
vmulps %ymm4, %ymm12, %ymm4
vminps %ymm4, %ymm3, %ymm3
vsubps %ymm10, %ymm7, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vminps 0x280(%rsp), %ymm4, %ymm4
vminps %ymm4, %ymm3, %ymm3
vcmpleps %ymm3, %ymm2, %ymm2
vpcmpeqd %xmm3, %xmm3, %xmm3
vpxor %xmm3, %xmm0, %xmm0
vpmovsxbd %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpmovsxbd %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm0, %ymm0
vcvtdq2ps %ymm0, %ymm0
vbroadcastss 0x1ce11a7(%rip), %ymm1 # 0x1ef09cc
vcmpltps %ymm0, %ymm1, %ymm0
vandps %ymm0, %ymm2, %ymm0
vmovmskps %ymm0, %ebp
testb $0x8, %r13b
jne 0x20f872
testq %rbp, %rbp
je 0x20f876
andq $-0x10, %r13
bsfq %rbp, %rax
leaq -0x1(%rbp), %rdx
xorl %r12d, %r12d
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
andq %rbp, %rdx
jne 0x20f87c
movq %rax, %r13
testl %r12d, %r12d
je 0x20f614
jmp 0x20f8b9
pushq $0x6
jmp 0x20f878
pushq $0x4
popq %r12
jmp 0x20f867
movq %rax, (%r10)
addq $0x8, %r10
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
andq %rdx, %rax
je 0x20f8b4
movq %rcx, (%r10)
addq $0x8, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x20f88b
movq %rcx, %r13
jmp 0x20f867
cmpl $0x6, %r12d
jne 0x21011b
movl %r13d, %eax
andl $0xf, %eax
xorl %r12d, %r12d
addq $-0x8, %rax
setne %cl
je 0x21011b
andq $-0x10, %r13
movq (%r8), %rdi
xorl %edx, %edx
movq %rax, 0x90(%rsp)
movq %rdi, 0x20(%rsp)
movb %cl, 0xe(%rsp)
movq %rdx, 0x98(%rsp)
imulq $0x50, %rdx, %rax
movl 0x30(%r13,%rax), %edx
movq 0x228(%rdi), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%r13,%rax), %r9d
movl 0x4(%r13,%rax), %edi
vmovups (%rdx,%r9,4), %xmm2
movl 0x10(%r13,%rax), %r9d
vmovups (%rdx,%r9,4), %xmm1
movl 0x20(%r13,%rax), %r9d
vmovups (%rdx,%r9,4), %xmm0
movl 0x34(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm5
movl 0x14(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm4
movl 0x24(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm3
movl 0x38(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm6
movl 0x18(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm7
movl 0x28(%r13,%rax), %edi
vmovups (%rdx,%rdi,4), %xmm8
movq 0x20(%rsp), %rdi
movl 0x3c(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm9
movl 0x1c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm10
movl 0x2c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm11
movb 0xe(%rsp), %dl
vunpcklps %xmm6, %xmm2, %xmm12 # xmm12 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpckhps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
vunpcklps %xmm9, %xmm5, %xmm6 # xmm6 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vmovaps %xmm2, 0x10(%rsp)
vunpcklps %xmm6, %xmm12, %xmm5 # xmm5 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
vunpckhps %xmm6, %xmm12, %xmm6 # xmm6 = xmm12[2],xmm6[2],xmm12[3],xmm6[3]
vunpcklps %xmm7, %xmm1, %xmm9 # xmm9 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
vunpckhps %xmm7, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
vunpcklps %xmm10, %xmm4, %xmm7 # xmm7 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm4, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
vunpcklps %xmm7, %xmm9, %xmm10 # xmm10 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
vunpckhps %xmm7, %xmm9, %xmm15 # xmm15 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
vunpcklps %xmm8, %xmm0, %xmm9 # xmm9 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm11, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vunpcklps %xmm3, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm9, %xmm12 # xmm12 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
vunpckhps %xmm8, %xmm9, %xmm8 # xmm8 = xmm9[2],xmm8[2],xmm9[3],xmm8[3]
vmovaps 0x30(%r13,%rax), %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vmovaps 0x40(%r13,%rax), %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
vbroadcastss (%rsi), %xmm9
vbroadcastss 0x4(%rsi), %xmm13
vbroadcastss 0x8(%rsi), %xmm14
vsubps %xmm9, %xmm5, %xmm7
vsubps %xmm13, %xmm6, %xmm3
vmovaps 0x10(%rsp), %xmm0
vsubps %xmm14, %xmm0, %xmm1
vsubps %xmm9, %xmm10, %xmm11
vsubps %xmm13, %xmm15, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vsubps %xmm14, %xmm2, %xmm0
vmovaps %xmm0, 0x40(%rsp)
vsubps %xmm9, %xmm12, %xmm12
vsubps %xmm13, %xmm8, %xmm10
vsubps %xmm14, %xmm4, %xmm2
vmovaps %xmm2, 0x10(%rsp)
vsubps %xmm7, %xmm12, %xmm14
vsubps %xmm3, %xmm10, %xmm15
vsubps %xmm1, %xmm2, %xmm0
vaddps %xmm3, %xmm10, %xmm4
vaddps %xmm1, %xmm2, %xmm5
vmulps %xmm4, %xmm0, %xmm8
vmulps %xmm5, %xmm15, %xmm9
vsubps %xmm8, %xmm9, %xmm13
vaddps %xmm7, %xmm12, %xmm8
vmulps %xmm5, %xmm14, %xmm5
vmovaps %xmm0, 0x150(%rsp)
vmulps %xmm0, %xmm8, %xmm9
vsubps %xmm5, %xmm9, %xmm5
vmovaps %xmm15, 0x160(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vmovaps %xmm14, 0x170(%rsp)
vmulps %xmm4, %xmm14, %xmm4
vsubps %xmm8, %xmm4, %xmm4
vbroadcastss 0x18(%rsi), %xmm0
vmulps %xmm4, %xmm0, %xmm4
vbroadcastss 0x14(%rsi), %xmm6
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vbroadcastss 0x10(%rsi), %xmm15
vmulps %xmm13, %xmm15, %xmm5
vaddps %xmm4, %xmm5, %xmm9
vmovaps 0x30(%rsp), %xmm2
vsubps %xmm2, %xmm3, %xmm13
vmovaps 0x40(%rsp), %xmm14
vsubps %xmm14, %xmm1, %xmm8
vmovaps %xmm3, 0x190(%rsp)
vaddps %xmm2, %xmm3, %xmm4
vmovaps %xmm1, 0x180(%rsp)
vaddps %xmm1, %xmm14, %xmm5
vmovaps %xmm14, %xmm1
vmulps %xmm4, %xmm8, %xmm14
vmulps %xmm5, %xmm13, %xmm3
vsubps %xmm14, %xmm3, %xmm3
vsubps %xmm11, %xmm7, %xmm14
vmulps %xmm5, %xmm14, %xmm5
vmovaps %xmm7, 0x1a0(%rsp)
vaddps %xmm7, %xmm11, %xmm7
vmovaps %xmm8, 0x130(%rsp)
vmulps %xmm7, %xmm8, %xmm8
vsubps %xmm5, %xmm8, %xmm5
vmovaps %xmm13, 0x140(%rsp)
vmulps %xmm7, %xmm13, %xmm7
vmulps %xmm4, %xmm14, %xmm4
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm0, %xmm4
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm3, %xmm15, %xmm3
vaddps %xmm4, %xmm3, %xmm4
vsubps %xmm12, %xmm11, %xmm5
vaddps %xmm12, %xmm11, %xmm3
vsubps %xmm10, %xmm2, %xmm12
vaddps %xmm2, %xmm10, %xmm2
vmovaps %xmm0, %xmm11
vmovaps 0x10(%rsp), %xmm0
vsubps %xmm0, %xmm1, %xmm8
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm2, %xmm8, %xmm1
vmulps %xmm0, %xmm12, %xmm7
vsubps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vmulps %xmm3, %xmm8, %xmm7
vsubps %xmm0, %xmm7, %xmm0
vmulps %xmm3, %xmm12, %xmm3
vmulps %xmm2, %xmm5, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps %xmm11, 0x30(%rsp)
vmulps %xmm2, %xmm11, %xmm2
vmovaps %xmm6, 0x10(%rsp)
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm15, 0x40(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm4, %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm11
vminps %xmm4, %xmm9, %xmm1
vminps %xmm0, %xmm1, %xmm1
vbroadcastss 0x1d112a3(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm11, %xmm15
vbroadcastss 0x1d1129e(%rip), %xmm2 # 0x1f20ecc
vmulps %xmm2, %xmm15, %xmm2
vbroadcastss 0x1d11285(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm2, %xmm3
vcmpnltps %xmm3, %xmm1, %xmm1
vmovaps %xmm9, 0x120(%rsp)
vmaxps %xmm4, %xmm9, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm2, %xmm0, %xmm0
vorps %xmm0, %xmm1, %xmm0
movb $0x0, 0xf(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0x200(%rsp)
vtestps 0x100(%rsp), %xmm0
je 0x2100ac
vmovaps 0x150(%rsp), %xmm6
vmovaps 0x140(%rsp), %xmm10
vmovaps %xmm0, 0x110(%rsp)
vmulps %xmm6, %xmm10, %xmm0
vmovaps 0x160(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm13
vmulps %xmm13, %xmm9, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm12, %xmm13, %xmm2
vmulps %xmm8, %xmm10, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d111f3(%rip), %xmm7 # 0x1f20ec4
vandps %xmm7, %xmm0, %xmm0
vandps %xmm7, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm0
vmulps %xmm8, %xmm14, %xmm1
vmulps %xmm6, %xmm14, %xmm2
vmovaps 0x170(%rsp), %xmm8
vmulps %xmm13, %xmm8, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm5, %xmm13, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm7, %xmm3, %xmm3
vandps %xmm7, %xmm1, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm1
vmulps %xmm5, %xmm10, %xmm2
vmulps %xmm10, %xmm8, %xmm3
vmulps %xmm14, %xmm9, %xmm5
vmulps %xmm12, %xmm14, %xmm6
vsubps %xmm5, %xmm3, %xmm3
vsubps %xmm2, %xmm6, %xmm6
vandps %xmm7, %xmm5, %xmm5
vandps %xmm7, %xmm2, %xmm2
vcmpltps %xmm2, %xmm5, %xmm2
vblendvps %xmm2, %xmm3, %xmm6, %xmm2
vmulps 0x30(%rsp), %xmm2, %xmm3
vmulps 0x10(%rsp), %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vmulps 0x40(%rsp), %xmm0, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm3, %xmm5
vmulps 0x180(%rsp), %xmm2, %xmm3
vmulps 0x190(%rsp), %xmm1, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vmulps 0x1a0(%rsp), %xmm0, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm3, %xmm3
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm7
vbroadcastss 0x1cdc976(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0xc(%rsi), %xmm6
vcmpleps %xmm3, %xmm6, %xmm6
vbroadcastss 0x20(%rsi), %xmm7
vcmpleps %xmm7, %xmm3, %xmm7
vandps %xmm7, %xmm6, %xmm6
vcmpneqps 0x1cdbc3f(%rip), %xmm5, %xmm5 # 0x1eeba10
vandps %xmm6, %xmm5, %xmm6
vmovaps 0x110(%rsp), %xmm5
vandps 0x100(%rsp), %xmm5, %xmm5
vpslld $0x1f, %xmm6, %xmm6
vpsrad $0x1f, %xmm6, %xmm6
vtestps %xmm5, %xmm6
je 0x2100ac
vandps %xmm5, %xmm6, %xmm5
vmovaps 0x120(%rsp), %xmm6
vmovaps %xmm6, 0x1d0(%rsp)
vmovaps %xmm4, 0x1e0(%rsp)
vmovaps %xmm11, 0x1f0(%rsp)
movq %rax, 0x200(%rsp)
vmovaps %xmm5, 0x210(%rsp)
vmovaps %xmm3, 0x240(%rsp)
vmovaps %xmm0, 0x250(%rsp)
vmovaps %xmm1, 0x260(%rsp)
vmovaps %xmm2, 0x270(%rsp)
vrcpps %xmm11, %xmm0
vmulps %xmm0, %xmm11, %xmm1
vbroadcastss 0x1cdc8a9(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1ce1168(%rip), %xmm1 # 0x1ef0fe8
vcmpnltps %xmm1, %xmm15, %xmm1
vandps %xmm0, %xmm1, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vminps %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x220(%rsp)
vmulps %xmm0, %xmm4, %xmm0
vminps %xmm2, %xmm0, %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovmskps %xmm5, %r15d
bsfq %r15, %r14
movl 0x1c0(%rsp,%r14,4), %eax
movq 0x1e8(%rdi), %rcx
movq (%rcx,%rax,8), %rbx
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rbx)
je 0x20fee8
movq 0x10(%r8), %r12
cmpq $0x0, 0x10(%r12)
jne 0x20ff00
cmpq $0x0, 0x48(%rbx)
jne 0x20ff00
xorl %eax, %eax
xorl %r12d, %r12d
jmp 0x20feee
btcq %r14, %r15
movb $0x1, %al
testb %al, %al
je 0x2100ec
testq %r15, %r15
jne 0x20feaf
jmp 0x2100ac
vmovss 0x220(%rsp,%r14,4), %xmm0
vmovss 0x230(%rsp,%r14,4), %xmm1
movq 0x8(%r8), %rcx
movl 0x1b0(%rsp,%r14,4), %edx
vmovss 0x250(%rsp,%r14,4), %xmm2
vmovss 0x260(%rsp,%r14,4), %xmm3
vmovss 0x270(%rsp,%r14,4), %xmm4
vmovss %xmm2, 0xa0(%rsp)
vmovss %xmm3, 0xa4(%rsp)
vmovss %xmm4, 0xa8(%rsp)
vmovss %xmm0, 0xac(%rsp)
vmovss %xmm1, 0xb0(%rsp)
movl %edx, 0xb4(%rsp)
movl %eax, 0xb8(%rsp)
movl (%rcx), %eax
movl %eax, 0xbc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xc0(%rsp)
vmovss 0x20(%rsi), %xmm0
vmovss %xmm0, 0x10(%rsp)
vmovss 0x240(%rsp,%r14,4), %xmm0
vmovss %xmm0, 0x20(%rsi)
orl $-0x1, 0x2c(%rsp)
leaq 0x2c(%rsp), %rax
movq %rax, 0xd0(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0xd8(%rsp)
movq %rcx, 0xe0(%rsp)
movq %rsi, 0xe8(%rsp)
leaq 0xa0(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x1, 0xf8(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %r8, 0x60(%rsp)
movq %r10, 0x58(%rsp)
je 0x21003f
leaq 0xd0(%rsp), %rdi
movq %rsi, 0x40(%rsp)
movq %r11, 0x30(%rsp)
vzeroupper
callq *%rax
movq 0x20(%rsp), %rdi
movq 0x30(%rsp), %r11
movq 0x58(%rsp), %r10
movq 0x40(%rsp), %rsi
movq 0x60(%rsp), %r8
movq 0xd0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x21008f
movq 0x10(%r12), %rax
testq %rax, %rax
je 0x21008b
testb $0x2, (%r12)
jne 0x210056
testb $0x40, 0x3e(%rbx)
je 0x21007e
leaq 0xd0(%rsp), %rdi
movq %rsi, %r12
movq %r11, %rbx
vzeroupper
callq *%rax
movq 0x20(%rsp), %rdi
movq %rbx, %r11
movq 0x58(%rsp), %r10
movq %r12, %rsi
movq 0x60(%rsp), %r8
movq 0xd0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x21008f
xorl %eax, %eax
jmp 0x2100a0
vmovss 0x10(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
btcq %r14, %r15
movb $0x1, %al
xorl %r12d, %r12d
movb 0xe(%rsp), %dl
jmp 0x20feee
movq 0x98(%rsp), %rdx
incq %rdx
movq 0x90(%rsp), %rax
cmpq %rax, %rdx
setb %cl
jne 0x20f8ef
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %rbx
movq 0x70(%rsp), %r14
movq 0x68(%rsp), %r15
jmp 0x21011b
testb $0x1, %dl
movq 0x88(%rsp), %rdi
movq 0x80(%rsp), %r9
movq 0x78(%rsp), %rbx
movq 0x70(%rsp), %r14
movq 0x68(%rsp), %r15
je 0x21011b
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r12
cmpl $0x3, %r12d
jne 0x20f5fb
addq $0x1528, %rsp # imm = 0x1528
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1498, %rsp # imm = 0x1498
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x21016a
addq $0x1498, %rsp # imm = 0x1498
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x210155
leaq 0x2f8(%rsp), %rdi
vmovaps 0x10(%r14), %xmm3
vmaxss 0xc(%r14), %xmm1, %xmm2
vbroadcastss 0x1d10d2b(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce0e42(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
movq 0x70(%rax), %rax
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1cdc54e(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%r14), %ymm9
vbroadcastss 0x4(%r14), %ymm10
vbroadcastss 0x8(%r14), %ymm11
vaddps %xmm3, %xmm4, %xmm3
xorl %r8d, %r8d
vucomiss %xmm1, %xmm3
movq %rax, -0x8(%rdi)
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm12
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm13
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
xorl %eax, %eax
vucomiss %xmm1, %xmm4
setb %al
xorl %ecx, %ecx
vucomiss %xmm1, %xmm5
vinsertf128 $0x1, %xmm3, %ymm3, %ymm14
setb %cl
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm15
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovups %ymm0, 0x2d0(%rsp)
shll $0x3, %r8d
movq %r8, %r9
xorq $0x8, %r9
leal 0x10(,%rax,8), %r10d
movq %r10, %r11
xorq $0x8, %r11
leal 0x20(,%rcx,8), %r15d
movq %r15, %r12
xorq $0x8, %r12
leaq 0x1f3fd03(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x120(%rsp)
leaq 0x2f0(%rsp), %r13
vmovups %ymm9, 0x2b0(%rsp)
vmovups %ymm10, 0x290(%rsp)
vmovups %ymm11, 0x270(%rsp)
vmovups %ymm12, 0x250(%rsp)
vmovups %ymm13, 0x230(%rsp)
vmovups %ymm14, 0x210(%rsp)
vmovups %ymm15, 0x1f0(%rsp)
movq %r15, 0x58(%rsp)
movq %r12, 0x50(%rsp)
cmpq %r13, %rdi
je 0x210155
movq -0x8(%rdi), %rbp
addq $-0x8, %rdi
testb $0x8, %bpl
jne 0x2104d8
movq %rbp, %rcx
andq $-0x10, %rcx
leaq 0x40(%rcx), %rax
testq %rcx, %rcx
cmoveq %rcx, %rax
vmovq (%rax), %xmm0
vmovq 0x4(%rax), %xmm1
vmovq 0x8(%rax), %xmm2
vmovq 0xc(%rax), %xmm3
vpminub %xmm2, %xmm0, %xmm2
vpcmpeqb %xmm2, %xmm0, %xmm0
vpminub %xmm3, %xmm1, %xmm2
vpcmpeqb %xmm2, %xmm1, %xmm1
vbroadcastss 0x30(%rax), %ymm3
vbroadcastss 0x3c(%rax), %ymm4
vmovq (%rax,%r8), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vmovq 0x4(%rax,%r8), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vinsertf128 $0x1, %xmm5, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm2, %ymm4, %ymm2
vaddps %ymm2, %ymm3, %ymm2
vmovq (%rax,%r9), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vmovq 0x4(%rax,%r9), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vinsertf128 $0x1, %xmm6, %ymm5, %ymm5
vcvtdq2ps %ymm5, %ymm5
vmulps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x34(%rax), %ymm5
vbroadcastss 0x40(%rax), %ymm6
vmovq (%rax,%r10), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vmovq 0x4(%rax,%r10), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmovq (%rax,%r11), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x4(%rax,%r11), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm6, %ymm6
vaddps %ymm6, %ymm5, %ymm5
vmovq (%rax,%r15), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vmovq 0x4(%rax,%r15), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vmovq (%rax,%r12), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x4(%rax,%r12), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vbroadcastss 0x44(%rax), %ymm8
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm6, %ymm8, %ymm6
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm8, %ymm7
vbroadcastss 0x38(%rax), %ymm8
vaddps %ymm6, %ymm8, %ymm6
vaddps %ymm7, %ymm8, %ymm7
vsubps %ymm9, %ymm2, %ymm2
vmulps %ymm2, %ymm12, %ymm2
vsubps %ymm10, %ymm4, %ymm4
vmulps %ymm4, %ymm13, %ymm4
vmaxps %ymm4, %ymm2, %ymm2
vsubps %ymm11, %ymm6, %ymm4
vmulps %ymm4, %ymm14, %ymm4
vmaxps %ymm15, %ymm4, %ymm4
vmaxps %ymm4, %ymm2, %ymm2
vsubps %ymm9, %ymm3, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vsubps %ymm10, %ymm5, %ymm4
vmulps %ymm4, %ymm13, %ymm4
vminps %ymm4, %ymm3, %ymm3
vsubps %ymm11, %ymm7, %ymm4
vmulps %ymm4, %ymm14, %ymm4
vminps 0x2d0(%rsp), %ymm4, %ymm4
vminps %ymm4, %ymm3, %ymm3
vcmpleps %ymm3, %ymm2, %ymm2
vpcmpeqd %xmm3, %xmm3, %xmm3
vpxor %xmm3, %xmm0, %xmm0
vpmovsxbd %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpmovsxbd %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm0, %ymm0
vcvtdq2ps %ymm0, %ymm0
vbroadcastss 0x1ce0501(%rip), %ymm1 # 0x1ef09cc
vcmpltps %ymm0, %ymm1, %ymm0
vandps %ymm0, %ymm2, %ymm0
vmovmskps %ymm0, %ebx
testb $0x8, %bpl
jne 0x210516
testq %rbx, %rbx
je 0x21051a
andq $-0x10, %rbp
bsfq %rbx, %rax
leaq -0x1(%rbx), %rsi
xorl %ecx, %ecx
movq (%rbp,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
andq %rbx, %rsi
jne 0x21051f
movq %rax, %rbp
testl %ecx, %ecx
je 0x2102f0
jmp 0x21055e
pushq $0x6
jmp 0x21051c
pushq $0x4
popq %rcx
jmp 0x21050c
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rcx
leaq -0x1(%rsi), %rax
movq (%rbp,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
andq %rsi, %rax
je 0x210557
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rsi
jmp 0x21052e
movq %rcx, %rbp
xorl %ecx, %ecx
jmp 0x21050c
cmpl $0x6, %ecx
jne 0x210b07
movl %ebp, %esi
andl $0xf, %esi
xorl %ecx, %ecx
addq $-0x8, %rsi
setne %r13b
je 0x210aff
andq $-0x10, %rbp
xorl %r12d, %r12d
imulq $0xb0, %r12, %r15
vmovaps 0x80(%rbp,%r15), %xmm9
vmovaps 0x40(%rbp,%r15), %xmm6
vmulps %xmm6, %xmm9, %xmm0
vmovaps 0x70(%rbp,%r15), %xmm10
vmovaps 0x50(%rbp,%r15), %xmm7
vmulps %xmm7, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, (%rsp)
vmovaps 0x60(%rbp,%r15), %xmm11
vmulps %xmm7, %xmm11, %xmm1
vmovaps (%rbp,%r15), %xmm3
vmovaps 0x10(%rbp,%r15), %xmm13
vmovaps 0x20(%rbp,%r15), %xmm0
vmovaps 0x30(%rbp,%r15), %xmm8
vmulps %xmm8, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm5
vmulps %xmm8, %xmm10, %xmm2
vmulps %xmm6, %xmm11, %xmm12
vsubps %xmm2, %xmm12, %xmm4
vbroadcastss (%r14), %xmm12
vsubps %xmm12, %xmm3, %xmm2
vbroadcastss 0x4(%r14), %xmm12
vsubps %xmm12, %xmm13, %xmm3
vbroadcastss 0x8(%r14), %xmm12
vsubps %xmm12, %xmm0, %xmm1
vbroadcastss 0x14(%r14), %xmm12
vbroadcastss 0x18(%r14), %xmm13
vmulps %xmm1, %xmm12, %xmm14
vmulps %xmm3, %xmm13, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x10(%r14), %xmm15
vmulps %xmm2, %xmm13, %xmm0
vmovaps %xmm1, 0x60(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm3, 0x40(%rsp)
vmulps %xmm3, %xmm15, %xmm1
vmovaps %xmm2, 0x70(%rsp)
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm4, 0x20(%rsp)
vmulps %xmm4, %xmm13, %xmm2
vmovaps (%rsp), %xmm13
vmovaps %xmm5, 0x30(%rsp)
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm15, %xmm13, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vmulps %xmm14, %xmm11, %xmm10
vaddps %xmm9, %xmm10, %xmm10
vmulps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmovddup 0x1d108b7(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps %xmm14, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1d107df(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm6
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x120(%rsp), %xmm10
jne 0x210720
incq %r12
cmpq %rsi, %r12
setb %r13b
jne 0x210583
jmp 0x210ab6
vandps 0x120(%rsp), %xmm10, %xmm10
vmovaps 0x60(%rsp), %xmm0
vmulps 0x20(%rsp), %xmm0, %xmm0
vmovaps 0x40(%rsp), %xmm1
vmulps 0x30(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x70(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm3
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm4
vtestps %xmm10, %xmm4
je 0x21070b
addq %rbp, %r15
vandps %xmm4, %xmm10, %xmm0
vmovaps %xmm7, 0x130(%rsp)
vmovaps %xmm8, 0x140(%rsp)
vmovaps %xmm3, 0x150(%rsp)
vmovaps %xmm6, 0x160(%rsp)
vmovaps %xmm0, 0x180(%rsp)
vmovaps %xmm13, 0x1c0(%rsp)
vmovaps 0x30(%rsp), %xmm1
vmovaps %xmm1, 0x1d0(%rsp)
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0x1e0(%rsp)
movq (%rdx), %rax
movq %rax, 0x70(%rsp)
vrcpps %xmm6, %xmm1
vmulps %xmm1, %xmm6, %xmm2
vbroadcastss 0x1cdbf24(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x150(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x1b0(%rsp)
vmulps 0x130(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x190(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x1a0(%rsp)
vmovmskps %xmm0, %eax
movq %rax, (%rsp)
bsfq %rax, %rcx
movq %rcx, 0x20(%rsp)
movl 0x90(%r15,%rcx,4), %eax
movq 0x70(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0x40(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x24(%r14), %ecx
movq %rax, 0x30(%rsp)
testl %ecx, 0x34(%rax)
je 0x210893
movq 0x10(%rdx), %rax
movq %rax, 0x60(%rsp)
cmpq $0x0, 0x10(%rax)
movl $0x0, %ecx
jne 0x2108c2
movq 0x30(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x2108c2
xorl %eax, %eax
jmp 0x2108a8
movq 0x20(%rsp), %rax
movq (%rsp), %rcx
btcq %rax, %rcx
movq %rcx, (%rsp)
movb $0x1, %al
xorl %ecx, %ecx
testb %al, %al
je 0x210b15
movq (%rsp), %rax
testq %rax, %rax
jne 0x210836
jmp 0x21070b
movq %rsi, 0x88(%rsp)
movq %r11, 0x90(%rsp)
movq %r10, 0x98(%rsp)
movq %r9, 0xa0(%rsp)
movq %r8, 0xa8(%rsp)
movq %rdi, 0xb0(%rsp)
movq 0x20(%rsp), %rsi
vmovss 0x190(%rsp,%rsi,4), %xmm0
vmovss 0x1a0(%rsp,%rsi,4), %xmm1
movq %rdx, 0xb8(%rsp)
movq 0x8(%rdx), %rcx
movl 0xa0(%r15,%rsi,4), %edx
vmovss 0x1c0(%rsp,%rsi,4), %xmm2
vmovss 0x1d0(%rsp,%rsi,4), %xmm3
vmovss 0x1e0(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0xc0(%rsp)
vmovss %xmm3, 0xc4(%rsp)
vmovss %xmm4, 0xc8(%rsp)
vmovss %xmm0, 0xcc(%rsp)
vmovss %xmm1, 0xd0(%rsp)
movl %edx, 0xd4(%rsp)
movq 0x40(%rsp), %rax
movl %eax, 0xd8(%rsp)
movl (%rcx), %eax
movl %eax, 0xdc(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xe0(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x40(%rsp)
vmovss 0x1b0(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
orl $-0x1, 0x1c(%rsp)
leaq 0x1c(%rsp), %rax
movq %rax, 0xf0(%rsp)
movq 0x30(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xf8(%rsp)
movq %rcx, 0x100(%rsp)
movq %r14, 0x108(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x110(%rsp)
movl $0x1, 0x118(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x210a17
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x210a58
movq 0x60(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x210a54
movq 0x60(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x210a3a
movq 0x30(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x210a47
leaq 0xf0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x210a58
xorl %eax, %eax
jmp 0x210a77
vmovss 0x40(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq (%rsp), %rax
movq 0x20(%rsp), %rcx
btcq %rcx, %rax
movq %rax, (%rsp)
movb $0x1, %al
movq 0xb8(%rsp), %rdx
movq 0xb0(%rsp), %rdi
movq 0xa8(%rsp), %r8
movq 0xa0(%rsp), %r9
movq 0x98(%rsp), %r10
movq 0x90(%rsp), %r11
xorl %ecx, %ecx
movq 0x88(%rsp), %rsi
jmp 0x2108a8
vmovups 0x2b0(%rsp), %ymm9
vmovups 0x290(%rsp), %ymm10
vmovups 0x270(%rsp), %ymm11
vmovups 0x250(%rsp), %ymm12
vmovups 0x230(%rsp), %ymm13
vmovups 0x210(%rsp), %ymm14
vmovups 0x1f0(%rsp), %ymm15
movq 0x58(%rsp), %r15
movq 0x50(%rsp), %r12
leaq 0x2f0(%rsp), %r13
cmpl $0x3, %ecx
jne 0x2102df
jmp 0x210155
testb $0x1, %r13b
vmovups 0x2b0(%rsp), %ymm9
vmovups 0x290(%rsp), %ymm10
vmovups 0x270(%rsp), %ymm11
vmovups 0x250(%rsp), %ymm12
vmovups 0x230(%rsp), %ymm13
vmovups 0x210(%rsp), %ymm14
vmovups 0x1f0(%rsp), %ymm15
movq 0x58(%rsp), %r15
movq 0x50(%rsp), %r12
leaq 0x2f0(%rsp), %r13
je 0x210b07
movl $0xff800000, 0x20(%r14) # imm = 0xFF800000
pushq $0x3
popq %rcx
jmp 0x210b07
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiIntersector1Pluecker<4, true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1760, %rsp # imm = 0x1760
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x210bae
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r9
vmovss 0x20(%rsi), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm0, %xmm1
ja 0x210b9c
movq %rdx, %r8
leaq 0x5a8(%rsp), %r10
vmovaps 0x10(%r9), %xmm3
vmaxss 0xc(%r9), %xmm1, %xmm2
vbroadcastss 0x1d102e4(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1ce03fb(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
movq 0x70(%rax), %rax
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1cdbb07(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%r9), %ymm9
vbroadcastss 0x4(%r9), %ymm10
vbroadcastss 0x8(%r9), %ymm11
vaddps %xmm3, %xmm4, %xmm3
xorl %r11d, %r11d
vucomiss %xmm1, %xmm3
movq %rax, -0x8(%r10)
setb %r11b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm12
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm13
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
xorl %eax, %eax
vucomiss %xmm1, %xmm4
setb %al
xorl %ecx, %ecx
vucomiss %xmm1, %xmm5
vinsertf128 $0x1, %xmm3, %ymm3, %ymm14
setb %cl
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm15
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovaps %ymm0, 0x380(%rsp)
shll $0x3, %r11d
movq %r11, %rbx
xorq $0x8, %rbx
leal 0x10(,%rax,8), %r14d
movq %r14, %r15
xorq $0x8, %r15
leal 0x20(,%rcx,8), %esi
movq %rsi, %rdi
xorq $0x8, %rdi
leaq 0x1f3f2bd(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
vmovaps %ymm9, 0x200(%rsp)
vmovaps %ymm10, 0x1e0(%rsp)
vmovaps %ymm11, 0x1c0(%rsp)
vmovaps %ymm12, 0x1a0(%rsp)
vmovaps %ymm13, 0x180(%rsp)
vmovaps %ymm14, 0x160(%rsp)
vmovaps %ymm15, 0x140(%rsp)
movq %rbx, 0x68(%rsp)
movq %r14, 0x60(%rsp)
movq %r15, 0x58(%rsp)
movq %rsi, 0x28(%rsp)
movq %rdi, 0x20(%rsp)
leaq 0x5a0(%rsp), %rax
cmpq %rax, %r10
je 0x210b9c
movq -0x8(%r10), %r13
addq $-0x8, %r10
testb $0x8, %r13b
jne 0x210f2d
movq %r13, %rcx
andq $-0x10, %rcx
leaq 0x40(%rcx), %rax
testq %rcx, %rcx
cmoveq %rcx, %rax
vmovq (%rax), %xmm0
vmovq 0x4(%rax), %xmm1
vmovq 0x8(%rax), %xmm2
vmovq 0xc(%rax), %xmm3
vpminub %xmm2, %xmm0, %xmm2
vpcmpeqb %xmm2, %xmm0, %xmm0
vpminub %xmm3, %xmm1, %xmm2
vpcmpeqb %xmm2, %xmm1, %xmm1
vbroadcastss 0x30(%rax), %ymm3
vbroadcastss 0x3c(%rax), %ymm4
vmovq (%rax,%r11), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vmovq 0x4(%rax,%r11), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vinsertf128 $0x1, %xmm5, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm2, %ymm4, %ymm2
vaddps %ymm2, %ymm3, %ymm2
vmovq (%rax,%rbx), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vmovq 0x4(%rax,%rbx), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vinsertf128 $0x1, %xmm6, %ymm5, %ymm5
vcvtdq2ps %ymm5, %ymm5
vmulps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vbroadcastss 0x34(%rax), %ymm5
vbroadcastss 0x40(%rax), %ymm6
vmovq (%rax,%r14), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vmovq 0x4(%rax,%r14), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmovq (%rax,%r15), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x4(%rax,%r15), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm6, %ymm6
vaddps %ymm6, %ymm5, %ymm5
vmovq (%rax,%rsi), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vmovq 0x4(%rax,%rsi), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vmovq (%rax,%rdi), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x4(%rax,%rdi), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vbroadcastss 0x44(%rax), %ymm8
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm6, %ymm8, %ymm6
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm8, %ymm7
vbroadcastss 0x38(%rax), %ymm8
vaddps %ymm6, %ymm8, %ymm6
vaddps %ymm7, %ymm8, %ymm7
vsubps %ymm9, %ymm2, %ymm2
vmulps %ymm2, %ymm12, %ymm2
vsubps %ymm10, %ymm4, %ymm4
vmulps %ymm4, %ymm13, %ymm4
vmaxps %ymm4, %ymm2, %ymm2
vsubps %ymm11, %ymm6, %ymm4
vmulps %ymm4, %ymm14, %ymm4
vmaxps %ymm15, %ymm4, %ymm4
vmaxps %ymm4, %ymm2, %ymm2
vsubps %ymm9, %ymm3, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vsubps %ymm10, %ymm5, %ymm4
vmulps %ymm4, %ymm13, %ymm4
vminps %ymm4, %ymm3, %ymm3
vsubps %ymm11, %ymm7, %ymm4
vmulps %ymm4, %ymm14, %ymm4
vminps 0x380(%rsp), %ymm4, %ymm4
vminps %ymm4, %ymm3, %ymm3
vcmpleps %ymm3, %ymm2, %ymm2
vpcmpeqd %xmm3, %xmm3, %xmm3
vpxor %xmm3, %xmm0, %xmm0
vpmovsxbd %xmm0, %xmm0
vpxor %xmm3, %xmm1, %xmm1
vpmovsxbd %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm0, %ymm0
vcvtdq2ps %ymm0, %ymm0
vbroadcastss 0x1cdfaac(%rip), %ymm1 # 0x1ef09cc
vcmpltps %ymm0, %ymm1, %ymm0
vandps %ymm0, %ymm2, %ymm0
vmovmskps %ymm0, %r12d
testb $0x8, %r13b
jne 0x210f6c
testq %r12, %r12
je 0x210f70
andq $-0x10, %r13
bsfq %r12, %rax
leaq -0x1(%r12), %rdx
xorl %ecx, %ecx
movq (%r13,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
andq %r12, %rdx
jne 0x210f75
movq %rax, %r13
testl %ecx, %ecx
je 0x210d4b
jmp 0x210fb4
pushq $0x6
jmp 0x210f72
pushq $0x4
popq %rcx
jmp 0x210f62
movq %rax, (%r10)
addq $0x8, %r10
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r13,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
andq %rdx, %rax
je 0x210fad
movq %rcx, (%r10)
addq $0x8, %r10
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x210f84
movq %rcx, %r13
xorl %ecx, %ecx
jmp 0x210f62
cmpl $0x6, %ecx
jne 0x211a5a
movl %r13d, %eax
andl $0xf, %eax
xorl %ecx, %ecx
addq $-0x8, %rax
setne %dl
je 0x211a5a
movb %dl, 0x1f(%rsp)
andq $-0x10, %r13
movq (%r8), %rdx
xorl %ecx, %ecx
movq %r12, 0x30(%rsp)
movq %rax, 0x70(%rsp)
movq %rdx, 0x38(%rsp)
movq %rcx, 0x78(%rsp)
imulq $0x60, %rcx, %rax
prefetcht0 (%r13,%rax)
prefetcht0 0x40(%r13,%rax)
movq %rdx, %rcx
movl 0x40(%r13,%rax), %edx
movq 0x228(%rcx), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%r13,%rax), %esi
movl 0x4(%r13,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm4
movl 0x10(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x20(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x30(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x44(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm7
movl 0x14(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x24(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm5
movl 0x34(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x48(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm10
movl 0x18(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm11
movl 0x28(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm9
movl 0x38(%r13,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x4c(%r13,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm12
movl 0x1c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm13
movl 0x2c(%r13,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm14
movl 0x3c(%r13,%rax), %edx
vunpcklps %xmm10, %xmm4, %xmm15 # xmm15 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm12, %xmm7, %xmm10 # xmm10 = xmm7[0],xmm12[0],xmm7[1],xmm12[1]
vunpckhps %xmm12, %xmm7, %xmm7 # xmm7 = xmm7[2],xmm12[2],xmm7[3],xmm12[3]
vmovups (%rcx,%rdx,4), %xmm12
vunpcklps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vunpcklps %xmm10, %xmm15, %xmm7 # xmm7 = xmm15[0],xmm10[0],xmm15[1],xmm10[1]
vunpckhps %xmm10, %xmm15, %xmm10 # xmm10 = xmm15[2],xmm10[2],xmm15[3],xmm10[3]
vunpcklps %xmm11, %xmm2, %xmm15 # xmm15 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
vunpckhps %xmm11, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm11[2],xmm2[3],xmm11[3]
vunpcklps %xmm13, %xmm6, %xmm11 # xmm11 = xmm6[0],xmm13[0],xmm6[1],xmm13[1]
vunpckhps %xmm13, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
vunpcklps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpcklps %xmm11, %xmm15, %xmm6 # xmm6 = xmm15[0],xmm11[0],xmm15[1],xmm11[1]
vunpckhps %xmm11, %xmm15, %xmm11 # xmm11 = xmm15[2],xmm11[2],xmm15[3],xmm11[3]
vunpcklps %xmm9, %xmm1, %xmm13 # xmm13 = xmm1[0],xmm9[0],xmm1[1],xmm9[1]
vunpckhps %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm9[2],xmm1[3],xmm9[3]
vunpcklps %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
vunpckhps %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm14[2],xmm5[3],xmm14[3]
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vunpcklps %xmm9, %xmm13, %xmm5 # xmm5 = xmm13[0],xmm9[0],xmm13[1],xmm9[1]
vunpckhps %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[2],xmm9[2],xmm13[3],xmm9[3]
vunpcklps %xmm8, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
vunpckhps %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm8[2],xmm0[3],xmm8[3]
vunpcklps %xmm12, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm12[0],xmm3[1],xmm12[1]
vunpckhps %xmm12, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm12[2],xmm3[3],xmm12[3]
vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm8, %xmm13, %xmm3 # xmm3 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
vbroadcastf128 0x40(%r13,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vunpckhps %xmm8, %xmm13, %xmm8 # xmm8 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
vmovaps %ymm12, 0x3c0(%rsp)
vbroadcastf128 0x50(%r13,%rax), %ymm12 # ymm12 = mem[0,1,0,1]
vmovaps %ymm12, 0x3a0(%rsp)
vinsertf128 $0x1, %xmm5, %ymm7, %ymm5
vinsertf128 $0x1, %xmm9, %ymm10, %ymm7
vinsertf128 $0x1, %xmm1, %ymm4, %ymm15
vinsertf128 $0x1, %xmm6, %ymm6, %ymm4
vinsertf128 $0x1, %xmm11, %ymm11, %ymm14
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vinsertf128 $0x1, %xmm8, %ymm8, %ymm8
vinsertf128 $0x1, %xmm0, %ymm0, %ymm10
vbroadcastss (%r9), %ymm11
vbroadcastss 0x4(%r9), %ymm12
vbroadcastss 0x8(%r9), %ymm13
vsubps %ymm11, %ymm5, %ymm1
vsubps %ymm12, %ymm7, %ymm3
vsubps %ymm13, %ymm15, %ymm15
vsubps %ymm11, %ymm4, %ymm0
vmovaps %ymm0, 0x120(%rsp)
vsubps %ymm12, %ymm14, %ymm7
vsubps %ymm13, %ymm2, %ymm14
vsubps %ymm11, %ymm9, %ymm2
vsubps %ymm12, %ymm8, %ymm4
vsubps %ymm13, %ymm10, %ymm0
vmovaps %ymm0, 0x100(%rsp)
vsubps %ymm1, %ymm2, %ymm11
vmovaps %ymm4, 0xe0(%rsp)
vsubps %ymm3, %ymm4, %ymm12
vsubps %ymm15, %ymm0, %ymm6
vaddps %ymm3, %ymm4, %ymm4
vaddps %ymm0, %ymm15, %ymm5
vmulps %ymm4, %ymm6, %ymm8
vmulps %ymm5, %ymm12, %ymm9
vsubps %ymm8, %ymm9, %ymm8
vaddps %ymm1, %ymm2, %ymm9
vmulps %ymm5, %ymm11, %ymm5
vmovaps %ymm6, 0x2c0(%rsp)
vmulps %ymm6, %ymm9, %ymm10
vsubps %ymm5, %ymm10, %ymm5
vmovaps %ymm12, 0x2e0(%rsp)
vmulps %ymm9, %ymm12, %ymm9
vmovaps %ymm11, 0x300(%rsp)
vmulps %ymm4, %ymm11, %ymm4
vsubps %ymm9, %ymm4, %ymm4
vbroadcastss 0x14(%r9), %ymm11
vbroadcastss 0x18(%r9), %ymm6
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm11, %ymm5
vbroadcastss 0x10(%r9), %ymm13
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm8, %ymm13, %ymm5
vaddps %ymm4, %ymm5, %ymm10
vsubps %ymm7, %ymm3, %ymm8
vsubps %ymm14, %ymm15, %ymm12
vmovaps %ymm3, 0x340(%rsp)
vaddps %ymm7, %ymm3, %ymm4
vmovaps %ymm15, 0x320(%rsp)
vaddps %ymm14, %ymm15, %ymm5
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm5, %ymm8, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vmovaps 0x120(%rsp), %ymm0
vsubps %ymm0, %ymm1, %ymm15
vmulps %ymm5, %ymm15, %ymm5
vmovaps %ymm1, 0x360(%rsp)
vaddps %ymm0, %ymm1, %ymm9
vmovaps %ymm12, 0x2a0(%rsp)
vmulps %ymm9, %ymm12, %ymm12
vsubps %ymm5, %ymm12, %ymm5
vmovaps %ymm8, %ymm12
vmulps %ymm9, %ymm8, %ymm9
vmulps %ymm4, %ymm15, %ymm4
vsubps %ymm9, %ymm4, %ymm4
vmulps %ymm4, %ymm6, %ymm4
vmulps %ymm5, %ymm11, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm3, %ymm13, %ymm3
vaddps %ymm4, %ymm3, %ymm4
vmovaps %ymm4, 0x260(%rsp)
vsubps %ymm2, %ymm0, %ymm5
vaddps %ymm2, %ymm0, %ymm2
vmovaps 0xe0(%rsp), %ymm0
vsubps %ymm0, %ymm7, %ymm8
vaddps %ymm0, %ymm7, %ymm1
vmovaps 0x100(%rsp), %ymm3
vsubps %ymm3, %ymm14, %ymm0
vaddps %ymm3, %ymm14, %ymm3
vmulps %ymm1, %ymm0, %ymm7
vmulps %ymm3, %ymm8, %ymm9
vsubps %ymm7, %ymm9, %ymm7
vmulps %ymm3, %ymm5, %ymm3
vmulps %ymm2, %ymm0, %ymm9
vsubps %ymm3, %ymm9, %ymm3
vmulps %ymm2, %ymm8, %ymm2
vmulps %ymm1, %ymm5, %ymm1
vsubps %ymm2, %ymm1, %ymm1
vmovaps %ymm6, 0x100(%rsp)
vmulps %ymm1, %ymm6, %ymm1
vmovaps %ymm11, 0x120(%rsp)
vmulps %ymm3, %ymm11, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm13, 0xe0(%rsp)
vmulps %ymm7, %ymm13, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm4, %ymm10, %ymm2
vaddps %ymm2, %ymm1, %ymm7
vminps %ymm4, %ymm10, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d0fb13(%rip), %ymm3 # 0x1f20ec4
vandps %ymm3, %ymm7, %ymm9
vbroadcastss 0x1d0fb0e(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm9, 0x240(%rsp)
vmulps %ymm3, %ymm9, %ymm3
vbroadcastss 0x1d0faec(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm3, %ymm9, %ymm9
vcmpnltps %ymm9, %ymm2, %ymm2
vmovaps %ymm10, 0x280(%rsp)
vmaxps %ymm4, %ymm10, %ymm9
vmaxps %ymm1, %ymm9, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm1
vtestps %ymm1, %ymm1
je 0x2119e9
vmovaps 0x2c0(%rsp), %ymm6
vmovaps %ymm12, %ymm13
vmulps %ymm6, %ymm12, %ymm2
vmovaps 0x2e0(%rsp), %ymm10
vmovaps %ymm8, %ymm11
vmovaps 0x2a0(%rsp), %ymm8
vmulps %ymm8, %ymm10, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm11, %ymm8, %ymm9
vmulps %ymm0, %ymm12, %ymm12
vsubps %ymm9, %ymm12, %ymm12
vbroadcastss 0x1d0fa78(%rip), %ymm4 # 0x1f20ec4
vandps %ymm4, %ymm2, %ymm2
vandps %ymm4, %ymm9, %ymm9
vcmpltps %ymm9, %ymm2, %ymm2
vblendvps %ymm2, %ymm3, %ymm12, %ymm14
vmulps %ymm0, %ymm15, %ymm2
vmulps %ymm6, %ymm15, %ymm3
vmovaps 0x300(%rsp), %ymm9
vmulps %ymm8, %ymm9, %ymm6
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm5, %ymm8, %ymm8
vsubps %ymm2, %ymm8, %ymm8
vandps %ymm4, %ymm6, %ymm6
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm6, %ymm2
vblendvps %ymm2, %ymm3, %ymm8, %ymm8
vmulps %ymm5, %ymm13, %ymm2
vmulps %ymm13, %ymm9, %ymm3
vmulps %ymm15, %ymm10, %ymm5
vmulps %ymm11, %ymm15, %ymm0
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm4, %ymm5, %ymm5
vandps %ymm4, %ymm2, %ymm2
vcmpltps %ymm2, %ymm5, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm5
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vmulps 0x100(%rsp), %ymm5, %ymm1
vmulps 0x120(%rsp), %ymm8, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps 0xe0(%rsp), %ymm14, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm1, %ymm1, %ymm2
vmulps 0x320(%rsp), %ymm5, %ymm1
vmulps 0x340(%rsp), %ymm8, %ymm3
vaddps %ymm1, %ymm3, %ymm1
vmulps 0x360(%rsp), %ymm14, %ymm3
vrcpps %ymm2, %ymm6
vaddps %ymm1, %ymm3, %ymm1
vaddps %ymm1, %ymm1, %ymm1
vmulps %ymm6, %ymm2, %ymm3
vbroadcastss 0x1cdb1e8(%rip), %ymm9 # 0x1eec714
vsubps %ymm3, %ymm9, %ymm3
vmulps %ymm3, %ymm6, %ymm3
vaddps %ymm3, %ymm6, %ymm3
vbroadcastss 0xc(%r9), %ymm6
vmulps %ymm3, %ymm1, %ymm1
vcmpleps %ymm1, %ymm6, %ymm3
vbroadcastss 0x20(%r9), %ymm6
vcmpleps %ymm6, %ymm1, %ymm6
vandps %ymm3, %ymm6, %ymm3
vcmpneqps 0x1d0f9a1(%rip), %ymm2, %ymm2 # 0x1f20f00
vandps %ymm2, %ymm3, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vpackssdw %xmm3, %xmm2, %xmm2
vpand %xmm0, %xmm2, %xmm0
vpmovsxwd %xmm0, %xmm2
vpshufd $0xee, %xmm0, %xmm3 # xmm3 = xmm0[2,3,2,3]
vpmovsxwd %xmm3, %xmm3
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vtestps %ymm2, %ymm2
je 0x2119e9
vmovaps 0x280(%rsp), %ymm3
vmovaps %ymm3, 0x3e0(%rsp)
vmovaps 0x260(%rsp), %ymm4
vmovaps %ymm4, 0x400(%rsp)
vmovaps %ymm7, 0x420(%rsp)
vmovaps %ymm14, 0x440(%rsp)
vmovaps %ymm8, 0x460(%rsp)
vmovaps %ymm5, 0x480(%rsp)
vmovaps %ymm2, 0x4a0(%rsp)
vrcpps %ymm7, %ymm2
vmovaps %ymm1, 0x500(%rsp)
vmovaps 0x220(%rsp), %ymm9
vmovaps %ymm9, 0x580(%rsp)
vmulps %ymm2, %ymm7, %ymm1
vbroadcastss 0x1cdb106(%rip), %ymm7 # 0x1eec714
vsubps %ymm1, %ymm7, %ymm1
vmulps %ymm1, %ymm2, %ymm1
vaddps %ymm1, %ymm2, %ymm1
vbroadcastss 0x1cdf9c5(%rip), %ymm2 # 0x1ef0fe8
vmovaps 0x240(%rsp), %ymm6
vcmpnltps %ymm2, %ymm6, %ymm2
vandps %ymm1, %ymm2, %ymm1
vmulps %ymm1, %ymm3, %ymm2
vminps %ymm7, %ymm2, %ymm2
vmulps %ymm1, %ymm4, %ymm1
vminps %ymm7, %ymm1, %ymm1
vsubps %ymm2, %ymm7, %ymm3
vsubps %ymm1, %ymm7, %ymm4
vbroadcastss 0x1cdf376(%rip), %ymm6 # 0x1ef09cc
vblendvps %ymm9, %ymm6, %ymm7, %ymm6
vblendvps %ymm9, %ymm3, %ymm1, %ymm1
vblendvps %ymm9, %ymm4, %ymm2, %ymm2
vmovaps %ymm1, 0x4e0(%rsp)
vmovaps %ymm2, 0x4c0(%rsp)
vmulps %ymm6, %ymm14, %ymm1
vmulps %ymm6, %ymm8, %ymm2
vmulps %ymm5, %ymm6, %ymm3
vmovaps %ymm1, 0x520(%rsp)
vmovaps %ymm2, 0x540(%rsp)
vmovaps %ymm3, 0x560(%rsp)
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1e0(%rsp), %ymm10
vmovaps 0x1c0(%rsp), %ymm11
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x180(%rsp), %ymm13
vmovaps 0x160(%rsp), %ymm14
vmovaps 0x140(%rsp), %ymm15
movq 0x28(%rsp), %rsi
movq 0x20(%rsp), %rdi
movq 0x38(%rsp), %rdx
bsfq %r15, %r14
movl 0x3c0(%rsp,%r14,4), %eax
movq 0x1e8(%rdx), %rcx
movq (%rcx,%rax,8), %r12
movl 0x24(%r9), %ecx
testl %ecx, 0x34(%r12)
je 0x211743
movq 0x10(%r8), %rbx
cmpq $0x0, 0x10(%rbx)
movl $0x0, %ecx
jne 0x211762
cmpq $0x0, 0x48(%r12)
jne 0x211762
xorl %eax, %eax
movq 0x30(%rsp), %r12
jmp 0x211750
btcq %r14, %r15
movb $0x1, %al
movq 0x30(%rsp), %r12
xorl %ecx, %ecx
testb %al, %al
je 0x211a39
testq %r15, %r15
jne 0x211700
jmp 0x2119bd
vmovss 0x4c0(%rsp,%r14,4), %xmm0
vmovss 0x4e0(%rsp,%r14,4), %xmm1
movq 0x8(%r8), %rcx
movl 0x3a0(%rsp,%r14,4), %edx
vmovss 0x520(%rsp,%r14,4), %xmm2
vmovss 0x540(%rsp,%r14,4), %xmm3
vmovss 0x560(%rsp,%r14,4), %xmm4
vmovss %xmm2, 0x80(%rsp)
vmovss %xmm3, 0x84(%rsp)
vmovss %xmm4, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovss %xmm1, 0x90(%rsp)
movl %edx, 0x94(%rsp)
movl %eax, 0x98(%rsp)
movl (%rcx), %eax
movl %eax, 0x9c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0xa0(%rsp)
vmovss 0x20(%r9), %xmm0
vmovss %xmm0, 0x120(%rsp)
vmovd 0x500(%rsp,%r14,4), %xmm0
vmovd %xmm0, 0x20(%r9)
orl $-0x1, 0x44(%rsp)
leaq 0x44(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0xb8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %r9, 0xc8(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
movq %r10, 0x50(%rsp)
movq %r11, 0x48(%rsp)
je 0x211900
leaq 0xb0(%rsp), %rdi
movq %r8, 0x100(%rsp)
movq %r9, 0xe0(%rsp)
vzeroupper
callq *%rax
movq 0x20(%rsp), %rdi
movq 0x28(%rsp), %rsi
vmovaps 0x140(%rsp), %ymm15
vmovaps 0x160(%rsp), %ymm14
vmovaps 0x180(%rsp), %ymm13
vmovaps 0x1a0(%rsp), %ymm12
movq 0x48(%rsp), %r11
vmovaps 0x1c0(%rsp), %ymm11
vmovaps 0x1e0(%rsp), %ymm10
vmovaps 0x200(%rsp), %ymm9
movq 0x50(%rsp), %r10
movq 0xe0(%rsp), %r9
movq 0x100(%rsp), %r8
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x211997
movq 0x10(%rbx), %rax
testq %rax, %rax
je 0x211993
testb $0x2, (%rbx)
jne 0x21191a
testb $0x40, 0x3e(%r12)
je 0x211986
leaq 0xb0(%rsp), %rdi
movq %r8, %rbx
movq %r9, %r12
vzeroupper
callq *%rax
movq 0x20(%rsp), %rdi
movq 0x28(%rsp), %rsi
vmovaps 0x140(%rsp), %ymm15
vmovaps 0x160(%rsp), %ymm14
vmovaps 0x180(%rsp), %ymm13
vmovaps 0x1a0(%rsp), %ymm12
movq 0x48(%rsp), %r11
vmovaps 0x1c0(%rsp), %ymm11
vmovaps 0x1e0(%rsp), %ymm10
vmovaps 0x200(%rsp), %ymm9
movq 0x50(%rsp), %r10
movq %r12, %r9
movq %rbx, %r8
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x211997
xorl %eax, %eax
jmp 0x2119ac
vmovd 0x120(%rsp), %xmm0
vmovd %xmm0, 0x20(%r9)
btcq %r14, %r15
movb $0x1, %al
movq 0x30(%rsp), %r12
xorl %ecx, %ecx
movq 0x38(%rsp), %rdx
jmp 0x211750
movq 0x68(%rsp), %rbx
movq 0x60(%rsp), %r14
movq 0x58(%rsp), %r15
movq 0x78(%rsp), %rcx
incq %rcx
movq 0x70(%rsp), %rax
cmpq %rax, %rcx
setb 0x1f(%rsp)
jne 0x210fee
jmp 0x211a68
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1e0(%rsp), %ymm10
vmovaps 0x1c0(%rsp), %ymm11
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x180(%rsp), %ymm13
vmovaps 0x160(%rsp), %ymm14
vmovaps 0x140(%rsp), %ymm15
movq 0x28(%rsp), %rsi
movq 0x20(%rsp), %rdi
movq 0x38(%rsp), %rdx
jmp 0x2119cc
testb $0x1, 0x1f(%rsp)
movq 0x68(%rsp), %rbx
movq 0x60(%rsp), %r14
movq 0x58(%rsp), %r15
je 0x211a5a
movl $0xff800000, 0x20(%r9) # imm = 0xFF800000
pushq $0x3
popq %rcx
cmpl $0x3, %ecx
jne 0x210d32
jmp 0x210b9c
xorl %ecx, %ecx
jmp 0x211a5a
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::ObjectIntersector1<true>>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1348, %rsp # imm = 0x1348
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x211f31
addq $0x1348, %rsp # imm = 0x1348
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
vmovss 0x20(%rsi), %xmm0
vxorps %xmm5, %xmm5, %xmm5
vucomiss %xmm0, %xmm5
ja 0x211f1c
leaq 0x1a8(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmaxss 0xc(%rsi), %xmm5, %xmm1
vmovaps 0x10(%rsi), %xmm2
vbroadcastss 0x1d0ef61(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1cdf078(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1cda788(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss 0x8(%rsi), %ymm8
xorl %r8d, %r8d
vucomiss %xmm5, %xmm2
setb %r8b
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm10
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm5, %xmm3
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm5, %xmm4
vinsertf128 $0x1, %xmm2, %ymm2, %ymm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %r15
xorq $0x20, %r15
movq %r10, %rbp
xorq $0x20, %rbp
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm11
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm12
leaq 0x1a0(%rsp), %r13
vmovups %ymm6, 0x180(%rsp)
vmovups %ymm7, 0x160(%rsp)
vmovups %ymm8, 0x140(%rsp)
movq %r8, 0x30(%rsp)
vmovups %ymm9, 0x120(%rsp)
vmovups %ymm10, 0x100(%rsp)
movq %r9, 0x28(%rsp)
movq %r10, 0x20(%rsp)
vmovups %ymm5, 0xe0(%rsp)
movq %r11, 0x18(%rsp)
movq %r15, 0x10(%rsp)
movq %rbp, 0x8(%rsp)
vmovups %ymm11, 0xc0(%rsp)
vmovups %ymm12, 0xa0(%rsp)
cmpq %r13, %rdi
je 0x211f1c
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x2121a4
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r8), %ymm0, %ymm1
vaddps 0x40(%rax,%r8), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r9), %ymm0, %ymm2
vaddps 0x40(%rax,%r9), %ymm2, %ymm2
vmaxps %ymm1, %ymm11, %ymm1
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x100(%rax,%r10), %ymm0, %ymm3
vmulps %ymm2, %ymm10, %ymm2
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm5, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r11), %ymm3, %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vminps %ymm2, %ymm12, %ymm2
vmulps 0x100(%rax,%r15), %ymm0, %ymm3
vaddps 0x40(%rax,%r15), %ymm3, %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm10, %ymm3
vmulps 0x100(%rax,%rbp), %ymm0, %ymm4
vaddps 0x40(%rax,%rbp), %ymm4, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm5, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x2121ee
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %ebx
testb $0x8, %r12b
jne 0x2121ea
testq %rbx, %rbx
je 0x21221b
andq $-0x10, %r12
bsfq %rbx, %rax
leaq -0x1(%rbx), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %rbx, %rdx
jne 0x212221
movq %rax, %r12
testl %r14d, %r14d
je 0x2120b5
jmp 0x212267
pushq $0x6
jmp 0x21221d
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x212194
pushq $0x4
popq %r14
jmp 0x2121df
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x21225f
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x212230
movq %rcx, %r12
jmp 0x2121df
cmpl $0x6, %r14d
jne 0x2123d6
movl %r12d, %eax
andl $0xf, %eax
xorl %r14d, %r14d
addq $-0x8, %rax
movq %rax, 0x50(%rsp)
setne %r13b
je 0x2123ce
movq %rdi, 0x38(%rsp)
andq $-0x10, %r12
movq 0x40(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x48(%rsp)
xorl %ebp, %ebp
movl (%r12,%rbp,8), %ecx
movq 0x48(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rax
movl 0x34(%rax), %edx
testl %edx, 0x24(%rsi)
je 0x212399
movl 0x4(%r12,%rbp,8), %edx
orl $-0x1, 0x4(%rsp)
leaq 0x4(%rsp), %rdi
movq %rdi, 0x58(%rsp)
movq 0x18(%rax), %rdi
movq %rdi, 0x60(%rsp)
movq 0x40(%rsp), %rdi
movq 0x8(%rdi), %r8
movq %r8, 0x70(%rsp)
movq %rsi, %r15
movq %rsi, 0x78(%rsp)
movl $0x1, 0x80(%rsp)
movl %ecx, 0x84(%rsp)
movl %edx, 0x68(%rsp)
movq %rax, 0x88(%rsp)
andq $0x0, 0x90(%rsp)
movq 0x10(%rdi), %rcx
movq %rcx, 0x98(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x212338
movq 0x68(%rax), %rcx
leaq 0x58(%rsp), %rdi
vzeroupper
callq *%rcx
movq %r15, %rsi
vxorps %xmm0, %xmm0, %xmm0
vucomiss 0x20(%r15), %xmm0
vmovups 0x180(%rsp), %ymm6
vmovups 0x160(%rsp), %ymm7
vmovups 0x140(%rsp), %ymm8
vmovups 0x120(%rsp), %ymm9
vmovups 0x100(%rsp), %ymm10
vmovups 0xe0(%rsp), %ymm5
vmovups 0xc0(%rsp), %ymm11
vmovups 0xa0(%rsp), %ymm12
ja 0x2123e5
incq %rbp
cmpq 0x50(%rsp), %rbp
setb %r13b
jne 0x2122a5
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r8
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %r15
movq 0x8(%rsp), %rbp
leaq 0x1a0(%rsp), %r13
cmpl $0x3, %r14d
jne 0x2120a4
jmp 0x211f1c
testb $0x1, %r13b
movq 0x38(%rsp), %rdi
movq 0x30(%rsp), %r8
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %r15
movq 0x8(%rsp), %rbp
leaq 0x1a0(%rsp), %r13
je 0x2123d6
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x2123d6
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::InstanceIntersector1>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x12f8, %rsp # imm = 0x12F8
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x212866
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x212866
leaq 0x158(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d0ea46(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1cdeb5d(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1cda26d(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %rcx
xorq $0x20, %rcx
movq %r10, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm5
leaq 0x150(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovups %ymm6, 0x130(%rsp)
vmovups %ymm7, 0x110(%rsp)
vmovups %ymm8, 0xf0(%rsp)
movq %r8, 0x30(%rsp)
vmovups %ymm9, 0xd0(%rsp)
vmovups %ymm10, 0xb0(%rsp)
vmovups %ymm3, 0x90(%rsp)
movq %r9, 0x28(%rsp)
movq %r10, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %rcx, 0x10(%rsp)
vmovups %ymm4, 0x70(%rsp)
vmovups %ymm5, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x212866
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x21264d
vmovaps 0x40(%r12,%r8), %ymm0
vsubps %ymm6, %ymm0, %ymm0
vmulps %ymm0, %ymm9, %ymm0
vmovaps 0x40(%r12,%r9), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm10, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r12,%r10), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm3, %ymm1
vmaxps %ymm4, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r12,%r11), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmovaps 0x40(%r12,%rcx), %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vminps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%r12,%rbx), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r13d
testb $0x8, %r12b
jne 0x212693
testq %r13, %r13
je 0x212697
andq $-0x10, %r12
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdx
jne 0x21269d
movq %rax, %r12
testl %r14d, %r14d
je 0x2125ca
jmp 0x2126e6
pushq $0x6
jmp 0x212699
pushq $0x4
popq %r14
jmp 0x212688
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x2126de
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x2126af
movq %rcx, %r12
movq %r15, %rcx
jmp 0x212688
cmpl $0x6, %r14d
jne 0x21285c
movl %r12d, %ebp
andl $0xf, %ebp
xorl %r14d, %r14d
movq %rbp, %rax
addq $-0x8, %rax
setne %r15b
je 0x212854
movq %rax, 0x48(%rsp)
movq %rdi, 0x38(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
vzeroupper
callq 0x3f50dc
testb %al, %al
jne 0x212770
addq $0x10, %r12
addq $-0x9, %rbp
xorl %r15d, %r15d
cmpq %r15, %rbp
je 0x2127ef
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
callq 0x3f50dc
addq $0x10, %r12
incq %r15
testb %al, %al
je 0x21273c
cmpq 0x48(%rsp), %r15
setb %r15b
testb %r15b, %r15b
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovups 0x130(%rsp), %ymm6
vmovups 0x110(%rsp), %ymm7
vmovups 0xf0(%rsp), %ymm8
movq 0x30(%rsp), %r8
vmovups 0xd0(%rsp), %ymm9
vmovups 0xb0(%rsp), %ymm10
vmovups 0x90(%rsp), %ymm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovups 0x70(%rsp), %ymm4
vmovups 0x50(%rsp), %ymm5
leaq 0x150(%rsp), %rbp
je 0x21285c
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x21285c
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovups 0x130(%rsp), %ymm6
vmovups 0x110(%rsp), %ymm7
vmovups 0xf0(%rsp), %ymm8
movq 0x30(%rsp), %r8
vmovups 0xd0(%rsp), %ymm9
vmovups 0xb0(%rsp), %ymm10
vmovups 0x90(%rsp), %ymm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovups 0x70(%rsp), %ymm4
vmovups 0x50(%rsp), %ymm5
leaq 0x150(%rsp), %rbp
cmpl $0x3, %r14d
jne 0x2125b9
addq $0x12f8, %rsp # imm = 0x12F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::InstanceIntersector1MB>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x12f8, %rsp # imm = 0x12F8
movq %rdx, 0x38(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x212d4b
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x212d4b
leaq 0x158(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d0e5ee(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1cde705(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1cd9e15(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm11
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %r15
xorq $0x20, %r15
movq %r10, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm5
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm12
leaq 0x150(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovups %ymm6, 0x130(%rsp)
vmovups %ymm7, 0x110(%rsp)
vmovups %ymm8, 0xf0(%rsp)
movq %r8, 0x28(%rsp)
vmovups %ymm9, 0xd0(%rsp)
vmovups %ymm10, 0xb0(%rsp)
vmovups %ymm11, 0x90(%rsp)
movq %r9, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %r11, 0x10(%rsp)
vmovups %ymm5, 0x70(%rsp)
vmovups %ymm12, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x212d4b
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x212b0d
movq %r12, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rsi), %ymm0
vmulps 0x100(%rax,%r8), %ymm0, %ymm1
vaddps 0x40(%rax,%r8), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r9), %ymm0, %ymm2
vaddps 0x40(%rax,%r9), %ymm2, %ymm2
vmaxps %ymm1, %ymm5, %ymm1
vsubps %ymm7, %ymm2, %ymm2
vmulps 0x100(%rax,%r10), %ymm0, %ymm3
vmulps %ymm2, %ymm10, %ymm2
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmulps 0x100(%rax,%r11), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r11), %ymm3, %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vminps %ymm2, %ymm12, %ymm2
vmulps 0x100(%rax,%r15), %ymm0, %ymm3
vaddps 0x40(%rax,%r15), %ymm3, %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm10, %ymm3
vmulps 0x100(%rax,%rbx), %ymm0, %ymm4
vaddps 0x40(%rax,%rbx), %ymm4, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm11, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x212b57
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r14d
testb $0x8, %r12b
jne 0x212b53
testq %r14, %r14
je 0x212b84
andq $-0x10, %r12
bsfq %r14, %rax
leaq -0x1(%r14), %rdx
xorl %r13d, %r13d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r14, %rdx
jne 0x212b8a
movq %rax, %r12
testl %r13d, %r13d
je 0x212a1d
jmp 0x212bd0
pushq $0x6
jmp 0x212b86
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x212afc
pushq $0x4
popq %r13
jmp 0x212b48
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x212bc8
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x212b99
movq %rcx, %r12
jmp 0x212b48
cmpl $0x6, %r13d
jne 0x212d41
movl %r12d, %eax
andl $0xf, %eax
xorl %r13d, %r13d
movq %rax, 0x40(%rsp)
addq $-0x8, %rax
setne %bpl
je 0x212d39
movq %rax, 0x48(%rsp)
movq %rdi, 0x30(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
vzeroupper
callq 0x3f5b0c
testb %al, %al
jne 0x212c5f
addq $0x10, %r12
addq $-0x9, 0x40(%rsp)
xorl %ebp, %ebp
cmpq %rbp, 0x40(%rsp)
je 0x212cd9
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
callq 0x3f5b0c
addq $0x10, %r12
incq %rbp
testb %al, %al
je 0x212c29
cmpq 0x48(%rsp), %rbp
setb %bpl
testb %bpl, %bpl
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdi
vmovups 0x130(%rsp), %ymm6
vmovups 0x110(%rsp), %ymm7
vmovups 0xf0(%rsp), %ymm8
movq 0x28(%rsp), %r8
vmovups 0xd0(%rsp), %ymm9
vmovups 0xb0(%rsp), %ymm10
vmovups 0x90(%rsp), %ymm11
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovups 0x70(%rsp), %ymm5
vmovups 0x50(%rsp), %ymm12
leaq 0x150(%rsp), %rbp
je 0x212d41
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r13
jmp 0x212d41
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdi
vmovups 0x130(%rsp), %ymm6
vmovups 0x110(%rsp), %ymm7
vmovups 0xf0(%rsp), %ymm8
movq 0x28(%rsp), %r8
vmovups 0xd0(%rsp), %ymm9
vmovups 0xb0(%rsp), %ymm10
vmovups 0x90(%rsp), %ymm11
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovups 0x70(%rsp), %ymm5
vmovups 0x50(%rsp), %ymm12
leaq 0x150(%rsp), %rbp
cmpl $0x3, %r13d
jne 0x212a0c
addq $0x12f8, %rsp # imm = 0x12F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::InstanceArrayIntersector1>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x12f8, %rsp # imm = 0x12F8
movq %rdx, 0x40(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x2131a2
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x2131a2
leaq 0x158(%rsp), %rdi
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps 0x10(%rsi), %xmm3
vmaxss 0xc(%rsi), %xmm2, %xmm1
vbroadcastss 0x1d0e10a(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1cde221(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss 0x1cd9931(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
xorl %r8d, %r8d
vucomiss %xmm2, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
shll $0x5, %r8d
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %r8, %r11
xorq $0x20, %r11
movq %r9, %rcx
xorq $0x20, %rcx
movq %r10, %rbx
xorq $0x20, %rbx
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm4
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm5
leaq 0x150(%rsp), %rbp
movq %rsi, 0x8(%rsp)
vmovups %ymm6, 0x130(%rsp)
vmovups %ymm7, 0x110(%rsp)
vmovups %ymm8, 0xf0(%rsp)
movq %r8, 0x30(%rsp)
vmovups %ymm9, 0xd0(%rsp)
vmovups %ymm10, 0xb0(%rsp)
vmovups %ymm3, 0x90(%rsp)
movq %r9, 0x28(%rsp)
movq %r10, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %rcx, 0x10(%rsp)
vmovups %ymm4, 0x70(%rsp)
vmovups %ymm5, 0x50(%rsp)
cmpq %rbp, %rdi
je 0x2131a2
movq -0x8(%rdi), %r12
addq $-0x8, %rdi
testb $0x8, %r12b
jne 0x212f89
vmovaps 0x40(%r12,%r8), %ymm0
vsubps %ymm6, %ymm0, %ymm0
vmulps %ymm0, %ymm9, %ymm0
vmovaps 0x40(%r12,%r9), %ymm1
vsubps %ymm7, %ymm1, %ymm1
vmulps %ymm1, %ymm10, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r12,%r10), %ymm1
vsubps %ymm8, %ymm1, %ymm1
vmulps %ymm1, %ymm3, %ymm1
vmaxps %ymm4, %ymm1, %ymm1
vmaxps %ymm1, %ymm0, %ymm0
vmovaps 0x40(%r12,%r11), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmovaps 0x40(%r12,%rcx), %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vminps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%r12,%rbx), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r13d
testb $0x8, %r12b
jne 0x212fcf
testq %r13, %r13
je 0x212fd3
andq $-0x10, %r12
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
xorl %r14d, %r14d
movq (%r12,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
prefetcht0 0x80(%rax)
prefetcht0 0xc0(%rax)
andq %r13, %rdx
jne 0x212fd9
movq %rax, %r12
testl %r14d, %r14d
je 0x212f06
jmp 0x213022
pushq $0x6
jmp 0x212fd5
pushq $0x4
popq %r14
jmp 0x212fc4
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %rdx, %rax
je 0x21301a
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x212feb
movq %rcx, %r12
movq %r15, %rcx
jmp 0x212fc4
cmpl $0x6, %r14d
jne 0x213198
movl %r12d, %ebp
andl $0xf, %ebp
xorl %r14d, %r14d
movq %rbp, %rax
addq $-0x8, %rax
setne %r15b
je 0x213190
movq %rax, 0x48(%rsp)
movq %rdi, 0x38(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
vzeroupper
callq 0x4070ac
testb %al, %al
jne 0x2130ac
addq $0x8, %r12
addq $-0x9, %rbp
xorl %r15d, %r15d
cmpq %r15, %rbp
je 0x21312b
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq %r12, %rcx
callq 0x4070ac
addq $0x8, %r12
incq %r15
testb %al, %al
je 0x213078
cmpq 0x48(%rsp), %r15
setb %r15b
testb %r15b, %r15b
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovups 0x130(%rsp), %ymm6
vmovups 0x110(%rsp), %ymm7
vmovups 0xf0(%rsp), %ymm8
movq 0x30(%rsp), %r8
vmovups 0xd0(%rsp), %ymm9
vmovups 0xb0(%rsp), %ymm10
vmovups 0x90(%rsp), %ymm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovups 0x70(%rsp), %ymm4
vmovups 0x50(%rsp), %ymm5
leaq 0x150(%rsp), %rbp
je 0x213198
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r14
jmp 0x213198
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdi
vmovups 0x130(%rsp), %ymm6
vmovups 0x110(%rsp), %ymm7
vmovups 0xf0(%rsp), %ymm8
movq 0x30(%rsp), %r8
vmovups 0xd0(%rsp), %ymm9
vmovups 0xb0(%rsp), %ymm10
vmovups 0x90(%rsp), %ymm3
movq 0x28(%rsp), %r9
movq 0x20(%rsp), %r10
movq 0x18(%rsp), %r11
movq 0x10(%rsp), %rcx
vmovups 0x70(%rsp), %ymm4
vmovups 0x50(%rsp), %ymm5
leaq 0x150(%rsp), %rbp
cmpl $0x3, %r14d
jne 0x212ef5
addq $0x12f8, %rsp # imm = 0x12F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, true, embree::avx::SubGridMBIntersector1Pluecker<8, true>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x17e0, %rsp # imm = 0x17E0
movq %rdx, 0x38(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x21453d
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %rbx
vmovss 0x20(%rsi), %xmm0
vxorps %xmm12, %xmm12, %xmm12
vucomiss %xmm0, %xmm12
ja 0x21452b
leaq 0x628(%rsp), %r15
movq 0x70(%rax), %rax
vmovaps 0x10(%rbx), %xmm2
vmaxss 0xc(%rbx), %xmm12, %xmm1
vbroadcastss 0x1d0c955(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1cdca6c(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1cd818a(%rip), %xmm4 # 0x1eec714
vdivps %xmm2, %xmm4, %xmm2
vbroadcastss 0x1d0c9c9(%rip), %xmm4 # 0x1f20f60
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
movq %rax, -0x8(%r15)
vbroadcastss 0x1d0b966(%rip), %xmm3 # 0x1f1ff10
vmulps %xmm3, %xmm2, %xmm3
vbroadcastss 0x1d0b95d(%rip), %xmm4 # 0x1f1ff14
vbroadcastss (%rbx), %ymm15
vbroadcastss 0x4(%rbx), %ymm13
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x8(%rbx), %ymm6
xorl %r13d, %r13d
vucomiss %xmm12, %xmm3
setb %r13b
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm7
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm8
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm10
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm11
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
shll $0x5, %r13d
xorl %edi, %edi
vucomiss %xmm12, %xmm4
vinsertf128 $0x1, %xmm2, %ymm2, %ymm14
setb %dil
shll $0x5, %edi
orq $0x40, %rdi
xorl %r8d, %r8d
vucomiss %xmm12, %xmm5
setb %r8b
shll $0x5, %r8d
orq $0x80, %r8
movq %r13, %r9
xorq $0x20, %r9
movq %rdi, %r10
xorq $0x20, %r10
movq %r8, %r11
xorq $0x20, %r11
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps %ymm1, 0x400(%rsp)
vinsertf128 $0x1, %xmm1, %ymm1, %ymm12
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
leaq 0x1f3b8fb(%rip), %rax # 0x214ff80
vbroadcastf128 0xf0(%rax), %ymm1 # ymm1 = mem[0,1,0,1]
vmovaps %ymm1, 0x380(%rsp)
vperm2f128 $0x2, (%rax), %ymm1, %ymm1 # ymm1 = mem[0,1],ymm1[0,1]
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm0, 0x3e0(%rsp)
vinsertf128 $0x1, %xmm0, %ymm0, %ymm5
leaq 0x620(%rsp), %rdx
movq %rbx, 0x110(%rsp)
vmovaps %ymm15, 0x220(%rsp)
vmovaps %ymm13, 0x1a0(%rsp)
vmovaps %ymm6, 0x340(%rsp)
movq %r13, 0x50(%rsp)
vmovaps %ymm7, 0x2e0(%rsp)
vmovaps %ymm8, 0x2c0(%rsp)
vmovaps %ymm9, 0x2a0(%rsp)
vmovaps %ymm10, 0x320(%rsp)
vmovaps %ymm11, 0x300(%rsp)
movq %rdi, 0x30(%rsp)
vmovaps %ymm14, 0x180(%rsp)
movq %r8, 0x28(%rsp)
movq %r9, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %r11, 0x10(%rsp)
vmovaps %ymm12, 0x160(%rsp)
vmovaps %ymm5, 0x260(%rsp)
cmpq %rdx, %r15
je 0x21452b
movq -0x8(%r15), %r14
addq $-0x8, %r15
testb $0x8, %r14b
jne 0x214849
movq %r14, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%rbx), %ymm0
vmulps 0x100(%rax,%r13), %ymm0, %ymm1
vaddps 0x40(%rax,%r13), %ymm1, %ymm1
vsubps %ymm15, %ymm1, %ymm1
vmulps %ymm1, %ymm7, %ymm1
vmulps 0x100(%rax,%rdi), %ymm0, %ymm2
vaddps 0x40(%rax,%rdi), %ymm2, %ymm2
vsubps %ymm13, %ymm2, %ymm2
vmulps %ymm2, %ymm8, %ymm2
vmulps 0x100(%rax,%r8), %ymm0, %ymm3
vaddps 0x40(%rax,%r8), %ymm3, %ymm3
vsubps %ymm6, %ymm3, %ymm3
vmulps %ymm3, %ymm9, %ymm3
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm1, %ymm12, %ymm1
vmulps 0x100(%rax,%r9), %ymm0, %ymm3
vmaxps %ymm2, %ymm1, %ymm1
vaddps 0x40(%rax,%r9), %ymm3, %ymm2
vsubps %ymm15, %ymm2, %ymm2
vmulps 0x100(%rax,%r10), %ymm0, %ymm3
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vmulps %ymm2, %ymm10, %ymm2
vsubps %ymm13, %ymm3, %ymm3
vmulps 0x100(%rax,%r11), %ymm0, %ymm4
vmulps %ymm3, %ymm11, %ymm3
vaddps 0x40(%rax,%r11), %ymm4, %ymm4
vsubps %ymm6, %ymm4, %ymm4
vmulps %ymm4, %ymm14, %ymm4
vminps %ymm4, %ymm3, %ymm3
vminps %ymm2, %ymm5, %ymm2
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r14d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x214895
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r12d
testb $0x8, %r14b
jne 0x214891
testq %r12, %r12
je 0x2148c2
andq $-0x10, %r14
bsfq %r12, %rcx
leaq -0x1(%r12), %rsi
xorl %eax, %eax
movq (%r14,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r12, %rsi
jne 0x2148c7
movq %rcx, %r14
testl %eax, %eax
je 0x214757
jmp 0x214915
pushq $0x6
jmp 0x2148c4
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x214838
pushq $0x4
popq %rax
jmp 0x214884
movq %rcx, (%r15)
addq $0x8, %r15
bsfq %rsi, %rdx
leaq -0x1(%rsi), %rcx
movq (%r14,%rdx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %rsi, %rcx
je 0x214905
movq %rdx, (%r15)
addq $0x8, %r15
bsfq %rcx, %rdx
leaq -0x1(%rcx), %rsi
jmp 0x2148d6
movq %rdx, %r14
leaq 0x620(%rsp), %rdx
jmp 0x214884
cmpl $0x6, %eax
jne 0x215c15
movl %r14d, %esi
andl $0xf, %esi
addq $-0x8, %rsi
setne %cl
je 0x215c04
andq $-0x10, %r14
xorl %eax, %eax
movq %rax, 0x120(%rsp)
imulq $0xe0, %rax, %rdx
vmovss 0x1c(%rbx), %xmm0
vsubss 0xd0(%r14,%rdx), %xmm0, %xmm0
vmulss 0xd4(%r14,%rdx), %xmm0, %xmm2
vmovq 0x40(%r14,%rdx), %xmm0
vpmovzxbd %xmm0, %xmm1 # xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vmovq 0x44(%r14,%rdx), %xmm3
vpmovzxbd %xmm3, %xmm4 # xmm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vinsertf128 $0x1, %xmm4, %ymm1, %ymm4
vmovq 0x48(%r14,%rdx), %xmm1
vpmovzxbd %xmm1, %xmm5 # xmm5 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
vmovaps %ymm6, %ymm14
vmovq 0x4c(%r14,%rdx), %xmm6
vpmovzxbd %xmm6, %xmm7 # xmm7 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm5, %ymm5
vpminub %xmm1, %xmm0, %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x80(%rsp)
vpminub %xmm6, %xmm3, %xmm1
vpcmpeqb %xmm1, %xmm3, %xmm0
vmovdqa %xmm0, 0xd0(%rsp)
vcvtdq2ps %ymm4, %ymm3
vbroadcastss 0x7c(%r14,%rdx), %ymm4
vbroadcastss 0x70(%r14,%rdx), %ymm6
vmulps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm6, %ymm3
vmovq 0x88(%r14,%rdx), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x8c(%r14,%rdx), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vbroadcastss 0xc4(%r14,%rdx), %ymm8
vbroadcastss 0xb8(%r14,%rdx), %ymm9
vmulps %ymm7, %ymm8, %ymm7
vaddps %ymm7, %ymm9, %ymm7
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm2, %ymm7
vaddps %ymm7, %ymm3, %ymm3
vcvtdq2ps %ymm5, %ymm5
vmulps %ymm5, %ymm4, %ymm4
vaddps %ymm4, %ymm6, %ymm4
vmovq 0x90(%r14,%rdx), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vmovq 0x94(%r14,%rdx), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vinsertf128 $0x1, %xmm6, %ymm5, %ymm5
vcvtdq2ps %ymm5, %ymm5
vmulps %ymm5, %ymm8, %ymm5
vaddps %ymm5, %ymm9, %ymm5
vsubps %ymm4, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmovq 0x50(%r14,%rdx), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vmovq 0x54(%r14,%rdx), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vinsertf128 $0x1, %xmm6, %ymm5, %ymm5
vbroadcastss 0x80(%r14,%rdx), %ymm6
vcvtdq2ps %ymm5, %ymm5
vbroadcastss 0x74(%r14,%rdx), %ymm7
vmulps %ymm5, %ymm6, %ymm5
vaddps %ymm5, %ymm7, %ymm5
vmovq 0x98(%r14,%rdx), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vmovq 0x9c(%r14,%rdx), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vinsertf128 $0x1, %xmm9, %ymm8, %ymm8
vbroadcastss 0xc8(%r14,%rdx), %ymm9
vcvtdq2ps %ymm8, %ymm8
vmovaps %ymm10, %ymm1
vbroadcastss 0xbc(%r14,%rdx), %ymm10
vmulps %ymm8, %ymm9, %ymm8
vaddps %ymm8, %ymm10, %ymm8
vsubps %ymm5, %ymm8, %ymm8
vmulps %ymm2, %ymm8, %ymm8
vaddps %ymm5, %ymm8, %ymm5
vmovq 0x58(%r14,%rdx), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vmovaps %ymm11, %ymm0
vmovq 0x5c(%r14,%rdx), %xmm11
vpmovzxbd %xmm11, %xmm11 # xmm11 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero
vinsertf128 $0x1, %xmm11, %ymm8, %ymm8
vcvtdq2ps %ymm8, %ymm8
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmovq 0xa0(%r14,%rdx), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0xa4(%r14,%rdx), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm9, %ymm7
vaddps %ymm7, %ymm10, %ymm7
vsubps %ymm6, %ymm7, %ymm7
vmulps %ymm7, %ymm2, %ymm7
vaddps %ymm7, %ymm6, %ymm6
vmovq 0x60(%r14,%rdx), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x64(%r14,%rdx), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vbroadcastss 0x84(%r14,%rdx), %ymm8
vbroadcastss 0x78(%r14,%rdx), %ymm9
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm8, %ymm7
vaddps %ymm7, %ymm9, %ymm7
vmovq 0xa8(%r14,%rdx), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vmovq 0xac(%r14,%rdx), %xmm11
vpmovzxbd %xmm11, %xmm11 # xmm11 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero
vinsertf128 $0x1, %xmm11, %ymm10, %ymm10
vbroadcastss 0xcc(%r14,%rdx), %ymm11
vbroadcastss 0xc0(%r14,%rdx), %ymm12
vcvtdq2ps %ymm10, %ymm10
vmulps %ymm10, %ymm11, %ymm10
vaddps %ymm10, %ymm12, %ymm10
vsubps %ymm7, %ymm10, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vaddps %ymm7, %ymm10, %ymm7
vmovq 0x68(%r14,%rdx), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vmovq 0x6c(%r14,%rdx), %xmm13
vpmovzxbd %xmm13, %xmm13 # xmm13 = xmm13[0],zero,zero,zero,xmm13[1],zero,zero,zero,xmm13[2],zero,zero,zero,xmm13[3],zero,zero,zero
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vmovaps 0x1a0(%rsp), %ymm13
vcvtdq2ps %ymm10, %ymm10
vmulps %ymm10, %ymm8, %ymm8
vaddps %ymm8, %ymm9, %ymm8
vmovq 0xb0(%r14,%rdx), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vmovq 0xb4(%r14,%rdx), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vcvtdq2ps %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vaddps %ymm9, %ymm12, %ymm9
vsubps %ymm8, %ymm9, %ymm9
vmulps %ymm2, %ymm9, %ymm2
vaddps %ymm2, %ymm8, %ymm8
vsubps %ymm15, %ymm3, %ymm2
vmulps 0x2e0(%rsp), %ymm2, %ymm2
vsubps %ymm13, %ymm5, %ymm3
vmulps 0x2c0(%rsp), %ymm3, %ymm3
vsubps %ymm14, %ymm7, %ymm5
vmulps 0x2a0(%rsp), %ymm5, %ymm5
vsubps %ymm15, %ymm4, %ymm4
vmulps %ymm4, %ymm1, %ymm4
vsubps %ymm13, %ymm6, %ymm6
vmulps %ymm6, %ymm0, %ymm6
vsubps %ymm14, %ymm8, %ymm7
vmovaps 0x180(%rsp), %ymm14
vmulps %ymm7, %ymm14, %ymm7
vextractf128 $0x1, %ymm2, %xmm8
vextractf128 $0x1, %ymm4, %xmm9
vpminsd %xmm9, %xmm8, %xmm10
vpmaxsd %xmm9, %xmm8, %xmm8
vextractf128 $0x1, %ymm3, %xmm9
vextractf128 $0x1, %ymm6, %xmm11
vpminsd %xmm11, %xmm9, %xmm12
vpmaxsd %xmm12, %xmm10, %xmm10
vpmaxsd %xmm11, %xmm9, %xmm9
vpminsd %xmm9, %xmm8, %xmm8
vextractf128 $0x1, %ymm5, %xmm9
vextractf128 $0x1, %ymm7, %xmm11
vpminsd %xmm11, %xmm9, %xmm12
vpmaxsd %xmm11, %xmm9, %xmm9
vpmaxsd 0x400(%rsp), %xmm12, %xmm11
vmovdqa 0x160(%rsp), %ymm12
vpmaxsd %xmm11, %xmm10, %xmm10
vpminsd 0x3e0(%rsp), %xmm9, %xmm9
vpminsd %xmm9, %xmm8, %xmm8
vpcmpeqd %xmm0, %xmm0, %xmm0
vpxor 0xd0(%rsp), %xmm0, %xmm1
vpmovsxbd %xmm1, %xmm1
vpcmpgtd %xmm8, %xmm10, %xmm8
vpor %xmm1, %xmm8, %xmm1
vpminsd %xmm4, %xmm2, %xmm8
vpminsd %xmm6, %xmm3, %xmm9
vpmaxsd %xmm9, %xmm8, %xmm8
vpminsd %xmm7, %xmm5, %xmm9
vpmaxsd %xmm12, %xmm9, %xmm9
vpmaxsd %xmm9, %xmm8, %xmm8
vpmaxsd %xmm4, %xmm2, %xmm2
vpmaxsd %xmm6, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm7, %xmm5, %xmm3
vmovdqa 0x260(%rsp), %ymm5
vpminsd %xmm5, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpcmpgtd %xmm2, %xmm8, %xmm2
vpxor 0x80(%rsp), %xmm0, %xmm0
vpmovsxbd %xmm0, %xmm0
vpor %xmm0, %xmm2, %xmm0
vinsertf128 $0x1, %xmm1, %ymm0, %ymm0
vcvtdq2ps %ymm0, %ymm0
vbroadcastss 0x1cdbc03(%rip), %ymm1 # 0x1ef09cc
vcmpltps %ymm0, %ymm1, %ymm0
vmovmskps %ymm0, %eax
testl %eax, %eax
je 0x215b53
addq %r14, %rdx
movzbl %al, %eax
movq %r15, 0x58(%rsp)
movb %cl, 0x6(%rsp)
movq %rsi, 0x8(%rsp)
movq %rdx, 0x108(%rsp)
movq %rax, 0x118(%rsp)
bsfq %rax, %rax
movzwl (%rdx,%rax,8), %r8d
movl %r8d, 0x140(%rsp)
movzwl 0x2(%rdx,%rax,8), %r9d
movl %r9d, 0xe0(%rsp)
movl 0xd8(%rdx), %ecx
movl 0x4(%rdx,%rax,8), %edx
movq 0x38(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x138(%rsp)
movq 0x1e8(%rax), %rax
movq %rcx, 0xd0(%rsp)
movq (%rax,%rcx,8), %rax
movq 0x58(%rax), %rsi
movq 0x68(%rax), %rdi
movq %rdx, 0x100(%rsp)
imulq %rdx, %rdi
vmovss 0x1c(%rbx), %xmm0
vmovss 0x28(%rax), %xmm1
vmovss 0x2c(%rax), %xmm2
vmovss 0x30(%rax), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x1cdbb3c(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm1, %xmm2, %xmm5
movl $0x7fff, %edx # imm = 0x7FFF
andl %edx, %r8d
movl (%rsi,%rdi), %ecx
movl %r8d, 0x48(%rsp)
addl %r8d, %ecx
movl %r9d, %r8d
andl %edx, %r8d
movq %rsi, 0x130(%rsp)
movq %rdi, 0x128(%rsp)
movl 0x4(%rsi,%rdi), %r10d
movl %r10d, %esi
movl %r8d, 0x44(%rsp)
imull %r8d, %esi
vcvttss2si %xmm5, %edx
addl %ecx, %esi
movslq %edx, %rcx
movq 0xe0(%rax), %rdi
movq %rdi, 0xa0(%rsp)
imulq $0x38, %rcx, %r8
movq %r8, 0x80(%rsp)
movq 0x10(%rdi,%r8), %rcx
movq %rcx, %rax
imulq %rsi, %rax
movq (%rdi,%r8), %rdx
vmovups (%rdx,%rax), %xmm3
leaq 0x1(%rsi), %rax
movq 0x48(%rdi,%r8), %r9
movq %r9, %rdi
imulq %rax, %rdi
movq %rdi, 0x60(%rsp)
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm2
leaq (%rsi,%r10), %r15
movq %r15, %rax
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm4
leaq (%rsi,%r10), %r13
incq %r13
movq %r13, %rax
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm1
movq %r9, %r11
imulq %rsi, %r11
movq %r9, %rax
imulq %r15, %rax
movq %rax, 0x240(%rsp)
movq %r9, %r8
imulq %r13, %r8
xorl %ebx, %ebx
cmpw $0x0, 0x140(%rsp)
setns %bl
leaq (%rsi,%rbx), %rax
incq %rax
movq %r9, %rdi
imulq %rax, %rdi
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm6
addq %r13, %rbx
movq %rbx, %rax
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm7
movq %r9, %rsi
imulq %rbx, %rsi
cmpw $0x0, 0xe0(%rsp)
movl $0x0, %eax
cmovnsq %r10, %rax
addq %rax, %r15
movq %r9, %r10
imulq %r15, %r10
imulq %rcx, %r15
vmovups (%rdx,%r15), %xmm8
addq %rax, %r13
movq %r9, %r15
imulq %r13, %r15
imulq %rcx, %r13
vmovups (%rdx,%r13), %xmm9
addq %rbx, %rax
movq 0x110(%rsp), %rbx
imulq %rax, %rcx
vmovups (%rdx,%rcx), %xmm10
movq 0x80(%rsp), %rcx
movq 0xa0(%rsp), %rdx
movq 0x38(%rdx,%rcx), %rcx
vmovups (%rcx,%r11), %xmm11
movq 0x60(%rsp), %rdx
vmovups (%rcx,%rdx), %xmm12
movq 0x240(%rsp), %rdx
vmovups (%rcx,%rdx), %xmm13
vmovups (%rcx,%r8), %xmm14
vmovups (%rcx,%rdi), %xmm15
vsubss %xmm5, %xmm0, %xmm0
vmovups (%rcx,%rsi), %xmm5
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vsubps %xmm3, %xmm11, %xmm11
vmulps %xmm0, %xmm11, %xmm11
vaddps %xmm3, %xmm11, %xmm11
vmovups (%rcx,%r10), %xmm3
vsubps %xmm2, %xmm12, %xmm12
vmulps %xmm0, %xmm12, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmovups (%rcx,%r15), %xmm12
imulq %r9, %rax
vsubps %xmm4, %xmm13, %xmm13
vmulps %xmm0, %xmm13, %xmm13
vaddps %xmm4, %xmm13, %xmm4
vmovups (%rcx,%rax), %xmm13
vsubps %xmm1, %xmm14, %xmm14
vmulps %xmm0, %xmm14, %xmm14
vaddps %xmm1, %xmm14, %xmm1
vsubps %xmm6, %xmm15, %xmm14
vmulps %xmm0, %xmm14, %xmm14
vaddps %xmm6, %xmm14, %xmm6
vsubps %xmm7, %xmm5, %xmm5
vmulps %xmm5, %xmm0, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm0, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vsubps %xmm9, %xmm12, %xmm7
vmulps %xmm7, %xmm0, %xmm7
vaddps %xmm7, %xmm9, %xmm7
vsubps %xmm10, %xmm13, %xmm8
vmulps %xmm0, %xmm8, %xmm0
vaddps %xmm0, %xmm10, %xmm0
vunpcklps %xmm1, %xmm11, %xmm8 # xmm8 = xmm11[0],xmm1[0],xmm11[1],xmm1[1]
vunpckhps %xmm1, %xmm11, %xmm9 # xmm9 = xmm11[2],xmm1[2],xmm11[3],xmm1[3]
vunpcklps %xmm4, %xmm2, %xmm10 # xmm10 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
vunpckhps %xmm4, %xmm2, %xmm11 # xmm11 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
vunpcklps %xmm11, %xmm9, %xmm9 # xmm9 = xmm9[0],xmm11[0],xmm9[1],xmm11[1]
vunpcklps %xmm10, %xmm8, %xmm11 # xmm11 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
vunpckhps %xmm10, %xmm8, %xmm8 # xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
vunpcklps %xmm5, %xmm2, %xmm10 # xmm10 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vunpckhps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
vunpcklps %xmm1, %xmm6, %xmm12 # xmm12 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
vunpckhps %xmm1, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
vunpcklps %xmm6, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm6[0],xmm2[1],xmm6[1]
vunpcklps %xmm12, %xmm10, %xmm6 # xmm6 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
vunpckhps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
vunpcklps %xmm0, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
vunpckhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
vunpcklps %xmm7, %xmm5, %xmm13 # xmm13 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
vunpckhps %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
vunpcklps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpcklps %xmm13, %xmm12, %xmm5 # xmm5 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm7, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vunpckhps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
vunpcklps %xmm3, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vunpcklps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vunpcklps %xmm7, %xmm13, %xmm3 # xmm3 = xmm13[0],xmm7[0],xmm13[1],xmm7[1]
vunpckhps %xmm7, %xmm13, %xmm4 # xmm4 = xmm13[2],xmm7[2],xmm13[3],xmm7[3]
vinsertf128 $0x1, %xmm5, %ymm11, %ymm5
vinsertf128 $0x1, %xmm12, %ymm8, %ymm7
vinsertf128 $0x1, %xmm0, %ymm9, %ymm0
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vinsertf128 $0x1, %xmm10, %ymm10, %ymm13
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vbroadcastss (%rbx), %ymm4
vbroadcastss 0x4(%rbx), %ymm10
vinsertf128 $0x1, %xmm1, %ymm1, %ymm11
vbroadcastss 0x8(%rbx), %ymm12
vsubps %ymm4, %ymm5, %ymm1
vsubps %ymm10, %ymm7, %ymm14
vsubps %ymm12, %ymm0, %ymm5
vsubps %ymm4, %ymm6, %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vsubps %ymm10, %ymm13, %ymm13
vsubps %ymm12, %ymm2, %ymm8
vsubps %ymm4, %ymm3, %ymm4
vsubps %ymm10, %ymm9, %ymm3
vsubps %ymm12, %ymm11, %ymm2
vmovaps %ymm2, 0x60(%rsp)
vsubps %ymm1, %ymm4, %ymm12
vsubps %ymm14, %ymm3, %ymm15
vsubps %ymm5, %ymm2, %ymm0
vaddps %ymm3, %ymm14, %ymm6
vaddps %ymm5, %ymm2, %ymm7
vmulps %ymm6, %ymm0, %ymm9
vmulps %ymm7, %ymm15, %ymm11
vsubps %ymm9, %ymm11, %ymm10
vaddps %ymm1, %ymm4, %ymm9
vmulps %ymm7, %ymm12, %ymm7
vmovaps %ymm0, 0x4a0(%rsp)
vmulps %ymm0, %ymm9, %ymm11
vsubps %ymm7, %ymm11, %ymm7
vmovaps %ymm15, 0x280(%rsp)
vmulps %ymm9, %ymm15, %ymm11
vmovaps %ymm12, 0x360(%rsp)
vmulps %ymm6, %ymm12, %ymm6
vbroadcastss 0x18(%rbx), %ymm12
vsubps %ymm11, %ymm6, %ymm6
vmulps %ymm6, %ymm12, %ymm6
vbroadcastss 0x14(%rbx), %ymm0
vmovaps %ymm0, 0x80(%rsp)
vmulps %ymm7, %ymm0, %ymm7
vaddps %ymm6, %ymm7, %ymm6
vbroadcastss 0x10(%rbx), %ymm0
vmulps %ymm0, %ymm10, %ymm7
vaddps %ymm6, %ymm7, %ymm11
vmovaps %ymm13, %ymm2
vsubps %ymm13, %ymm14, %ymm13
vsubps %ymm8, %ymm5, %ymm15
vmovaps %ymm14, 0x240(%rsp)
vaddps %ymm2, %ymm14, %ymm6
vmovaps %ymm5, 0x140(%rsp)
vaddps %ymm5, %ymm8, %ymm7
vmulps %ymm6, %ymm15, %ymm14
vmulps %ymm7, %ymm13, %ymm5
vsubps %ymm14, %ymm5, %ymm5
vmovaps 0xa0(%rsp), %ymm9
vsubps %ymm9, %ymm1, %ymm14
vmulps %ymm7, %ymm14, %ymm7
vmovaps %ymm1, 0xe0(%rsp)
vaddps %ymm1, %ymm9, %ymm10
vmovaps %ymm9, %ymm1
vmulps %ymm10, %ymm15, %ymm9
vsubps %ymm7, %ymm9, %ymm7
vmovaps %ymm13, 0x480(%rsp)
vmulps %ymm10, %ymm13, %ymm9
vmulps %ymm6, %ymm14, %ymm6
vsubps %ymm9, %ymm6, %ymm6
vmulps %ymm6, %ymm12, %ymm6
vmovaps 0x80(%rsp), %ymm9
vmulps %ymm7, %ymm9, %ymm7
vaddps %ymm6, %ymm7, %ymm6
vmulps %ymm5, %ymm0, %ymm5
vaddps %ymm6, %ymm5, %ymm10
vsubps %ymm4, %ymm1, %ymm13
vaddps %ymm4, %ymm1, %ymm4
vsubps %ymm3, %ymm2, %ymm7
vaddps %ymm3, %ymm2, %ymm3
vmovaps 0x60(%rsp), %ymm1
vsubps %ymm1, %ymm8, %ymm6
vaddps %ymm1, %ymm8, %ymm1
vmulps %ymm3, %ymm6, %ymm2
vmulps %ymm1, %ymm7, %ymm5
vsubps %ymm2, %ymm5, %ymm2
vmulps %ymm1, %ymm13, %ymm1
vmulps %ymm4, %ymm6, %ymm5
vsubps %ymm1, %ymm5, %ymm1
vmulps %ymm4, %ymm7, %ymm4
vmulps %ymm3, %ymm13, %ymm3
vsubps %ymm4, %ymm3, %ymm3
vmovaps %ymm12, 0xa0(%rsp)
vmulps %ymm3, %ymm12, %ymm3
vmulps %ymm1, %ymm9, %ymm1
vaddps %ymm3, %ymm1, %ymm1
vmovaps %ymm0, 0x60(%rsp)
vmulps %ymm2, %ymm0, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vaddps %ymm10, %ymm11, %ymm2
vaddps %ymm2, %ymm1, %ymm9
vminps %ymm10, %ymm11, %ymm2
vminps %ymm1, %ymm2, %ymm2
vbroadcastss 0x1d0bb69(%rip), %ymm3 # 0x1f20ec4
vandps %ymm3, %ymm9, %ymm4
vbroadcastss 0x1d0bb64(%rip), %ymm3 # 0x1f20ecc
vmovaps %ymm4, 0x3a0(%rsp)
vmulps %ymm3, %ymm4, %ymm3
vbroadcastss 0x1d0bb42(%rip), %ymm4 # 0x1f20ec0
vxorps %ymm4, %ymm3, %ymm4
vcmpnltps %ymm4, %ymm2, %ymm2
vmovaps %ymm11, 0x460(%rsp)
vmovaps %ymm10, 0x440(%rsp)
vmaxps %ymm10, %ymm11, %ymm4
vmaxps %ymm1, %ymm4, %ymm1
vcmpleps %ymm3, %ymm1, %ymm1
vorps %ymm1, %ymm2, %ymm0
leaq 0x7(%rsp), %rax
movq %rax, 0x520(%rsp)
vtestps 0x380(%rsp), %ymm0
je 0x215ac4
vmovaps 0x4a0(%rsp), %ymm10
vmovaps %ymm0, 0x420(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmulps %ymm0, %ymm10, %ymm1
vmovaps 0x280(%rsp), %ymm12
vmulps %ymm15, %ymm12, %ymm2
vsubps %ymm1, %ymm2, %ymm2
vmulps %ymm7, %ymm15, %ymm3
vmulps %ymm6, %ymm0, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vbroadcastss 0x1d0bab6(%rip), %ymm5 # 0x1f20ec4
vmovaps %ymm9, %ymm11
vmovaps %ymm5, %ymm9
vandps %ymm5, %ymm1, %ymm1
vandps %ymm5, %ymm3, %ymm3
vcmpltps %ymm3, %ymm1, %ymm1
vblendvps %ymm1, %ymm2, %ymm4, %ymm1
vmulps %ymm6, %ymm14, %ymm2
vmulps %ymm14, %ymm10, %ymm3
vmovaps 0x360(%rsp), %ymm8
vmulps %ymm15, %ymm8, %ymm4
vsubps %ymm4, %ymm3, %ymm3
vmulps %ymm13, %ymm15, %ymm5
vsubps %ymm2, %ymm5, %ymm5
vandps %ymm4, %ymm9, %ymm4
vandps %ymm2, %ymm9, %ymm2
vcmpltps %ymm2, %ymm4, %ymm2
vblendvps %ymm2, %ymm3, %ymm5, %ymm10
vmulps %ymm0, %ymm13, %ymm2
vmulps %ymm0, %ymm8, %ymm3
vmulps %ymm14, %ymm12, %ymm4
vmulps %ymm7, %ymm14, %ymm0
vsubps %ymm4, %ymm3, %ymm3
vsubps %ymm2, %ymm0, %ymm0
vandps %ymm4, %ymm9, %ymm4
vandps %ymm2, %ymm9, %ymm2
vcmpltps %ymm2, %ymm4, %ymm2
vblendvps %ymm2, %ymm3, %ymm0, %ymm0
vmulps 0xa0(%rsp), %ymm0, %ymm2
vmulps 0x80(%rsp), %ymm10, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps 0x60(%rsp), %ymm1, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vaddps %ymm2, %ymm2, %ymm2
vmulps 0x140(%rsp), %ymm0, %ymm3
vmulps 0x240(%rsp), %ymm10, %ymm4
vaddps %ymm3, %ymm4, %ymm3
vmulps 0xe0(%rsp), %ymm1, %ymm4
vrcpps %ymm2, %ymm5
vaddps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm3, %ymm3
vmulps %ymm5, %ymm2, %ymm4
vbroadcastss 0x1cd722b(%rip), %ymm7 # 0x1eec714
vsubps %ymm4, %ymm7, %ymm4
vmulps %ymm4, %ymm5, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vbroadcastss 0xc(%rbx), %ymm5
vmulps %ymm4, %ymm3, %ymm4
vcmpleps %ymm4, %ymm5, %ymm3
vbroadcastss 0x20(%rbx), %ymm5
vcmpleps %ymm5, %ymm4, %ymm5
vandps %ymm5, %ymm3, %ymm3
vcmpneqps 0x1d0b9e4(%rip), %ymm2, %ymm2 # 0x1f20f00
vandps %ymm3, %ymm2, %ymm2
vpslld $0x1f, %xmm2, %xmm3
vpsrad $0x1f, %xmm3, %xmm3
vextractf128 $0x1, %ymm2, %xmm2
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vinsertf128 $0x1, %xmm2, %ymm3, %ymm2
vmovaps 0x420(%rsp), %ymm3
vandps 0x380(%rsp), %ymm3, %ymm3
vtestps %ymm3, %ymm2
je 0x215ac4
vandps %ymm3, %ymm2, %ymm2
vmovaps %ymm11, 0x500(%rsp)
leaq 0x7(%rsp), %rax
movq %rax, 0x520(%rsp)
vmovaps 0x440(%rsp), %ymm6
vsubps %ymm6, %ymm11, %ymm3
vmovaps 0x3c0(%rsp), %ymm5
vmovaps 0x460(%rsp), %ymm7
vblendvps %ymm5, %ymm3, %ymm7, %ymm3
vmovaps %ymm2, 0x540(%rsp)
vsubps %ymm7, %ymm11, %ymm2
vblendvps %ymm5, %ymm2, %ymm6, %ymm2
vmovaps %ymm4, 0x5a0(%rsp)
vmovaps %ymm3, 0x4c0(%rsp)
vbroadcastss 0x1cdb402(%rip), %ymm4 # 0x1ef09cc
vbroadcastss 0x1cd7141(%rip), %ymm6 # 0x1eec714
vblendvps %ymm5, %ymm4, %ymm6, %ymm4
vmovaps %ymm2, 0x4e0(%rsp)
vmulps %ymm1, %ymm4, %ymm1
vmovaps %ymm1, 0x5c0(%rsp)
vmulps %ymm4, %ymm10, %ymm1
vmovaps %ymm1, 0x5e0(%rsp)
vmulps %ymm0, %ymm4, %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovd 0x48(%rsp), %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x1d07374(%rip), %xmm0, %xmm0 # 0x1f1c990
vmovd 0x44(%rsp), %xmm1
movq 0x130(%rsp), %rcx
movq 0x128(%rsp), %rdx
movzwl 0x8(%rcx,%rdx), %eax
decl %eax
vcvtsi2ss %eax, %xmm13, %xmm4
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vpaddd 0x1d07356(%rip), %xmm1, %xmm1 # 0x1f1c9a0
vrcpss %xmm4, %xmm4, %xmm5
vmulss %xmm4, %xmm5, %xmm4
vmovss 0x1cdb99e(%rip), %xmm7 # 0x1ef0ff8
vsubss %xmm4, %xmm7, %xmm4
vmulss %xmm4, %xmm5, %xmm4
movzwl 0xa(%rcx,%rdx), %eax
decl %eax
vcvtsi2ss %eax, %xmm13, %xmm5
vrcpss %xmm5, %xmm5, %xmm6
vmulss %xmm5, %xmm6, %xmm5
vsubss %xmm5, %xmm7, %xmm5
vmulss %xmm5, %xmm6, %xmm5
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vcvtdq2ps %ymm0, %ymm0
vmulps %ymm0, %ymm11, %ymm0
vaddps %ymm3, %ymm0, %ymm0
vshufps $0x0, %xmm4, %xmm4, %xmm3 # xmm3 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vcvtdq2ps %ymm1, %ymm1
vmulps %ymm1, %ymm11, %ymm1
vaddps %ymm2, %ymm1, %ymm1
vshufps $0x0, %xmm5, %xmm5, %xmm2 # xmm2 = xmm5[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vmulps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0x4e0(%rsp)
movq 0x138(%rsp), %rax
movq 0x1e8(%rax), %rax
movq 0xd0(%rsp), %rcx
movq (%rax,%rcx,8), %rsi
movl 0x24(%rbx), %eax
testl %eax, 0x34(%rsi)
je 0x215ac4
movq 0x38(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
movq 0x58(%rsp), %r15
movq 0x50(%rsp), %r13
movq 0x30(%rsp), %rdi
movq 0x28(%rsp), %r8
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovdqa 0x260(%rsp), %ymm5
leaq 0x620(%rsp), %rdx
movb 0x6(%rsp), %cl
jne 0x21574b
cmpq $0x0, 0x48(%rsi)
je 0x215baa
vrcpps %ymm11, %ymm2
vmulps %ymm2, %ymm11, %ymm3
vbroadcastss 0x1cd6fb7(%rip), %ymm4 # 0x1eec714
vsubps %ymm3, %ymm4, %ymm3
vmulps %ymm3, %ymm2, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0x1cdb876(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x3a0(%rsp), %ymm6
vcmpnltps %ymm3, %ymm6, %ymm3
vandps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vminps %ymm4, %ymm0, %ymm0
vmovaps %ymm0, 0x560(%rsp)
vmulps %ymm2, %ymm1, %ymm0
vminps %ymm4, %ymm0, %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovmskps %ymm0, %ecx
movq %rcx, 0x60(%rsp)
movq 0x60(%rsp), %rcx
bsfq %rcx, %rcx
movq %rcx, 0xe0(%rsp)
cmpl $0x0, 0x60(%rsp)
setne 0x280(%rsp)
vmovaps 0x220(%rsp), %ymm15
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm14
vmovdqa 0x160(%rsp), %ymm12
je 0x215b3b
movq %rax, 0x140(%rsp)
movq %rsi, 0xa0(%rsp)
movq 0x38(%rsp), %rax
movq 0x8(%rax), %rax
movq %rax, 0x80(%rsp)
movq 0x8(%rsp), %rsi
movq 0xe0(%rsp), %rax
movb 0x6(%rsp), %cl
vmovss 0x560(%rsp,%rax,4), %xmm0
vmovss 0x580(%rsp,%rax,4), %xmm1
vmovss 0x20(%rbx), %xmm2
vmovss %xmm2, 0x240(%rsp)
vmovss 0x5a0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x20(%rbx)
vmovss 0x5c0(%rsp,%rax,4), %xmm2
vmovss 0x5e0(%rsp,%rax,4), %xmm3
movq %rax, 0xe0(%rsp)
vmovss 0x600(%rsp,%rax,4), %xmm4
vmovss %xmm2, 0x1c0(%rsp)
vmovss %xmm3, 0x1c4(%rsp)
vmovss %xmm4, 0x1c8(%rsp)
vmovss %xmm0, 0x1cc(%rsp)
vmovss %xmm1, 0x1d0(%rsp)
movq 0x100(%rsp), %rax
movl %eax, 0x1d4(%rsp)
movq 0xd0(%rsp), %rax
movl %eax, 0x1d8(%rsp)
movq 0x80(%rsp), %rax
movl (%rax), %eax
movl %eax, 0x1dc(%rsp)
movq 0x80(%rsp), %rax
movl 0x4(%rax), %eax
movl %eax, 0x1e0(%rsp)
orl $-0x1, 0x4c(%rsp)
leaq 0x4c(%rsp), %rax
movq %rax, 0x1f0(%rsp)
movq 0xa0(%rsp), %rax
movq 0x18(%rax), %rax
movq %rax, 0x1f8(%rsp)
movq 0x80(%rsp), %rax
movq %rax, 0x200(%rsp)
movq %rbx, 0x208(%rsp)
leaq 0x1c0(%rsp), %rax
movq %rax, 0x210(%rsp)
movl $0x1, 0x218(%rsp)
movq 0xa0(%rsp), %rax
movq 0x48(%rax), %rax
testq %rax, %rax
je 0x2159d0
leaq 0x1f0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x8(%rsp), %rsi
movb 0x6(%rsp), %cl
leaq 0x620(%rsp), %rdx
vmovdqa 0x260(%rsp), %ymm5
vmovdqa 0x160(%rsp), %ymm12
movq 0x10(%rsp), %r11
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r9
movq 0x28(%rsp), %r8
vmovaps 0x180(%rsp), %ymm14
movq 0x30(%rsp), %rdi
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x220(%rsp), %ymm15
movq 0x1f0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x215a82
movq 0x140(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x215b44
movq %rax, 0x360(%rsp)
movq 0x140(%rsp), %rax
testb $0x2, (%rax)
jne 0x215a08
movq 0xa0(%rsp), %rax
testb $0x40, 0x3e(%rax)
je 0x215a71
leaq 0x1f0(%rsp), %rdi
vzeroupper
callq *0x360(%rsp)
movq 0x8(%rsp), %rsi
movb 0x6(%rsp), %cl
leaq 0x620(%rsp), %rdx
vmovdqa 0x260(%rsp), %ymm5
vmovdqa 0x160(%rsp), %ymm12
movq 0x10(%rsp), %r11
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r9
movq 0x28(%rsp), %r8
vmovaps 0x180(%rsp), %ymm14
movq 0x30(%rsp), %rdi
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x220(%rsp), %ymm15
movq 0x1f0(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x215b44
vmovss 0x240(%rsp), %xmm0
vmovss %xmm0, 0x20(%rbx)
movq 0x60(%rsp), %rcx
movq 0xe0(%rsp), %rax
btcq %rax, %rcx
bsfq %rcx, %rax
movq %rcx, 0x60(%rsp)
testq %rcx, %rcx
movb 0x6(%rsp), %cl
setne 0x280(%rsp)
jne 0x215832
jmp 0x215b44
movq 0x58(%rsp), %r15
vmovaps 0x220(%rsp), %ymm15
vmovaps 0x1a0(%rsp), %ymm13
movq 0x50(%rsp), %r13
movq 0x30(%rsp), %rdi
vmovaps 0x180(%rsp), %ymm14
movq 0x28(%rsp), %r8
movq 0x20(%rsp), %r9
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %r11
vmovdqa 0x160(%rsp), %ymm12
vmovdqa 0x260(%rsp), %ymm5
movq 0x8(%rsp), %rsi
movq 0x118(%rsp), %rdx
leaq -0x1(%rdx), %rax
andq %rax, %rdx
movq %rdx, %rax
movq 0x108(%rsp), %rdx
jne 0x214df6
jmp 0x215b53
movq 0x8(%rsp), %rsi
movb 0x6(%rsp), %cl
testb $0x1, 0x280(%rsp)
je 0x215b19
jmp 0x215c23
movq 0x120(%rsp), %rax
incq %rax
cmpq %rsi, %rax
setb %cl
vmovaps 0x340(%rsp), %ymm6
vmovaps 0x2e0(%rsp), %ymm7
vmovaps 0x2c0(%rsp), %ymm8
vmovaps 0x2a0(%rsp), %ymm9
vmovaps 0x320(%rsp), %ymm10
vmovaps 0x300(%rsp), %ymm11
leaq 0x620(%rsp), %rdx
jne 0x214937
jmp 0x215c04
vmovaps 0x220(%rsp), %ymm15
vmovaps 0x1a0(%rsp), %ymm13
vmovaps 0x340(%rsp), %ymm6
vmovaps 0x2e0(%rsp), %ymm7
vmovaps 0x2c0(%rsp), %ymm8
vmovaps 0x2a0(%rsp), %ymm9
vmovaps 0x320(%rsp), %ymm10
vmovaps 0x300(%rsp), %ymm11
vmovaps 0x180(%rsp), %ymm14
vmovdqa 0x160(%rsp), %ymm12
xorl %eax, %eax
testb $0x1, %cl
je 0x215c15
movl $0xff800000, 0x20(%rbx) # imm = 0xFF800000
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x214746
jmp 0x21452b
vmovaps 0x340(%rsp), %ymm6
vmovaps 0x2e0(%rsp), %ymm7
vmovaps 0x2c0(%rsp), %ymm8
vmovaps 0x2a0(%rsp), %ymm9
vmovaps 0x320(%rsp), %ymm10
vmovaps 0x300(%rsp), %ymm11
jmp 0x215c04
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
__forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__) || (defined(__aarch64__))
return _mm_blendv_ps(f, t, m);
#else
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
}
|
vbroadcastss 0x1cd4a18(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm0, %xmm3, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,0,3,2]
vminps %xmm0, %xmm3, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vminps %xmm3, %xmm4, %xmm3
vcmpeqps %xmm3, %xmm0, %xmm0
vtestps %xmm2, %xmm0
je 0x217030
vandps %xmm2, %xmm0, %xmm1
vmovmskps %xmm1, %eax
bsfq %rax, %rax
movq %rax, (%rdi)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat4_sse2.h
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
__forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__) || (defined(__aarch64__))
return _mm_blendv_ps(f, t, m);
#else
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
}
|
vbroadcastss 0x1cd49db(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm0, %xmm3, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,0,3,2]
vminps %xmm0, %xmm3, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vminps %xmm3, %xmm4, %xmm3
vcmpeqps %xmm3, %xmm0, %xmm0
vtestps %xmm2, %xmm0
je 0x21706d
vandps %xmm2, %xmm0, %xmm1
vmovmskps %xmm1, %eax
bsfq %rax, %rax
movq %rax, (%rdi)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat4_sse2.h
|
embree::avx::BVHNIntersector1<8, 1, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
__forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__) || (defined(__aarch64__))
return _mm_blendv_ps(f, t, m);
#else
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
}
|
vbroadcastss 0x1cd4961(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm0, %xmm3, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,0,3,2]
vminps %xmm0, %xmm3, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vminps %xmm3, %xmm4, %xmm3
vcmpeqps %xmm3, %xmm0, %xmm0
vtestps %xmm2, %xmm0
je 0x2170e7
vandps %xmm2, %xmm0, %xmm1
vmovmskps %xmm1, %eax
bsfq %rax, %rax
movq %rax, (%rdi)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat4_sse2.h
|
embree::avx::BVHNIntersector1<8, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::QuadMiMBIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
return _mm256_blendv_ps(f, t, m);
}
|
vbroadcastss 0x1cd468b(%rip), %ymm2 # 0x1eeba20
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vshufps $0xb1, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm2, %ymm0, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vperm2f128 $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[2,3,0,1]
vminps %ymm3, %ymm2, %ymm2
vcmpeqps %ymm2, %ymm0, %ymm0
vtestps %ymm1, %ymm0
je 0x2173c7
vandps %ymm1, %ymm0, %ymm1
vmovmskps %ymm1, %eax
bsfl %eax, %eax
movl %eax, (%rdi)
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat8_avx.h
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
__forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__) || (defined(__aarch64__))
return _mm_blendv_ps(f, t, m);
#else
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
}
|
vbroadcastss 0x1cd4643(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm0, %xmm3, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,0,3,2]
vminps %xmm0, %xmm3, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vminps %xmm3, %xmm4, %xmm3
vcmpeqps %xmm3, %xmm0, %xmm0
vtestps %xmm2, %xmm0
je 0x217405
vandps %xmm2, %xmm0, %xmm1
vmovmskps %xmm1, %eax
bsfq %rax, %rax
movq %rax, (%rdi)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat4_sse2.h
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
__forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) {
#if defined(__AVX512VL__)
return _mm_mask_blend_ps(m, f, t);
#elif defined(__SSE4_1__) || (defined(__aarch64__))
return _mm_blendv_ps(f, t, m);
#else
return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f));
#endif
}
|
vbroadcastss 0x1cd4606(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm0, %xmm3, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,0,3,2]
vminps %xmm0, %xmm3, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vminps %xmm3, %xmm4, %xmm3
vcmpeqps %xmm3, %xmm0, %xmm0
vtestps %xmm2, %xmm0
je 0x217442
vandps %xmm2, %xmm0, %xmm1
vmovmskps %xmm1, %eax
bsfq %rax, %rax
movq %rax, (%rdi)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat4_sse2.h
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiIntersector1Pluecker<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
return _mm256_blendv_ps(f, t, m);
}
|
vbroadcastss 0x1cd45c9(%rip), %ymm2 # 0x1eeba20
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vshufps $0xb1, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm2, %ymm0, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vperm2f128 $0x1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[2,3,0,1]
vminps %ymm3, %ymm2, %ymm2
vcmpeqps %ymm2, %ymm0, %ymm0
vtestps %ymm1, %ymm0
je 0x217489
vandps %ymm1, %ymm0, %ymm1
vmovmskps %ymm1, %eax
bsfl %eax, %eax
movl %eax, (%rdi)
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat8_avx.h
|
embree::avx::PatchEval<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::eval(embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::Ref, float const&, float const&, float, unsigned long) (.cold.1)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x217546
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::GeneralCatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::GeneralCatmullClarkPatchT(embree::HalfEdge const*, char const*, unsigned long) (.cold.1)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq $0x10, 0x38(%rdi)
leaq 0x2c0(%rdi), %rax
leaq 0x80(%rdi), %rcx
xorl %edx, %edx
leaq (%rcx,%rdx), %r8
movq %r8, 0x280(%rdi,%rdx)
leaq (%rax,%rdx), %r8
movq %r8, 0x340(%rdi,%rdx)
andq $0x0, 0x39c(%rdi,%rdx)
andl $0x0, 0x3a4(%rdi,%rdx)
addq $0x380, %rdx # imm = 0x380
cmpq $0x3800, %rdx # imm = 0x3800
jne 0x217578
addq $0x40, %rdi
movq %rdi, (%rsi)
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::GeneralCatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::GeneralCatmullClarkPatchT(embree::HalfEdge const*, char const*, unsigned long) (.cold.2)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x2175be
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::GeneralCatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::GeneralCatmullClarkPatchT(embree::HalfEdge const*, char const*, unsigned long) (.cold.3)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x2175dd
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::GeneralCatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::GeneralCatmullClarkPatchT(embree::HalfEdge const*, char const*, unsigned long) (.cold.4)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x2175fc
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::CatmullClark1RingT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::CatmullClark1RingT(embree::CatmullClark1RingT<embree::vfloat_impl<4>, embree::vfloat_impl<4>> const&) (.cold.2)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x217756
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::avx::PatchEvalSimd<embree::vboolf_impl<4>, embree::vint_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>>::eval(embree::vboolf_impl<4> const&, embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::Ref, embree::vfloat_impl<4> const&, embree::vfloat_impl<4> const&, float, unsigned long) (.cold.1)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x217846
retq
|
/embree[P]embree/kernels/common/../../common/sys/array.h
|
embree::avx::evalGrid(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, float*, float*, float*, float*, float*, embree::SubdivMesh const*) (.cold.1)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq $0x10, 0x38(%rdi)
leaq 0x2c0(%rdi), %rax
leaq 0x80(%rdi), %rcx
xorl %edx, %edx
leaq (%rcx,%rdx), %r8
movq %r8, 0x280(%rdi,%rdx)
leaq (%rax,%rdx), %r8
movq %r8, 0x340(%rdi,%rdx)
andq $0x0, 0x39c(%rdi,%rdx)
andl $0x0, 0x3a4(%rdi,%rdx)
addq $0x380, %rdx # imm = 0x380
cmpq $0x3800, %rdx # imm = 0x3800
jne 0x2178d4
addq $0x40, %rdi
movq %rdi, (%rsi)
retq
|
/embree[P]embree/kernels/subdiv/../geometry/../common/../../common/sys/array.h
|
embree::avx::evalGrid(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, float*, float*, float*, float*, float*, embree::SubdivMesh const*) (.cold.2)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x21791a
retq
|
/embree[P]embree/kernels/subdiv/../geometry/../common/../../common/sys/array.h
|
embree::avx::evalGrid(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, float*, float*, float*, float*, float*, embree::SubdivMesh const*) (.cold.3)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x217939
retq
|
/embree[P]embree/kernels/subdiv/../geometry/../common/../../common/sys/array.h
|
embree::avx::evalGrid(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, float*, float*, float*, float*, float*, embree::SubdivMesh const*) (.cold.4)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x217958
retq
|
/embree[P]embree/kernels/subdiv/../geometry/../common/../../common/sys/array.h
|
embree::avx::evalGridBounds(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, embree::SubdivMesh const*) (.cold.2)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x2179d0
retq
|
/embree[P]embree/kernels/subdiv/../geometry/../common/../../common/sys/array.h
|
embree::avx::evalGridBounds(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, embree::SubdivMesh const*) (.cold.3)
|
__forceinline void resize(size_t M)
{
assert(M <= max_total_elements);
if (likely(M <= max_stack_elements)) return;
if (likely(!isStackAllocated())) return;
data = new Ty[max_total_elements];
for (size_t i=0; i<max_stack_elements; i++)
data[i] = arr[i];
}
|
movq %rdi, (%rsi)
xorl %eax, %eax
movq (%rsi), %rcx
vmovaps (%rdx,%rax), %xmm0
vmovaps %xmm0, (%rcx,%rax)
addq $0x10, %rax
cmpq $0x200, %rax # imm = 0x200
jne 0x2179ef
retq
|
/embree[P]embree/kernels/subdiv/../geometry/../common/../../common/sys/array.h
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMvIntersectorKMoeller<4, 4, true>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Hybrid packet/single-ray BVH intersection for a K-wide ray packet.
   Rays are partitioned into groups of (nearly) matching direction-sign
   octants and each group is traversed with a shared chunk stack of
   (node, near-distance) pairs; whenever the number of rays active at a
   node drops to switchThreshold or below, traversal switches to scalar
   single-ray mode via intersect1(). Hit results are written into 'ray'
   by the leaf intersector. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* pure single-ray mode: disable inactive lanes via +/-inf intervals,
   then peel off each active lane and trace it alone */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant of the reference ray */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* traverse one octant group per iteration until all rays are consumed */
do
{
const size_t valid_index = bsf(valid_bits);
/* count how many direction-sign bits differ from the reference ray;
   rays within one differing bit are grouped together */
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
/* invalidNode sentinel at slot 0 signals stack exhaustion */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* shrink the packet far distance to the hits found by intersect1 */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink the far interval to the closest hits recorded in 'ray' */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x2212de
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x221111
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x2212f0
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x2212de
movzbl %al, %ebp
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cffd4e(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1ccfe65(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vandps %xmm4, %xmm2, %xmm5
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vrcpps %xmm1, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1ccb557(%rip), %xmm5 # 0x1eec714
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d3974f(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d33771(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d39730(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d39718(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d39713(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cca795(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1ccb8e2(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x256180
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x2212b0
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x254a6e
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMvIntersectorKMoeller<4, 4, true>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Hybrid packet/single-ray BVH occlusion (shadow) query for a K-wide
   ray packet. Unlike intersect(), it only needs any-hit information:
   lanes that find an occluder are accumulated in 'terminated' and their
   traversal interval is collapsed to neg_inf so they stop participating.
   On exit, ray.tfar of every occluded lane is set to neg_inf. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid lanes start out as already terminated */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
/* invalidNode sentinel at slot 0 signals stack exhaustion */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* collapse intervals of occluded lanes so they stop traversing */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* mark occluded lanes by writing neg_inf into ray.tfar */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x19f8, %rsp # imm = 0x19F8
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x22253e
movq %rcx, %r10
movq %rdx, %r14
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x22134c
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x222553
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r14), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x22253e
vandps %xmm3, %xmm4, %xmm8
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x200(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x210(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x220(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x230(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x240(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x250(%rsp)
vbroadcastss 0x1cffaed(%rip), %xmm9 # 0x1f20ec4
vandps %xmm3, %xmm9, %xmm6
vbroadcastss 0x1ccfc04(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm3, %xmm3
vandps %xmm4, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm4, %xmm4
vandps %xmm5, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm5, %xmm5
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1ccb2f6(%rip), %xmm7 # 0x1eec714
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm6, %xmm3
vrcpps %xmm4, %xmm6
vmulps %xmm6, %xmm4, %xmm4
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm6, %xmm5
vmovaps %xmm3, 0x260(%rsp)
vmovaps %xmm4, 0x270(%rsp)
vmovaps %xmm5, 0x280(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d394e9(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x290(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d3350a(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d394c9(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d394b9(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d394b4(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps %xmm4, 0x2b0(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cca535(%rip), %xmm10 # 0x1eeba20
vblendvps %xmm8, %xmm3, %xmm10, %xmm1
vmovaps %xmm1, 0x2c0(%rsp)
vbroadcastss 0x1ccb681(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm8, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x2d0(%rsp)
vmovaps %xmm8, 0x300(%rsp)
vxorps %xmm0, %xmm8, %xmm0
vmovaps %xmm0, 0xb0(%rsp)
testq %rax, %rax
je 0x221541
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r9d, %r9d
cmpb $0x1, %al
adcq $0x2, %r9
jmp 0x221545
pushq $0x3
popq %r9
leaq 0x80(%r14), %rax
movq %rax, 0x98(%rsp)
leaq 0x360(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xb00(%rsp), %rbx
vmovaps %xmm10, -0x20(%rbx)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%rbx)
leaq 0x1f2e9fb(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x2f0(%rsp)
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x2e0(%rsp)
vpcmpeqd %xmm11, %xmm11, %xmm11
addq $-0x10, %rbx
movq -0x8(%r15), %r13
addq $-0x8, %r15
cmpq $-0x8, %r13
je 0x222438
vmovaps (%rbx), %xmm12
vcmpltps 0x2d0(%rsp), %xmm12, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x222449
movzbl %al, %ebp
popcntl %ebp, %r12d
xorl %eax, %eax
cmpq %r9, %r12
jbe 0x22244d
cmpq %r9, %r12
jbe 0x22243b
testb $0x8, %r13b
pushq $0x8
popq %r12
jne 0x22178f
movq %r13, %rax
movq %r13, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %r12, %r13
vmovaps %xmm10, %xmm12
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x221728
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x200(%rsp), %xmm1
vmovaps 0x210(%rsp), %xmm2
vmovaps 0x220(%rsp), %xmm3
vmovaps 0x260(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x270(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x280(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vpminsd %xmm1, %xmm5, %xmm0
vpminsd %xmm2, %xmm7, %xmm4
vpmaxsd %xmm4, %xmm0, %xmm0
vpminsd %xmm3, %xmm9, %xmm4
vpmaxsd %xmm4, %xmm0, %xmm0
vpmaxsd %xmm1, %xmm5, %xmm1
vpmaxsd %xmm2, %xmm7, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd %xmm3, %xmm9, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd 0x2c0(%rsp), %xmm0, %xmm2
vpminsd 0x2d0(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x221728
vblendvps %xmm1, %xmm0, %xmm10, %xmm0
cmpq $0x8, %r13
je 0x221721
movq %r13, (%r15)
addq $0x8, %r15
vmovaps %xmm12, (%rbx)
addq $0x10, %rbx
vmovaps %xmm0, %xmm12
movq %rdi, %r13
cmpq $0x8, %rdi
je 0x22173f
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x221616
xorl %eax, %eax
cmpq $0x8, %r13
je 0x221788
vmovaps 0x2d0(%rsp), %xmm0
vcmpnleps %xmm12, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r9
jae 0x221779
testb %cl, %cl
je 0x22243b
testb $0x8, %r13b
je 0x221602
jmp 0x22178f
movq %r13, (%r15)
addq $0x8, %r15
vmovaps %xmm12, (%rbx)
addq $0x10, %rbx
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x221765
cmpq $-0x8, %r13
je 0x222438
vmovaps 0x2d0(%rsp), %xmm0
vcmpnleps %xmm12, %xmm0, %xmm0
vtestps %xmm0, %xmm0
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
je 0x222449
movl %r13d, %edx
andl $0xf, %edx
vmovdqa 0xb0(%rsp), %xmm0
addq $-0x8, %rdx
je 0x2223fa
andq $-0x10, %r13
vpxor %xmm0, %xmm11, %xmm1
leaq 0xc0(%r13), %r12
xorl %edi, %edi
imulq $0xe0, %rdi, %rax
addq %r13, %rax
movq %rax, 0x138(%rsp)
xorl %ebp, %ebp
vmovdqa %xmm1, %xmm0
vmovdqa %xmm1, 0x70(%rsp)
movl (%r12,%rbp,4), %eax
cmpq %rcx, %rax
je 0x2223d0
vmovdqa %xmm0, 0x20(%rsp)
vbroadcastss -0xc0(%r12,%rbp,4), %xmm0
vbroadcastss -0xb0(%r12,%rbp,4), %xmm1
vbroadcastss -0xa0(%r12,%rbp,4), %xmm2
vbroadcastss -0x90(%r12,%rbp,4), %xmm8
vbroadcastss -0x80(%r12,%rbp,4), %xmm4
vbroadcastss -0x70(%r12,%rbp,4), %xmm6
vbroadcastss -0x60(%r12,%rbp,4), %xmm3
vmovaps %xmm3, 0x110(%rsp)
vbroadcastss -0x50(%r12,%rbp,4), %xmm3
vmovaps %xmm3, 0x100(%rsp)
vbroadcastss -0x40(%r12,%rbp,4), %xmm3
vmovaps %xmm3, 0xc0(%rsp)
vbroadcastss -0x30(%r12,%rbp,4), %xmm9
vbroadcastss -0x20(%r12,%rbp,4), %xmm7
vbroadcastss -0x10(%r12,%rbp,4), %xmm10
movq 0x138(%rsp), %r11
vmovaps 0xd0(%r11), %xmm3
vmovaps %xmm3, 0x160(%rsp)
vmovaps %xmm4, 0x120(%rsp)
vsubss %xmm4, %xmm1, %xmm5
vmovaps %xmm6, 0x340(%rsp)
vsubss %xmm6, %xmm2, %xmm6
vmovaps %xmm7, 0x320(%rsp)
vsubss %xmm1, %xmm7, %xmm3
vmovaps %xmm10, 0x310(%rsp)
vsubss %xmm2, %xmm10, %xmm11
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm3, %xmm6, %xmm7
vsubss %xmm4, %xmm7, %xmm4
vmovaps %xmm4, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vsubss %xmm8, %xmm0, %xmm10
vmovaps %xmm9, 0x330(%rsp)
vsubss %xmm0, %xmm9, %xmm12
vmovaps %xmm6, 0x50(%rsp)
vmulss %xmm6, %xmm12, %xmm4
vmulss %xmm11, %xmm10, %xmm7
vsubss %xmm4, %xmm7, %xmm13
vmulss %xmm3, %xmm10, %xmm4
vmovaps %xmm5, 0x30(%rsp)
vmulss %xmm5, %xmm12, %xmm7
vsubss %xmm4, %xmm7, %xmm6
vsubps (%r14), %xmm0, %xmm5
vsubps 0x10(%r14), %xmm1, %xmm7
vsubps 0x20(%r14), %xmm2, %xmm8
vmovaps 0x50(%r14), %xmm2
vmovaps 0x60(%r14), %xmm1
vmulps %xmm2, %xmm8, %xmm14
vmulps %xmm1, %xmm7, %xmm15
vsubps %xmm14, %xmm15, %xmm4
vmovaps 0x40(%r14), %xmm0
vmulps %xmm1, %xmm5, %xmm14
vmulps %xmm0, %xmm8, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vmulps %xmm0, %xmm7, %xmm15
vmulps %xmm2, %xmm5, %xmm9
vsubps %xmm15, %xmm9, %xmm15
vshufps $0x0, %xmm13, %xmm13, %xmm9 # xmm9 = xmm13[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm13 # xmm13 = xmm6[0,0,0,0]
vmovaps %xmm9, %xmm6
vmulps %xmm1, %xmm13, %xmm1
vmulps %xmm2, %xmm9, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vpermilps $0x0, 0x60(%rsp), %xmm2 # xmm2 = mem[0,0,0,0]
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm1, %xmm0, %xmm9
vshufps $0x0, %xmm3, %xmm3, %xmm0 # xmm0 = xmm3[0,0,0,0]
vshufps $0x0, %xmm11, %xmm11, %xmm1 # xmm1 = xmm11[0,0,0,0]
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm0, %xmm14, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vshufps $0x0, %xmm12, %xmm12, %xmm1 # xmm1 = xmm12[0,0,0,0]
vmovaps %xmm4, %xmm3
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cff4eb(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm9, %xmm11
vxorps %xmm0, %xmm11, %xmm4
vmovaps 0x20(%rsp), %xmm0
vcmpnltps 0x1cca024(%rip), %xmm4, %xmm12 # 0x1eeba10
vtestps %xmm0, %xmm12
jne 0x221bad
vtestps %xmm0, %xmm0
je 0x221b79
vmovaps 0x110(%rsp), %xmm7
vsubps 0x330(%rsp), %xmm7, %xmm5
vmovaps 0x100(%rsp), %xmm8
vsubps 0x320(%rsp), %xmm8, %xmm4
vmovaps 0xc0(%rsp), %xmm9
vsubps 0x310(%rsp), %xmm9, %xmm10
vmovaps 0xa0(%rsp), %xmm1
vsubps %xmm7, %xmm1, %xmm3
vmovaps 0x120(%rsp), %xmm1
vsubps %xmm8, %xmm1, %xmm11
vmovaps 0x340(%rsp), %xmm1
vsubps %xmm9, %xmm1, %xmm15
vmovaps %xmm0, 0x20(%rsp)
vmulps %xmm4, %xmm15, %xmm0
vmulps %xmm10, %xmm11, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmulps %xmm3, %xmm10, %xmm1
vmulps %xmm5, %xmm15, %xmm2
vsubps %xmm1, %xmm2, %xmm6
vmovaps %xmm5, 0xa0(%rsp)
vmulps %xmm5, %xmm11, %xmm2
vmovaps %xmm4, 0x120(%rsp)
vmulps %xmm4, %xmm3, %xmm5
vsubps %xmm2, %xmm5, %xmm5
vsubps (%r14), %xmm7, %xmm7
vsubps 0x10(%r14), %xmm8, %xmm8
vsubps 0x20(%r14), %xmm9, %xmm4
vmovaps 0x50(%r14), %xmm9
vmovaps 0x60(%r14), %xmm0
vmulps %xmm4, %xmm9, %xmm12
vmulps %xmm0, %xmm8, %xmm13
vsubps %xmm12, %xmm13, %xmm12
vmovaps 0x40(%r14), %xmm1
vmulps %xmm0, %xmm7, %xmm13
vmulps %xmm1, %xmm4, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vmulps %xmm1, %xmm8, %xmm14
vmulps %xmm7, %xmm9, %xmm2
vsubps %xmm14, %xmm2, %xmm14
vmovaps %xmm5, 0x100(%rsp)
vmulps %xmm0, %xmm5, %xmm0
vmovaps %xmm6, 0x110(%rsp)
vmulps %xmm6, %xmm9, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmovaps 0x60(%rsp), %xmm2
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm9
vmulps %xmm14, %xmm15, %xmm0
vmulps %xmm13, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cff38f(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm9, %xmm11
vmulps %xmm3, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm3
vmovaps 0x20(%rsp), %xmm0
vcmpnltps 0x1cc9ec0(%rip), %xmm3, %xmm15 # 0x1eeba10
vtestps %xmm0, %xmm15
jne 0x221c0d
vbroadcastss 0x1cc9ebc(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x70(%rsp), %xmm1
vtestps %xmm0, %xmm0
setne %al
jmp 0x221b8f
xorl %eax, %eax
vbroadcastss 0x1cc9e9c(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x70(%rsp), %xmm1
testb %al, %al
je 0x2223d0
leaq 0x1(%rbp), %rax
cmpq $0x3, %rbp
movq %rax, %rbp
jb 0x221800
jmp 0x2223d0
vmovaps %xmm13, 0x150(%rsp)
vmovaps %xmm6, %xmm13
vmovaps %xmm5, 0x60(%rsp)
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vpermilps $0x0, 0x30(%rsp), %xmm1 # xmm1 = mem[0,0,0,0]
vpermilps $0x0, 0x50(%rsp), %xmm5 # xmm5 = mem[0,0,0,0]
vandps %xmm0, %xmm12, %xmm6
vmulps %xmm5, %xmm15, %xmm5
vmulps %xmm1, %xmm14, %xmm1
vaddps %xmm5, %xmm1, %xmm1
vmulps %xmm3, %xmm10, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vxorps %xmm0, %xmm11, %xmm5
vcmpnltps 0x1cc9e15(%rip), %xmm5, %xmm12 # 0x1eeba10
vtestps %xmm6, %xmm12
jne 0x221c6b
vmovdqa 0x20(%rsp), %xmm0
jmp 0x2219f7
vmovaps %xmm4, %xmm5
vandps %xmm0, %xmm15, %xmm15
vmulps %xmm14, %xmm10, %xmm0
vmulps 0x120(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0xa0(%rsp), %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm4
vcmpnltps 0x1cc9dcf(%rip), %xmm4, %xmm10 # 0x1eeba10
vtestps %xmm15, %xmm10
jne 0x221cd4
vbroadcastss 0x1cc9dcb(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x70(%rsp), %xmm1
vmovdqa 0x20(%rsp), %xmm0
jmp 0x221b6f
movq %rdi, 0x18(%rsp)
movq %rdx, 0x50(%rsp)
movq %r9, 0x30(%rsp)
movq %r8, 0x8(%rsp)
movq %rsi, 0x10(%rsp)
movq %r10, (%rsp)
vbroadcastss 0x1cff233(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm9, %xmm10
vandps %xmm6, %xmm12, %xmm6
vsubps %xmm4, %xmm10, %xmm0
vcmpnltps %xmm5, %xmm0, %xmm12
vtestps %xmm6, %xmm12
jne 0x221d4c
movq (%rsp), %r10
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %r8
movq 0x30(%rsp), %r9
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq 0x50(%rsp), %rdx
movq 0x18(%rsp), %rdi
jmp 0x221c02
movq %rdi, 0x18(%rsp)
movq %rdx, 0x50(%rsp)
movq %r9, 0x30(%rsp)
movq %r8, 0x8(%rsp)
movq %rsi, 0x10(%rsp)
movq %r10, (%rsp)
vbroadcastss 0x1cff1ca(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm9, %xmm6
vandps %xmm15, %xmm10, %xmm10
vsubps %xmm3, %xmm6, %xmm0
vcmpnltps %xmm4, %xmm0, %xmm12
vtestps %xmm10, %xmm12
jne 0x221e58
movq (%rsp), %r10
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %r8
vbroadcastss 0x1cc9cf2(%rip), %xmm10 # 0x1eeba20
movq 0x30(%rsp), %r9
vpcmpeqd %xmm11, %xmm11, %xmm11
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq 0x50(%rsp), %rdx
movq 0x18(%rsp), %rdi
jmp 0x221c5a
vandps %xmm6, %xmm12, %xmm6
vmulps 0x150(%rsp), %xmm8, %xmm0
vmulps %xmm7, %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x60(%rsp), %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm0
vmulps 0x30(%r14), %xmm10, %xmm1
vcmpltps %xmm0, %xmm1, %xmm1
vmovaps 0x80(%r14), %xmm7
vmovaps %xmm7, 0x140(%rsp)
vmulps %xmm7, %xmm10, %xmm7
vcmpleps %xmm7, %xmm0, %xmm7
vandps %xmm1, %xmm7, %xmm7
vtestps %xmm6, %xmm7
je 0x221cad
vandps %xmm6, %xmm7, %xmm6
vcmpneqps 0x1cc9c5f(%rip), %xmm9, %xmm7 # 0x1eeba10
vtestps %xmm6, %xmm7
je 0x221cad
vandps %xmm6, %xmm7, %xmm6
movq (%rsp), %rcx
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rcx
movq %rcx, 0x60(%rsp)
vbroadcastss 0x34(%rcx), %xmm1
vandps 0x90(%r14), %xmm1, %xmm1
vpcmpeqd 0x1cc9c22(%rip), %xmm1, %xmm7 # 0x1eeba10
vtestps %xmm6, %xmm7
jb 0x221cad
movl 0x160(%rsp,%rbp,4), %ecx
vandnps %xmm6, %xmm7, %xmm6
movq (%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x221f84
movq 0x60(%rsp), %rdx
cmpq $0x0, 0x48(%rdx)
jne 0x221f84
vmovdqa 0x20(%rsp), %xmm0
vpandn %xmm0, %xmm6, %xmm0
movq (%rsp), %r10
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %r8
movq 0x30(%rsp), %r9
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq 0x50(%rsp), %rdx
movq 0x18(%rsp), %rdi
jmp 0x2219f7
vmovaps %xmm2, %xmm13
vandps %xmm10, %xmm12, %xmm10
vmulps 0x100(%rsp), %xmm5, %xmm0
vmulps 0x110(%rsp), %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm7, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm5
vmulps 0x30(%r14), %xmm6, %xmm0
vcmpltps %xmm5, %xmm0, %xmm0
vmovaps 0x80(%r14), %xmm1
vmovaps %xmm1, 0xa0(%rsp)
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm5, %xmm1
vandps %xmm0, %xmm1, %xmm7
vtestps %xmm10, %xmm7
je 0x221d17
vandps %xmm7, %xmm10, %xmm7
vcmpneqps 0x1cc9b4b(%rip), %xmm9, %xmm8 # 0x1eeba10
vtestps %xmm7, %xmm8
je 0x221d17
vandps %xmm7, %xmm8, %xmm7
movq (%rsp), %rax
movq (%rax), %rcx
movl (%r12,%rbp,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rcx
movq %rcx, 0xc0(%rsp)
vbroadcastss 0x34(%rcx), %xmm0
vandps 0x90(%r14), %xmm0, %xmm0
vpcmpeqd 0x1cc9b07(%rip), %xmm0, %xmm8 # 0x1eeba10
vtestps %xmm7, %xmm8
jb 0x221d17
movl 0x160(%rsp,%rbp,4), %ecx
vandnps %xmm7, %xmm8, %xmm7
movq (%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x22216c
movq 0xc0(%rsp), %rdx
cmpq $0x0, 0x48(%rdx)
jne 0x22216c
vmovdqa 0x20(%rsp), %xmm0
vpandn %xmm0, %xmm7, %xmm0
movq (%rsp), %r10
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %r8
vbroadcastss 0x1cc9aba(%rip), %xmm10 # 0x1eeba20
movq 0x30(%rsp), %r9
vpcmpeqd %xmm11, %xmm11, %xmm11
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
movq 0x50(%rsp), %rdx
movq 0x18(%rsp), %rdi
jmp 0x221b69
vrcpps %xmm10, %xmm1
vmulps %xmm1, %xmm10, %xmm7
vbroadcastss 0x1cca77e(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vmulps %xmm4, %xmm1, %xmm3
vminps %xmm8, %xmm3, %xmm3
vmulps %xmm5, %xmm1, %xmm5
vminps %xmm8, %xmm5, %xmm5
vsubps %xmm3, %xmm8, %xmm7
vsubps %xmm5, %xmm8, %xmm8
vmovaps 0x2f0(%rsp), %xmm9
vblendvps %xmm9, %xmm7, %xmm3, %xmm3
vblendvps %xmm9, %xmm8, %xmm5, %xmm5
movq (%rsp), %rsi
movq 0x8(%rsi), %rdx
vmovd %eax, %xmm7
vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmovd %ecx, %xmm8
vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmovaps %xmm2, 0x170(%rsp)
vmovaps %xmm13, 0x180(%rsp)
vmovaps 0x150(%rsp), %xmm2
vmovaps %xmm2, 0x190(%rsp)
vmovaps %xmm3, 0x1a0(%rsp)
vmovaps %xmm5, 0x1b0(%rsp)
vmovdqa %xmm8, 0x1c0(%rsp)
vmovdqa %xmm7, 0x1d0(%rsp)
vmulps %xmm0, %xmm1, %xmm0
leaq 0x1e0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x1e0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x1f0(%rsp)
vmovaps 0x140(%rsp), %xmm1
vblendvps %xmm6, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm6, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movq 0x60(%rsp), %rcx
movq 0x18(%rcx), %rax
movq %rax, 0xd8(%rsp)
movq 0x8(%rsi), %rax
movq %rax, 0xe0(%rsp)
movq %r14, 0xe8(%rsp)
leaq 0x170(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x4, 0xf8(%rsp)
movq 0x48(%rcx), %rax
testq %rax, %rax
je 0x2220ed
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x80(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x22235c
movq (%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x22212f
testb $0x2, (%rcx)
jne 0x222122
movq 0x60(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x22212f
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x80(%rsp), %xmm0, %xmm1
vpxor 0x1cc9cdc(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0xe8(%rsp), %rax
vbroadcastss 0x1ccaa2f(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x22236c
vrcpps %xmm6, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vbroadcastss 0x1cca597(%rip), %xmm6 # 0x1eec714
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm3, %xmm0, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps %xmm4, %xmm0, %xmm2
vminps %xmm6, %xmm2, %xmm2
vsubps %xmm1, %xmm6, %xmm3
vsubps %xmm2, %xmm6, %xmm4
vmovaps 0x2e0(%rsp), %xmm6
vblendvps %xmm6, %xmm3, %xmm1, %xmm1
vblendvps %xmm6, %xmm4, %xmm2, %xmm2
movq (%rsp), %rsi
movq 0x8(%rsi), %rdx
vmovd %eax, %xmm3
vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovd %ecx, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovaps %xmm13, 0x170(%rsp)
vmovaps 0x110(%rsp), %xmm6
vmovaps %xmm6, 0x180(%rsp)
vmovaps 0x100(%rsp), %xmm6
vmovaps %xmm6, 0x190(%rsp)
vmovaps %xmm1, 0x1a0(%rsp)
vmovaps %xmm2, 0x1b0(%rsp)
vmovdqa %xmm4, 0x1c0(%rsp)
vmovdqa %xmm3, 0x1d0(%rsp)
vmulps %xmm5, %xmm0, %xmm0
leaq 0x1e0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x1e0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x1f0(%rsp)
vmovaps 0xa0(%rsp), %xmm1
vblendvps %xmm7, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm7, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xd0(%rsp)
movq 0xc0(%rsp), %rcx
movq 0x18(%rcx), %rax
movq %rax, 0xd8(%rsp)
movq 0x8(%rsi), %rax
movq %rax, 0xe0(%rsp)
movq %r14, 0xe8(%rsp)
leaq 0x170(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x4, 0xf8(%rsp)
movq 0x48(%rcx), %rax
testq %rax, %rax
je 0x2222dd
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x80(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x222396
movq (%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x222322
testb $0x2, (%rcx)
jne 0x222315
movq 0xc0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x222322
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x80(%rsp), %xmm0, %xmm1
vpxor 0x1cc9ae9(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0xe8(%rsp), %rax
vbroadcastss 0x1cca83c(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x2223a6
vpcmpeqd 0x1cc96ac(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc9ab4(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm6
movq 0x98(%rsp), %rax
vmovaps 0x140(%rsp), %xmm1
vblendvps %xmm0, (%rax), %xmm1, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x221e27
vpcmpeqd 0x1cc9672(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc9a7a(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm7
movq 0x98(%rsp), %rax
vmovaps 0xa0(%rsp), %xmm1
vblendvps %xmm0, (%rax), %xmm1, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x221f45
vmovaps %xmm0, %xmm2
vpand %xmm1, %xmm0, %xmm0
vtestps %xmm1, %xmm2
je 0x2223f6
incq %rdi
addq $0xe0, %r12
vmovdqa %xmm0, %xmm1
cmpq %rdx, %rdi
jb 0x2217e2
vpxor %xmm0, %xmm11, %xmm0
vpor 0xb0(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0xb0(%rsp)
vtestps %xmm11, %xmm0
jb 0x222438
vmovaps 0x2d0(%rsp), %xmm1
vbroadcastss 0x1cca75f(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x2d0(%rsp)
xorl %eax, %eax
jmp 0x22243b
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x2215a8
jmp 0x222516
pushq $0x2
jmp 0x22243a
vmovaps %xmm12, 0x60(%rsp)
movq %r9, 0x30(%rsp)
movq %r10, (%rsp)
movq %rsi, 0x10(%rsp)
movq %r8, 0x8(%rsp)
bsfq %rbp, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %r13, %rdx
movq %rcx, 0x20(%rsp)
leaq 0x4f(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x208(%rsp), %rax
pushq %rax
vzeroupper
callq 0x258442
popq %rcx
popq %rdx
testb %al, %al
je 0x2224a6
movq 0x20(%rsp), %rax
orl $-0x1, 0xb0(%rsp,%rax,4)
leaq -0x1(%rbp), %rax
andq %rax, %rbp
movq (%rsp), %r10
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %r8
jne 0x222466
vmovaps 0xb0(%rsp), %xmm0
vpcmpeqd %xmm11, %xmm11, %xmm11
vtestps %xmm11, %xmm0
pushq $0x3
popq %rax
vbroadcastss 0x1cc9544(%rip), %xmm10 # 0x1eeba20
movq 0x30(%rsp), %r9
vmovaps 0x60(%rsp), %xmm12
jb 0x2215eb
vmovaps 0x2d0(%rsp), %xmm1
vbroadcastss 0x1cca685(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x2d0(%rsp)
pushq $0x2
popq %rax
jmp 0x2215eb
vmovaps 0x300(%rsp), %xmm0
vandps 0xb0(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cca653(%rip), %xmm1 # 0x1eecb84
movq 0x98(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
addq $0x19f8, %rsp # imm = 0x19F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %r14, %rdx
movq %r10, %rcx
addq $0x19f8, %rsp # imm = 0x19F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x256eb4
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMvIntersectorKMoeller<4, 4, false>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Hybrid packet/single-ray BVH traversal computing closest hits for a packet
   of K rays. Rays are active where *valid_i == -1; hit results are written
   back into `ray` by the leaf primitive intersectors. In `single` mode the
   traversal can fall back to per-ray traversal (intersect1) whenever too few
   rays of the packet remain active.
   NOTE(review): this is the generic template body; the instantiation in this
   dump is N=4, K=4, types=1 (BVH_AN1), robust=false, single=true. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated, faster traversal path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into SoA traversal layout; tnear/tfar are clamped to >= 0 */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* inactive lanes get an empty interval [+inf, -inf] so they never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* pure single-ray mode: traverse each active ray independently */
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* octant = 3-bit code of the ray direction signs; invalid rays get all-ones */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant (all three signs flipped) */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: traverse one octant-coherent subgroup of the packet per pass */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
/* popcount of the 3-bit xor: number of direction-sign axes that differ */
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
/* group rays whose octant differs from the leader in at most one axis */
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark the selected rays as consumed */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node; slot 0 holds an invalidNode sentinel
   that terminates the pop loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
/* too few rays still active for this subtree: per-ray traversal wins */
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* pull in any tfar shrink produced by the single-ray hits */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits: bubble the closest
   pushed entries toward the stack top so they are popped first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* re-push cur; the pop handler dispatches it to single-ray mode */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink traversal tfar to the new closest hit for the lanes that tested */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* lazily-built nodes (e.g. from instancing) are pushed with -inf distance
   so they are processed immediately on the next pop */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x22277a
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x2225ad
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x22278c
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22277a
movzbl %al, %ebp
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cfe8b2(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1cce9c9(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vandps %xmm4, %xmm2, %xmm5
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vrcpps %xmm1, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cca0bb(%rip), %xmm5 # 0x1eec714
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d382b3(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d322d5(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d38294(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d3827c(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d38277(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc92f9(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cca446(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x25a26c
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x22274c
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x258ece
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMvIntersectorKMoeller<4, 4, false>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Hybrid packet/single-ray BVH occlusion (shadow-ray) query for a packet of
   K rays. Unlike intersect(), it needs only any-hit: rays accumulate into a
   `terminated` mask and traversal stops as soon as all rays are occluded.
   On exit, ray.tfar is set to -inf for every valid ray found occluded.
   NOTE(review): generic template body; this dump instantiates N=4, K=4,
   types=1 (BVH_AN1), robust=false, single=true. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated, faster traversal path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays (tfar < 0 marks occluded) */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into SoA traversal layout; inactive lanes get the empty
   interval [+inf, -inf] so they can never register a hit */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid lanes start out "terminated" so they never block the early-out */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node; slot 0 holds an invalidNode sentinel
   that terminates the pop loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
/* too few rays still active for this subtree: per-ray traversal wins */
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* collapse terminated lanes to an empty interval: no further hits */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* re-push cur; the pop handler dispatches it to single-ray mode */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
/* early-out as soon as every lane is occluded */
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* lazily-built nodes (e.g. from instancing) are pushed with -inf distance
   so they are processed immediately on the next pop */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* masked store: occluded rays report tfar = -inf back to the caller */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
movq (%rsi), %r11
cmpq $0x8, 0x70(%r11)
je 0x2234c9
movq %rcx, %r10
movq %rdx, %r9
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x2227d7
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x2234ca
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r9), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x2234c9
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1958, %rsp # imm = 0x1958
vandps %xmm3, %xmm4, %xmm8
vmovaps (%r9), %xmm3
vmovaps %xmm3, 0x150(%rsp)
vmovaps 0x10(%r9), %xmm3
vmovaps %xmm3, 0x160(%rsp)
vmovaps 0x20(%r9), %xmm3
vmovaps %xmm3, 0x170(%rsp)
vmovaps 0x40(%r9), %xmm3
vmovaps %xmm3, 0x180(%rsp)
vmovaps 0x50(%r9), %xmm4
vmovaps %xmm4, 0x190(%rsp)
vmovaps 0x60(%r9), %xmm5
vmovaps %xmm5, 0x1a0(%rsp)
vbroadcastss 0x1cfe651(%rip), %xmm9 # 0x1f20ec4
vandps %xmm3, %xmm9, %xmm6
vbroadcastss 0x1cce768(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm3, %xmm3
vandps %xmm4, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm4, %xmm4
vandps %xmm5, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm5, %xmm5
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1cc9e5a(%rip), %xmm7 # 0x1eec714
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm6, %xmm3
vrcpps %xmm4, %xmm6
vmulps %xmm6, %xmm4, %xmm4
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm6, %xmm5
vmovaps %xmm3, 0x1b0(%rsp)
vmovaps %xmm4, 0x1c0(%rsp)
vmovaps %xmm5, 0x1d0(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d3804d(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x1e0(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d3206e(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d3802d(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d3801d(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d38018(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x1f0(%rsp)
vmovaps %xmm4, 0x200(%rsp)
vmovaps 0x30(%r9), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cc9099(%rip), %xmm10 # 0x1eeba20
vblendvps %xmm8, %xmm3, %xmm10, %xmm1
vmovaps %xmm1, 0x210(%rsp)
vbroadcastss 0x1cca1e5(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm8, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x220(%rsp)
vmovaps %xmm8, 0x250(%rsp)
vxorps %xmm0, %xmm8, %xmm0
vmovaps %xmm0, 0x50(%rsp)
testq %rax, %rax
je 0x2229da
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r13d, %r13d
cmpb $0x1, %al
adcq $0x2, %r13
jmp 0x2229de
pushq $0x3
popq %r13
leaq 0x80(%r9), %r8
leaq 0x2c0(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xa60(%rsp), %rbx
vmovaps %xmm10, -0x20(%rbx)
movq 0x70(%r11), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%rbx)
vpcmpeqd %xmm11, %xmm11, %xmm11
addq $-0x10, %rbx
movq -0x8(%r15), %rbp
addq $-0x8, %r15
cmpq $-0x8, %rbp
je 0x2233a6
vmovaps (%rbx), %xmm12
vcmpltps 0x220(%rsp), %xmm12, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x2233b7
movzbl %al, %r12d
popcntl %r12d, %r14d
xorl %eax, %eax
cmpq %r13, %r14
jbe 0x2233bb
cmpq %r13, %r14
jbe 0x2233a9
testb $0x8, %bpl
pushq $0x8
popq %r14
jne 0x222bfc
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %r14, %rbp
vmovaps %xmm10, %xmm12
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x222b95
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x150(%rsp), %xmm1
vmovaps 0x160(%rsp), %xmm2
vmovaps 0x170(%rsp), %xmm3
vmovaps 0x1b0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x1c0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x1d0(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vpminsd %xmm1, %xmm5, %xmm0
vpminsd %xmm2, %xmm7, %xmm4
vpmaxsd %xmm4, %xmm0, %xmm0
vpminsd %xmm3, %xmm9, %xmm4
vpmaxsd %xmm4, %xmm0, %xmm0
vpmaxsd %xmm1, %xmm5, %xmm1
vpmaxsd %xmm2, %xmm7, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd %xmm3, %xmm9, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd 0x210(%rsp), %xmm0, %xmm2
vpminsd 0x220(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x222b95
vblendvps %xmm1, %xmm0, %xmm10, %xmm0
cmpq $0x8, %rbp
je 0x222b8e
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm12, (%rbx)
addq $0x10, %rbx
vmovaps %xmm0, %xmm12
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x222bac
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x222a83
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x222bf5
vmovaps 0x220(%rsp), %xmm0
vcmpnleps %xmm12, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r13
jae 0x222be6
testb %cl, %cl
je 0x2233a9
testb $0x8, %bpl
je 0x222a6f
jmp 0x222bfc
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm12, (%rbx)
addq $0x10, %rbx
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x222bd2
cmpq $-0x8, %rbp
je 0x2233a6
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm0, 0x240(%rsp)
vcmpnleps %xmm12, %xmm0, %xmm0
vtestps %xmm0, %xmm0
movl $0xffffffff, %r14d # imm = 0xFFFFFFFF
je 0x2233b7
movl %ebp, %eax
andl $0xf, %eax
vmovdqa 0x50(%rsp), %xmm1
vmovdqa %xmm1, %xmm0
addq $-0x8, %rax
je 0x223370
movq %r8, %r12
andq $-0x10, %rbp
vpxor %xmm1, %xmm11, %xmm2
vmovaps (%r9), %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps 0x10(%r9), %xmm0
vmovaps %xmm0, 0x140(%rsp)
vmovaps 0x20(%r9), %xmm0
vmovaps %xmm0, 0x130(%rsp)
vmovaps 0x30(%r9), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x50(%r9), %xmm0
vmovaps %xmm0, 0x120(%rsp)
vmovaps 0x60(%r9), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps 0x40(%r9), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps 0x80(%r9), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps 0x90(%r9), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
addq $0xc0, %rbp
xorl %ecx, %ecx
vmovdqa %xmm1, 0x20(%rsp)
xorl %edx, %edx
vmovdqa %xmm2, %xmm0
vmovdqa %xmm2, 0x30(%rsp)
movl (%rbp,%rdx,4), %r8d
cmpq %r14, %r8
je 0x223343
vbroadcastss -0xc0(%rbp,%rdx,4), %xmm4
vmovdqa %xmm0, (%rsp)
vbroadcastss -0xb0(%rbp,%rdx,4), %xmm0
vbroadcastss -0xa0(%rbp,%rdx,4), %xmm3
vbroadcastss -0x90(%rbp,%rdx,4), %xmm1
vbroadcastss -0x80(%rbp,%rdx,4), %xmm2
vbroadcastss -0x70(%rbp,%rdx,4), %xmm5
vbroadcastss -0x30(%rbp,%rdx,4), %xmm9
vbroadcastss -0x20(%rbp,%rdx,4), %xmm6
vbroadcastss -0x10(%rbp,%rdx,4), %xmm7
vmovaps %xmm1, 0xa0(%rsp)
vsubss %xmm1, %xmm4, %xmm1
vmovaps %xmm2, 0x90(%rsp)
vsubss %xmm2, %xmm0, %xmm10
vmovaps %xmm5, 0x80(%rsp)
vsubss %xmm5, %xmm3, %xmm2
vmovaps %xmm6, 0x2a0(%rsp)
vsubss %xmm0, %xmm6, %xmm14
vmovaps %xmm7, 0x290(%rsp)
vsubss %xmm3, %xmm7, %xmm5
vmulss %xmm5, %xmm10, %xmm6
vmulss %xmm2, %xmm14, %xmm7
vsubss %xmm6, %xmm7, %xmm6
vmovaps %xmm6, 0x40(%rsp)
vmovaps %xmm9, 0x70(%rsp)
vsubss %xmm4, %xmm9, %xmm15
vmovaps %xmm2, 0x270(%rsp)
vmulss %xmm2, %xmm15, %xmm6
vmulss %xmm5, %xmm1, %xmm7
vsubss %xmm6, %xmm7, %xmm6
vmovaps %xmm1, 0x280(%rsp)
vmulss %xmm1, %xmm14, %xmm7
vmovaps %xmm10, 0x260(%rsp)
vmulss %xmm15, %xmm10, %xmm9
vsubss %xmm7, %xmm9, %xmm7
vsubps 0x140(%rsp), %xmm0, %xmm2
vsubps 0x130(%rsp), %xmm3, %xmm8
vmovaps 0x120(%rsp), %xmm11
vmulps %xmm11, %xmm8, %xmm9
vmovaps 0x110(%rsp), %xmm0
vmulps %xmm0, %xmm2, %xmm10
vsubps %xmm9, %xmm10, %xmm3
vsubps 0x60(%rsp), %xmm4, %xmm4
vmulps %xmm0, %xmm4, %xmm9
vmovaps 0x100(%rsp), %xmm1
vmulps %xmm1, %xmm8, %xmm10
vsubps %xmm9, %xmm10, %xmm9
vmulps %xmm1, %xmm2, %xmm10
vmulps %xmm4, %xmm11, %xmm12
vsubps %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm6, %xmm6, %xmm13 # xmm13 = xmm6[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm0, %xmm7, %xmm12
vmovaps (%rsp), %xmm0
vmulps %xmm13, %xmm11, %xmm6
vaddps %xmm6, %xmm12, %xmm6
vpermilps $0x0, 0x40(%rsp), %xmm11 # xmm11 = mem[0,0,0,0]
vmulps %xmm1, %xmm11, %xmm12
vaddps %xmm6, %xmm12, %xmm12
vshufps $0x0, %xmm14, %xmm14, %xmm6 # xmm6 = xmm14[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm10, %xmm5
vmulps %xmm6, %xmm9, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vshufps $0x0, %xmm15, %xmm15, %xmm6 # xmm6 = xmm15[0,0,0,0]
vmulps %xmm3, %xmm6, %xmm6
vaddps %xmm5, %xmm6, %xmm6
vbroadcastss 0x1cfe025(%rip), %xmm5 # 0x1f20ec0
vandps %xmm5, %xmm12, %xmm5
vxorps %xmm6, %xmm5, %xmm14
vcmpnltps 0x1cc8b64(%rip), %xmm14, %xmm15 # 0x1eeba10
vtestps %xmm0, %xmm15
jne 0x223074
vtestps %xmm0, %xmm0
je 0x223039
vbroadcastss -0x60(%rbp,%rdx,4), %xmm2
vbroadcastss -0x50(%rbp,%rdx,4), %xmm5
vbroadcastss -0x40(%rbp,%rdx,4), %xmm6
vsubps 0x70(%rsp), %xmm2, %xmm9
vsubps 0x2a0(%rsp), %xmm5, %xmm12
vsubps 0x290(%rsp), %xmm6, %xmm3
vmovaps 0xa0(%rsp), %xmm1
vsubps %xmm2, %xmm1, %xmm8
vmovaps 0x90(%rsp), %xmm1
vsubps %xmm5, %xmm1, %xmm10
vmovaps 0x80(%rsp), %xmm1
vsubps %xmm6, %xmm1, %xmm15
vmovaps %xmm0, (%rsp)
vmulps %xmm12, %xmm15, %xmm0
vmulps %xmm3, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm11
vmovaps %xmm3, 0x80(%rsp)
vmulps %xmm3, %xmm8, %xmm1
vmulps %xmm9, %xmm15, %xmm3
vsubps %xmm1, %xmm3, %xmm4
vmovaps %xmm9, 0x40(%rsp)
vmulps %xmm9, %xmm10, %xmm3
vmovaps %xmm12, 0xa0(%rsp)
vmulps %xmm12, %xmm8, %xmm9
vsubps %xmm3, %xmm9, %xmm7
vsubps 0x60(%rsp), %xmm2, %xmm2
vsubps 0x140(%rsp), %xmm5, %xmm5
vsubps 0x130(%rsp), %xmm6, %xmm6
vmovaps 0x120(%rsp), %xmm0
vmulps %xmm0, %xmm6, %xmm9
vmovaps 0x110(%rsp), %xmm1
vmulps %xmm1, %xmm5, %xmm12
vsubps %xmm9, %xmm12, %xmm12
vmulps %xmm1, %xmm2, %xmm9
vmovaps 0x100(%rsp), %xmm3
vmulps %xmm3, %xmm6, %xmm13
vsubps %xmm9, %xmm13, %xmm13
vmulps %xmm3, %xmm5, %xmm9
vmulps %xmm0, %xmm2, %xmm14
vsubps %xmm9, %xmm14, %xmm14
vmovaps %xmm7, 0x90(%rsp)
vmulps %xmm1, %xmm7, %xmm9
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm9, %xmm0
vmulps %xmm3, %xmm11, %xmm9
vaddps %xmm0, %xmm9, %xmm9
vmulps %xmm14, %xmm15, %xmm0
vmulps %xmm13, %xmm10, %xmm10
vaddps %xmm0, %xmm10, %xmm0
vbroadcastss 0x1cfded7(%rip), %xmm10 # 0x1f20ec0
vandps %xmm10, %xmm9, %xmm10
vmulps %xmm12, %xmm8, %xmm8
vaddps %xmm0, %xmm8, %xmm0
vxorps %xmm0, %xmm10, %xmm8
vmovaps (%rsp), %xmm0
vcmpnltps 0x1cc8a07(%rip), %xmm8, %xmm15 # 0x1eeba10
vtestps %xmm0, %xmm15
jne 0x223110
vbroadcastss 0x1cc8a03(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x20(%rsp), %xmm1
vmovdqa 0x30(%rsp), %xmm2
vtestps %xmm0, %xmm0
setne %dil
jmp 0x223055
xorl %edi, %edi
vbroadcastss 0x1cc89dc(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x20(%rsp), %xmm1
vmovdqa 0x30(%rsp), %xmm2
testb %dil, %dil
je 0x223343
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x222cf7
jmp 0x223343
vmovaps %xmm3, %xmm1
vmovaps %xmm13, 0x40(%rsp)
vmovaps %xmm2, 0x230(%rsp)
vpermilps $0x0, 0x280(%rsp), %xmm3 # xmm3 = mem[0,0,0,0]
vpermilps $0x0, 0x260(%rsp), %xmm6 # xmm6 = mem[0,0,0,0]
vpermilps $0x0, 0x270(%rsp), %xmm13 # xmm13 = mem[0,0,0,0]
vandps %xmm0, %xmm15, %xmm2
vmulps %xmm10, %xmm13, %xmm10
vmulps %xmm6, %xmm9, %xmm6
vaddps %xmm6, %xmm10, %xmm6
vmulps %xmm1, %xmm3, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vxorps %xmm1, %xmm5, %xmm9
vcmpnltps 0x1cc8942(%rip), %xmm9, %xmm10 # 0x1eeba10
vtestps %xmm2, %xmm10
je 0x222eb7
vbroadcastss 0x1cfdde2(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm12, %xmm1
vandps %xmm2, %xmm10, %xmm2
vsubps %xmm14, %xmm1, %xmm6
vcmpnltps %xmm9, %xmm6, %xmm9
vtestps %xmm2, %xmm9
jne 0x2231c8
movl $0xffffffff, %r14d # imm = 0xFFFFFFFF
vmovaps (%rsp), %xmm0
jmp 0x222eb7
vmovaps %xmm11, %xmm1
vmovaps 0x90(%rsp), %xmm3
vmovaps %xmm4, 0x70(%rsp)
vandps %xmm0, %xmm15, %xmm15
vmulps 0x80(%rsp), %xmm14, %xmm0
vmulps 0xa0(%rsp), %xmm13, %xmm7
vaddps %xmm0, %xmm7, %xmm0
vmulps 0x40(%rsp), %xmm12, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vxorps %xmm0, %xmm10, %xmm7
vcmpnltps 0x1cc88bc(%rip), %xmm7, %xmm11 # 0x1eeba10
vtestps %xmm15, %xmm11
jne 0x22317f
vbroadcastss 0x1cc88bc(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x20(%rsp), %xmm1
vmovdqa 0x30(%rsp), %xmm2
vmovaps (%rsp), %xmm0
jmp 0x22302e
movq %r11, %r14
movq %rsi, %r11
vbroadcastss 0x1cfdd36(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm9, %xmm4
vandps %xmm15, %xmm11, %xmm11
vsubps %xmm8, %xmm4, %xmm0
vcmpnltps %xmm7, %xmm0, %xmm7
vtestps %xmm11, %xmm7
jne 0x22327e
movq %r11, %rsi
movq %r14, %r11
vbroadcastss 0x1cc8865(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
movl $0xffffffff, %r14d # imm = 0xFFFFFFFF
jmp 0x223169
vandps %xmm2, %xmm9, %xmm2
vmulps %xmm7, %xmm8, %xmm3
vmovaps 0x230(%rsp), %xmm0
vmulps 0x40(%rsp), %xmm0, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmulps %xmm4, %xmm11, %xmm3
vaddps %xmm0, %xmm3, %xmm0
vxorps %xmm0, %xmm5, %xmm0
vmulps 0xf0(%rsp), %xmm1, %xmm3
vcmpltps %xmm0, %xmm3, %xmm3
vmulps 0xe0(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm0, %xmm0
vandps %xmm3, %xmm0, %xmm0
vtestps %xmm2, %xmm0
je 0x223100
vandps %xmm2, %xmm0, %xmm0
vcmpneqps 0x1cc87e9(%rip), %xmm12, %xmm1 # 0x1eeba10
vtestps %xmm0, %xmm1
je 0x223100
vandps %xmm0, %xmm1, %xmm0
movq (%r10), %rdi
movq 0x1e8(%rdi), %rdi
movq (%rdi,%r8,8), %rdi
vbroadcastss 0x34(%rdi), %xmm1
vandps 0xd0(%rsp), %xmm1, %xmm1
vpcmpeqd 0x1cc87b5(%rip), %xmm1, %xmm1 # 0x1eeba10
vtestps %xmm0, %xmm1
jb 0x223100
vandnps %xmm0, %xmm1, %xmm0
vmovaps (%rsp), %xmm1
vandnps %xmm1, %xmm0, %xmm0
movl $0xffffffff, %r14d # imm = 0xFFFFFFFF
jmp 0x222eb7
vmovaps %xmm1, %xmm12
vandps %xmm7, %xmm11, %xmm7
vmulps %xmm6, %xmm3, %xmm0
vmulps 0x70(%rsp), %xmm5, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm2, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm10, %xmm0
vmulps 0xf0(%rsp), %xmm4, %xmm1
vcmpltps %xmm0, %xmm1, %xmm1
vmulps 0xe0(%rsp), %xmm4, %xmm2
vcmpleps %xmm2, %xmm0, %xmm0
vandps %xmm1, %xmm0, %xmm0
vtestps %xmm7, %xmm0
je 0x2231ac
vandps %xmm7, %xmm0, %xmm0
vcmpneqps 0x1cc8738(%rip), %xmm9, %xmm1 # 0x1eeba10
vtestps %xmm0, %xmm1
je 0x2231ac
vandps %xmm0, %xmm1, %xmm0
movq (%r10), %rdi
movq 0x1e8(%rdi), %rdi
movq (%rdi,%r8,8), %rsi
vbroadcastss 0x34(%rsi), %xmm1
vandps 0xd0(%rsp), %xmm1, %xmm1
vpcmpeqd 0x1cc8704(%rip), %xmm1, %xmm1 # 0x1eeba10
vtestps %xmm0, %xmm1
jb 0x2231ac
vandnps %xmm0, %xmm1, %xmm0
vmovaps (%rsp), %xmm1
vandnps %xmm1, %xmm0, %xmm0
movq %r11, %rsi
movq %r14, %r11
vbroadcastss 0x1cc86ed(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
movl $0xffffffff, %r14d # imm = 0xFFFFFFFF
jmp 0x223022
vmovaps %xmm0, %xmm3
vandps %xmm2, %xmm0, %xmm0
vtestps %xmm2, %xmm3
je 0x223369
incq %rcx
addq $0xe0, %rbp
vmovaps %xmm0, %xmm2
cmpq %rax, %rcx
jb 0x222ceb
vxorps %xmm0, %xmm11, %xmm0
movq %r12, %r8
vpor %xmm0, %xmm1, %xmm0
vmovdqa %xmm0, 0x50(%rsp)
vtestps %xmm11, %xmm0
jb 0x2233a6
vbroadcastss 0x1cc97fa(%rip), %xmm1 # 0x1eecb84
vmovaps 0x240(%rsp), %xmm2
vblendvps %xmm0, %xmm1, %xmm2, %xmm0
vmovaps %xmm0, 0x220(%rsp)
xorl %eax, %eax
jmp 0x2233a9
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x222a14
jmp 0x22349b
pushq $0x2
jmp 0x2233a8
vmovaps %xmm12, 0x60(%rsp)
movq %r8, 0x40(%rsp)
movq %r10, 0xc8(%rsp)
movq %r9, 0xc0(%rsp)
movq %rsi, 0xb8(%rsp)
movq %r11, 0xb0(%rsp)
bsfq %r12, %rcx
movq %rsi, %rdi
movq %r11, %rsi
movq %rbp, %rdx
movq %rcx, (%rsp)
leaq 0x1f(%rsp), %r8
pushq %r10
leaq 0x158(%rsp), %rax
pushq %rax
callq 0x25bd0c
popq %rcx
popq %rdx
testb %al, %al
je 0x22341b
movq (%rsp), %rax
orl $-0x1, 0x50(%rsp,%rax,4)
leaq -0x1(%r12), %rax
andq %rax, %r12
movq 0xc8(%rsp), %r10
movq 0xc0(%rsp), %r9
movq 0xb8(%rsp), %rsi
movq 0xb0(%rsp), %r11
jne 0x2233e6
vmovaps 0x50(%rsp), %xmm0
vpcmpeqd %xmm11, %xmm11, %xmm11
vtestps %xmm11, %xmm0
pushq $0x3
popq %rax
vbroadcastss 0x1cc85bf(%rip), %xmm10 # 0x1eeba20
movq 0x40(%rsp), %r8
vmovaps 0x60(%rsp), %xmm12
jb 0x222a58
vmovaps 0x220(%rsp), %xmm1
vbroadcastss 0x1cc9700(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x220(%rsp)
pushq $0x2
popq %rax
jmp 0x222a58
vmovaps 0x250(%rsp), %xmm0
vandps 0x50(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc96d1(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r8)
addq $0x1958, %rsp # imm = 0x1958
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r9, %rdx
movq %r10, %rcx
jmp 0x25ad06
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiIntersectorKMoeller<4, 4, true>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x2236e0
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x223513
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x2236f2
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x2236e0
movzbl %al, %ebp
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cfd94c(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1ccda63(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vandps %xmm4, %xmm2, %xmm5
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vrcpps %xmm1, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cc9155(%rip), %xmm5 # 0x1eec714
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d3734d(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d3136f(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d3732e(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d37316(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d37311(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc8393(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cc94e0(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x25dade
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x2236b2
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x25c3c8
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiIntersectorKMoeller<4, 4, true>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1a08, %rsp # imm = 0x1A08
movq %rsi, %r9
movq (%rsi), %rsi
cmpq $0x8, 0x70(%rsi)
je 0x22494f
movq %rcx, %r10
movq %rdx, %r14
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x223751
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x224964
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r14), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x22494f
vandps %xmm3, %xmm4, %xmm8
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x230(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x240(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x250(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x260(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x270(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x280(%rsp)
vbroadcastss 0x1cfd6e8(%rip), %xmm9 # 0x1f20ec4
vandps %xmm3, %xmm9, %xmm6
vbroadcastss 0x1ccd7ff(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm3, %xmm3
vandps %xmm4, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm4, %xmm4
vandps %xmm5, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm5, %xmm5
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1cc8ef1(%rip), %xmm7 # 0x1eec714
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm6, %xmm3
vrcpps %xmm4, %xmm6
vmulps %xmm6, %xmm4, %xmm4
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm6, %xmm5
vmovaps %xmm3, 0x290(%rsp)
vmovaps %xmm4, 0x2a0(%rsp)
vmovaps %xmm5, 0x2b0(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d370e4(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x2c0(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d31105(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d370c4(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d370b4(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d370af(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x2d0(%rsp)
vmovaps %xmm4, 0x2e0(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cc8130(%rip), %xmm10 # 0x1eeba20
vblendvps %xmm8, %xmm3, %xmm10, %xmm1
vmovaps %xmm1, 0x2f0(%rsp)
vbroadcastss 0x1cc927c(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm8, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x300(%rsp)
vmovaps %xmm8, 0x330(%rsp)
vxorps %xmm0, %xmm8, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
testq %rax, %rax
je 0x223946
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r11d, %r11d
cmpb $0x1, %al
adcq $0x2, %r11
jmp 0x22394a
pushq $0x3
popq %r11
leaq 0x80(%r14), %rax
movq %rax, 0xa8(%rsp)
leaq 0x370(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xb10(%rsp), %r12
vmovaps %xmm10, -0x20(%r12)
movq 0x70(%rsi), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%r12)
leaq 0x1f2c5f2(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x320(%rsp)
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x310(%rsp)
vpcmpeqd %xmm11, %xmm11, %xmm11
addq $-0x10, %r12
movq -0x8(%r15), %r13
addq $-0x8, %r15
cmpq $-0x8, %r13
je 0x22483f
vmovaps (%r12), %xmm12
vcmpltps 0x300(%rsp), %xmm12, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x224850
movzbl %al, %ebp
popcntl %ebp, %ebx
xorl %eax, %eax
cmpq %r11, %rbx
jbe 0x224854
cmpq %r11, %rbx
jbe 0x224842
testb $0x8, %r13b
pushq $0x8
popq %r8
jne 0x223b9d
movq %r13, %rax
movq %r13, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %r8, %r13
vmovaps %xmm10, %xmm12
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x223b34
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x230(%rsp), %xmm1
vmovaps 0x240(%rsp), %xmm2
vmovaps 0x250(%rsp), %xmm3
vmovaps 0x290(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x2a0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x2b0(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vpminsd %xmm1, %xmm5, %xmm0
vpminsd %xmm2, %xmm7, %xmm4
vpmaxsd %xmm4, %xmm0, %xmm0
vpminsd %xmm3, %xmm9, %xmm4
vpmaxsd %xmm4, %xmm0, %xmm0
vpmaxsd %xmm1, %xmm5, %xmm1
vpmaxsd %xmm2, %xmm7, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd %xmm3, %xmm9, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd 0x2f0(%rsp), %xmm0, %xmm2
vpminsd 0x300(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x223b34
vblendvps %xmm1, %xmm0, %xmm10, %xmm0
cmpq $0x8, %r13
je 0x223b2d
movq %r13, (%r15)
addq $0x8, %r15
vmovaps %xmm12, (%r12)
addq $0x10, %r12
vmovaps %xmm0, %xmm12
movq %rdi, %r13
cmpq $0x8, %rdi
je 0x223b4b
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x223a20
xorl %eax, %eax
cmpq $0x8, %r13
je 0x223b96
vmovaps 0x300(%rsp), %xmm0
vcmpnleps %xmm12, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r11
jae 0x223b85
testb %cl, %cl
je 0x224842
testb $0x8, %r13b
je 0x223a0c
jmp 0x223b9d
movq %r13, (%r15)
addq $0x8, %r15
vmovaps %xmm12, (%r12)
addq $0x10, %r12
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x223b71
cmpq $-0x8, %r13
je 0x22483f
movq %r9, 0x58(%rsp)
vmovaps 0x300(%rsp), %xmm0
vcmpnleps %xmm12, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22491a
movl %r13d, %r9d
andl $0xf, %r9d
vmovdqa 0xe0(%rsp), %xmm0
addq $-0x8, %r9
je 0x2247fc
andq $-0x10, %r13
vpxor %xmm0, %xmm11, %xmm1
addq $0x50, %r13
xorl %ecx, %ecx
movq (%r10), %rax
movq %rax, 0x168(%rsp)
xorl %ebx, %ebx
vmovdqa %xmm1, %xmm0
vmovdqa %xmm1, 0x60(%rsp)
movl (%r13,%rbx,4), %eax
cmpl $-0x1, %eax
je 0x2247d5
movq %rcx, %rbp
movl -0x10(%r13,%rbx,4), %ecx
movq 0x168(%rsp), %rdx
movq 0x228(%rdx), %rdx
movq (%rdx,%rcx,8), %rdx
movl -0x50(%r13,%rbx,4), %r8d
movl -0x40(%r13,%rbx,4), %edi
vmovdqa %xmm0, 0x10(%rsp)
vbroadcastss (%rdx,%r8,4), %xmm0
vbroadcastss 0x4(%rdx,%r8,4), %xmm1
vbroadcastss 0x8(%rdx,%r8,4), %xmm2
vbroadcastss (%rdx,%rdi,4), %xmm3
vbroadcastss 0x4(%rdx,%rdi,4), %xmm4
vbroadcastss 0x8(%rdx,%rdi,4), %xmm5
movl -0x20(%r13,%rbx,4), %edi
vbroadcastss (%rdx,%rdi,4), %xmm9
vbroadcastss 0x4(%rdx,%rdi,4), %xmm10
vbroadcastss 0x8(%rdx,%rdi,4), %xmm11
vmovaps %xmm3, 0x150(%rsp)
vsubss %xmm3, %xmm0, %xmm6
vmovaps %xmm4, 0x80(%rsp)
vsubss %xmm4, %xmm1, %xmm7
vmovaps %xmm5, 0xd0(%rsp)
vsubss %xmm5, %xmm2, %xmm8
vmovaps %xmm10, 0x350(%rsp)
vsubss %xmm1, %xmm10, %xmm3
vmovaps %xmm11, 0x340(%rsp)
vsubss %xmm2, %xmm11, %xmm13
vmulss %xmm7, %xmm13, %xmm4
vmulss %xmm3, %xmm8, %xmm5
vsubss %xmm4, %xmm5, %xmm4
vmovaps %xmm4, 0x70(%rsp)
vmovaps %xmm9, 0x140(%rsp)
vsubss %xmm0, %xmm9, %xmm12
vmovaps %xmm8, 0xf0(%rsp)
vmulss %xmm12, %xmm8, %xmm4
vmulss %xmm6, %xmm13, %xmm5
vsubss %xmm4, %xmm5, %xmm11
vmovaps %xmm6, 0x20(%rsp)
vmulss %xmm3, %xmm6, %xmm4
vmovaps %xmm7, 0x40(%rsp)
vmulss %xmm7, %xmm12, %xmm5
vsubss %xmm4, %xmm5, %xmm4
vsubps (%r14), %xmm0, %xmm6
vsubps 0x10(%r14), %xmm1, %xmm7
vsubps 0x20(%r14), %xmm2, %xmm10
vmovaps 0x50(%r14), %xmm2
vmovaps 0x60(%r14), %xmm1
vmulps %xmm2, %xmm10, %xmm14
vmulps %xmm1, %xmm7, %xmm15
vsubps %xmm14, %xmm15, %xmm8
vmovaps 0x40(%r14), %xmm0
vmulps %xmm1, %xmm6, %xmm14
vmulps %xmm0, %xmm10, %xmm15
vsubps %xmm14, %xmm15, %xmm9
vmulps %xmm0, %xmm7, %xmm15
vmulps %xmm2, %xmm6, %xmm14
vsubps %xmm15, %xmm14, %xmm15
vshufps $0x0, %xmm11, %xmm11, %xmm5 # xmm5 = xmm11[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm11 # xmm11 = xmm4[0,0,0,0]
vmulps %xmm1, %xmm11, %xmm1
vmulps %xmm2, %xmm5, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vpermilps $0x0, 0x70(%rsp), %xmm2 # xmm2 = mem[0,0,0,0]
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm1, %xmm0, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm0 # xmm0 = xmm3[0,0,0,0]
vshufps $0x0, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[0,0,0,0]
vmulps %xmm1, %xmm15, %xmm1
vmovaps %xmm9, 0x130(%rsp)
vmulps %xmm0, %xmm9, %xmm0
vmovaps %xmm4, %xmm9
vaddps %xmm1, %xmm0, %xmm0
vshufps $0x0, %xmm12, %xmm12, %xmm1 # xmm1 = xmm12[0,0,0,0]
vmovaps %xmm8, %xmm3
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
movl -0x30(%r13,%rbx,4), %edi
vbroadcastss 0x1cfd0fe(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm4, %xmm12
vxorps %xmm0, %xmm12, %xmm8
vmovaps 0x10(%rsp), %xmm0
vcmpnltps 0x1cc7c37(%rip), %xmm8, %xmm13 # 0x1eeba10
vtestps %xmm0, %xmm13
vbroadcastss (%rdx,%rdi,4), %xmm14
vbroadcastss 0x4(%rdx,%rdi,4), %xmm1
vbroadcastss 0x8(%rdx,%rdi,4), %xmm4
vmovaps %xmm4, 0x70(%rsp)
jne 0x223faa
movq %rbp, %rcx
vtestps %xmm0, %xmm0
je 0x223f76
vsubps 0x140(%rsp), %xmm14, %xmm5
vsubps 0x350(%rsp), %xmm1, %xmm4
vmovaps 0x70(%rsp), %xmm8
vsubps 0x340(%rsp), %xmm8, %xmm10
vmovaps %xmm1, %xmm7
vmovaps 0x150(%rsp), %xmm1
vsubps %xmm14, %xmm1, %xmm3
vmovaps 0x80(%rsp), %xmm1
vsubps %xmm7, %xmm1, %xmm11
vmovaps 0xd0(%rsp), %xmm1
vsubps %xmm8, %xmm1, %xmm15
vmovaps %xmm0, 0x10(%rsp)
vmulps %xmm4, %xmm15, %xmm0
vmulps %xmm10, %xmm11, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x150(%rsp)
vmulps %xmm3, %xmm10, %xmm1
vmulps %xmm5, %xmm15, %xmm2
vsubps %xmm1, %xmm2, %xmm6
vmovaps %xmm5, 0x80(%rsp)
vmulps %xmm5, %xmm11, %xmm2
vmovaps %xmm4, 0xd0(%rsp)
vmulps %xmm4, %xmm3, %xmm5
vsubps %xmm2, %xmm5, %xmm5
vsubps (%r14), %xmm14, %xmm4
vsubps 0x10(%r14), %xmm7, %xmm7
vsubps 0x20(%r14), %xmm8, %xmm8
vmovaps 0x50(%r14), %xmm9
vmovaps 0x60(%r14), %xmm0
vmulps %xmm9, %xmm8, %xmm12
vmulps %xmm0, %xmm7, %xmm13
vsubps %xmm12, %xmm13, %xmm12
vmovaps 0x40(%r14), %xmm1
vmulps %xmm0, %xmm4, %xmm13
vmulps %xmm1, %xmm8, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vmulps %xmm1, %xmm7, %xmm14
vmulps %xmm4, %xmm9, %xmm2
vsubps %xmm14, %xmm2, %xmm14
vmovaps %xmm5, 0x140(%rsp)
vmulps %xmm0, %xmm5, %xmm0
vmovaps %xmm6, 0x70(%rsp)
vmulps %xmm6, %xmm9, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmovaps 0x150(%rsp), %xmm2
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm9
vmulps %xmm14, %xmm15, %xmm0
vmulps %xmm13, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cfcf92(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm9, %xmm11
vmulps %xmm3, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm3
vmovaps 0x10(%rsp), %xmm0
vcmpnltps 0x1cc7ac3(%rip), %xmm3, %xmm15 # 0x1eeba10
vtestps %xmm0, %xmm15
jne 0x22404c
vbroadcastss 0x1cc7abf(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x60(%rsp), %xmm1
vtestps %xmm0, %xmm0
setne %al
jmp 0x223f8c
xorl %eax, %eax
vbroadcastss 0x1cc7a9f(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x60(%rsp), %xmm1
testb %al, %al
je 0x2247d5
leaq 0x1(%rbx), %rax
cmpq $0x3, %rbx
movq %rax, %rbx
jb 0x223c05
jmp 0x2247d5
vmovaps %xmm8, 0x190(%rsp)
vmovaps %xmm2, 0x170(%rsp)
vmovaps %xmm6, %xmm4
vmovaps %xmm5, 0x180(%rsp)
vmovaps %xmm1, 0xb0(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovaps %xmm0, %xmm14
vpermilps $0x0, 0x20(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
vpermilps $0x0, 0x40(%rsp), %xmm1 # xmm1 = mem[0,0,0,0]
vpermilps $0x0, 0xf0(%rsp), %xmm6 # xmm6 = mem[0,0,0,0]
vandps %xmm14, %xmm13, %xmm8
vmulps %xmm6, %xmm15, %xmm6
vmulps 0x130(%rsp), %xmm1, %xmm1
vaddps %xmm6, %xmm1, %xmm1
vmulps %xmm3, %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vxorps %xmm0, %xmm12, %xmm6
vcmpnltps 0x1cc79eb(%rip), %xmm6, %xmm1 # 0x1eeba10
vtestps %xmm8, %xmm1
jne 0x2240a6
movq %rbp, %rcx
vmovdqa 0x10(%rsp), %xmm0
vmovaps 0xc0(%rsp), %xmm14
vmovaps 0xb0(%rsp), %xmm1
jmp 0x223e01
vmovaps %xmm4, %xmm5
vandps %xmm0, %xmm15, %xmm15
vmulps %xmm14, %xmm10, %xmm0
vmulps 0xd0(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x80(%rsp), %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm4
vcmpnltps 0x1cc7990(%rip), %xmm4, %xmm10 # 0x1eeba10
vtestps %xmm15, %xmm10
jne 0x2240f7
vbroadcastss 0x1cc7990(%rip), %xmm10 # 0x1eeba20
vpcmpeqd %xmm11, %xmm11, %xmm11
vmovdqa 0x60(%rsp), %xmm1
vmovdqa 0x10(%rsp), %xmm0
jmp 0x223f6c
movq %r9, 0x40(%rsp)
movq %r11, 0x20(%rsp)
movq %rsi, 0x8(%rsp)
movq %r10, (%rsp)
vbroadcastss 0x1cfce02(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm9, %xmm13
vandps %xmm1, %xmm8, %xmm8
vsubps 0x190(%rsp), %xmm13, %xmm0
vcmpnltps %xmm6, %xmm0, %xmm1
vtestps %xmm8, %xmm1
jne 0x224159
movq (%rsp), %r10
movq 0x8(%rsp), %rsi
movq 0x20(%rsp), %r11
movq 0x40(%rsp), %r9
jmp 0x22402c
movq %r9, 0x40(%rsp)
movq %r11, 0x20(%rsp)
movq %rsi, 0x8(%rsp)
movq %r10, (%rsp)
vbroadcastss 0x1cfcdb1(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm9, %xmm6
vandps %xmm15, %xmm10, %xmm10
vsubps %xmm3, %xmm6, %xmm0
vcmpnltps %xmm4, %xmm0, %xmm12
vtestps %xmm10, %xmm12
jne 0x22425c
movq (%rsp), %r10
movq 0x8(%rsp), %rsi
vbroadcastss 0x1cc78de(%rip), %xmm10 # 0x1eeba20
movq 0x20(%rsp), %r11
vpcmpeqd %xmm11, %xmm11, %xmm11
movq 0x40(%rsp), %r9
movq %rbp, %rcx
jmp 0x224095
vandps %xmm1, %xmm8, %xmm8
vmulps %xmm10, %xmm11, %xmm0
vmulps 0x180(%rsp), %xmm7, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x170(%rsp), %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm12, %xmm0
vmulps 0x30(%r14), %xmm13, %xmm1
vcmpltps %xmm0, %xmm1, %xmm1
vmovaps 0x80(%r14), %xmm5
vmovaps %xmm5, 0x130(%rsp)
vmulps %xmm5, %xmm13, %xmm5
vcmpleps %xmm5, %xmm0, %xmm5
vandps %xmm1, %xmm5, %xmm5
vtestps %xmm8, %xmm5
je 0x2240df
vandps %xmm5, %xmm8, %xmm5
vcmpneqps 0x1cc784e(%rip), %xmm9, %xmm8 # 0x1eeba10
vtestps %xmm5, %xmm8
je 0x2240df
vandps %xmm5, %xmm8, %xmm5
movq (%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rdx
movq %rdx, 0xf0(%rsp)
vbroadcastss 0x34(%rdx), %xmm1
vandps 0x90(%r14), %xmm1, %xmm1
vpcmpeqd 0x1cc780e(%rip), %xmm1, %xmm8 # 0x1eeba10
vtestps %xmm5, %xmm8
jb 0x2240df
vandnps %xmm5, %xmm8, %xmm5
movq (%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x224378
movq 0xf0(%rsp), %rdx
cmpq $0x0, 0x48(%rdx)
jne 0x224378
vmovdqa 0x10(%rsp), %xmm0
vpandn %xmm0, %xmm5, %xmm0
movq (%rsp), %r10
movq 0x8(%rsp), %rsi
movq 0x20(%rsp), %r11
movq 0x40(%rsp), %r9
movq %rbp, %rcx
jmp 0x224035
vmovaps %xmm2, %xmm13
vandps %xmm10, %xmm12, %xmm10
vmulps 0x140(%rsp), %xmm8, %xmm0
vmulps 0x70(%rsp), %xmm7, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm5, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm5
vmulps 0x30(%r14), %xmm6, %xmm0
vcmpltps %xmm5, %xmm0, %xmm0
vmovaps 0x80(%r14), %xmm1
vmovaps %xmm1, 0xd0(%rsp)
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm5, %xmm1
vandps %xmm0, %xmm1, %xmm7
vtestps %xmm10, %xmm7
je 0x224130
vandps %xmm7, %xmm10, %xmm7
vcmpneqps 0x1cc774a(%rip), %xmm9, %xmm8 # 0x1eeba10
vtestps %xmm7, %xmm8
je 0x224130
vandps %xmm7, %xmm8, %xmm7
movq (%rsp), %rax
movq (%rax), %rcx
movl -0x10(%r13,%rbx,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rcx
movq %rcx, 0x80(%rsp)
vbroadcastss 0x34(%rcx), %xmm0
vandps 0x90(%r14), %xmm0, %xmm0
vpcmpeqd 0x1cc7705(%rip), %xmm0, %xmm8 # 0x1eeba10
vtestps %xmm7, %xmm8
jb 0x224130
movl (%r13,%rbx,4), %ecx
vandnps %xmm7, %xmm8, %xmm7
movq (%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x224574
movq 0x80(%rsp), %rdx
cmpq $0x0, 0x48(%rdx)
jne 0x224574
vmovdqa 0x10(%rsp), %xmm0
vpandn %xmm0, %xmm7, %xmm0
movq (%rsp), %r10
movq 0x8(%rsp), %rsi
vbroadcastss 0x1cc76bf(%rip), %xmm10 # 0x1eeba20
movq 0x20(%rsp), %r11
vpcmpeqd %xmm11, %xmm11, %xmm11
movq 0x40(%rsp), %r9
movq %rbp, %rcx
jmp 0x223f66
vrcpps %xmm13, %xmm1
vmulps %xmm1, %xmm13, %xmm7
vbroadcastss 0x1cc838a(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vmulps 0x190(%rsp), %xmm1, %xmm3
vminps %xmm8, %xmm3, %xmm3
vmulps %xmm6, %xmm1, %xmm6
vminps %xmm8, %xmm6, %xmm6
vsubps %xmm3, %xmm8, %xmm7
vsubps %xmm6, %xmm8, %xmm8
vmovaps 0x320(%rsp), %xmm9
vblendvps %xmm9, %xmm7, %xmm3, %xmm3
vblendvps %xmm9, %xmm8, %xmm6, %xmm6
movq (%rsp), %rsi
movq 0x8(%rsi), %rdx
vmovd %ecx, %xmm7
vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmovd %eax, %xmm8
vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmovaps 0x170(%rsp), %xmm2
vmovaps %xmm2, 0x1a0(%rsp)
vmovaps 0x180(%rsp), %xmm2
vmovaps %xmm2, 0x1b0(%rsp)
vmovaps %xmm11, 0x1c0(%rsp)
vmovaps %xmm3, 0x1d0(%rsp)
vmovaps %xmm6, 0x1e0(%rsp)
vmovdqa %xmm8, 0x1f0(%rsp)
vmovdqa %xmm7, 0x200(%rsp)
vmulps %xmm0, %xmm1, %xmm0
leaq 0x210(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x210(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x220(%rsp)
vmovaps 0x130(%rsp), %xmm1
vblendvps %xmm5, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm5, 0x90(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0x100(%rsp)
movq 0xf0(%rsp), %rcx
movq 0x18(%rcx), %rax
movq %rax, 0x108(%rsp)
movq 0x8(%rsi), %rax
movq %rax, 0x110(%rsp)
movq %r14, 0x118(%rsp)
leaq 0x1a0(%rsp), %rax
movq %rax, 0x120(%rsp)
movl $0x4, 0x128(%rsp)
movq 0x48(%rcx), %rax
testq %rax, %rax
je 0x2244f2
leaq 0x100(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x90(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x224761
movq (%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x224537
testb $0x2, (%rcx)
jne 0x22452a
movq 0xf0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x224537
leaq 0x100(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x90(%rsp), %xmm0, %xmm1
vpxor 0x1cc78d4(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x118(%rsp), %rax
vbroadcastss 0x1cc8627(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x224771
vrcpps %xmm6, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vbroadcastss 0x1cc818f(%rip), %xmm6 # 0x1eec714
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm3, %xmm0, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps %xmm4, %xmm0, %xmm2
vminps %xmm6, %xmm2, %xmm2
vsubps %xmm1, %xmm6, %xmm3
vsubps %xmm2, %xmm6, %xmm4
vmovaps 0x310(%rsp), %xmm6
vblendvps %xmm6, %xmm3, %xmm1, %xmm1
vblendvps %xmm6, %xmm4, %xmm2, %xmm2
movq (%rsp), %rsi
movq 0x8(%rsi), %rdx
vmovd %eax, %xmm3
vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovd %ecx, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovaps %xmm13, 0x1a0(%rsp)
vmovaps 0x70(%rsp), %xmm6
vmovaps %xmm6, 0x1b0(%rsp)
vmovaps 0x140(%rsp), %xmm6
vmovaps %xmm6, 0x1c0(%rsp)
vmovaps %xmm1, 0x1d0(%rsp)
vmovaps %xmm2, 0x1e0(%rsp)
vmovdqa %xmm4, 0x1f0(%rsp)
vmovdqa %xmm3, 0x200(%rsp)
vmulps %xmm5, %xmm0, %xmm0
leaq 0x210(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x210(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x220(%rsp)
vmovaps 0xd0(%rsp), %xmm1
vblendvps %xmm7, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm7, 0x90(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0x100(%rsp)
movq 0x80(%rsp), %rcx
movq 0x18(%rcx), %rax
movq %rax, 0x108(%rsp)
movq 0x8(%rsi), %rax
movq %rax, 0x110(%rsp)
movq %r14, 0x118(%rsp)
leaq 0x1a0(%rsp), %rax
movq %rax, 0x120(%rsp)
movl $0x4, 0x128(%rsp)
movq 0x48(%rcx), %rax
testq %rax, %rax
je 0x2246e2
leaq 0x100(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x90(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x22479b
movq (%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x224727
testb $0x2, (%rcx)
jne 0x22471a
movq 0x80(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x224727
leaq 0x100(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x90(%rsp), %xmm0, %xmm1
vpxor 0x1cc76e4(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x118(%rsp), %rax
vbroadcastss 0x1cc8437(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x2247ab
vpcmpeqd 0x1cc72a7(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc76af(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm5
movq 0xa8(%rsp), %rax
vmovaps 0x130(%rsp), %xmm1
vblendvps %xmm0, (%rax), %xmm1, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x224237
vpcmpeqd 0x1cc726d(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc7675(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm7
movq 0xa8(%rsp), %rax
vmovaps 0xd0(%rsp), %xmm1
vblendvps %xmm0, (%rax), %xmm1, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x224345
vmovaps %xmm0, %xmm2
vpand %xmm1, %xmm0, %xmm0
vtestps %xmm1, %xmm2
je 0x2247f8
incq %rcx
addq $0x60, %r13
vmovdqa %xmm0, %xmm1
cmpq %r9, %rcx
jb 0x223bee
vpxor %xmm0, %xmm11, %xmm0
vpor 0xe0(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
vtestps %xmm11, %xmm0
movq 0x58(%rsp), %r9
jb 0x22483f
vmovaps 0x300(%rsp), %xmm1
vbroadcastss 0x1cc8358(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x300(%rsp)
xorl %eax, %eax
jmp 0x224842
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x2239b1
jmp 0x224927
pushq $0x2
jmp 0x224841
vmovaps %xmm12, 0x70(%rsp)
movq %r11, 0x20(%rsp)
movq %r10, (%rsp)
movq %r9, 0x58(%rsp)
movq %rsi, 0x8(%rsp)
bsfq %rbp, %rcx
movq %r9, %rdi
movq %r13, %rdx
movq %rcx, 0x10(%rsp)
leaq 0x3f(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x238(%rsp), %rax
pushq %rax
vzeroupper
callq 0x25fe68
popq %rcx
popq %rdx
testb %al, %al
je 0x2248aa
movq 0x10(%rsp), %rax
orl $-0x1, 0xe0(%rsp,%rax,4)
leaq -0x1(%rbp), %rax
andq %rax, %rbp
movq (%rsp), %r10
movq 0x58(%rsp), %r9
movq 0x8(%rsp), %rsi
jne 0x22486d
vmovaps 0xe0(%rsp), %xmm0
vpcmpeqd %xmm11, %xmm11, %xmm11
vtestps %xmm11, %xmm0
pushq $0x3
popq %rax
vbroadcastss 0x1cc7140(%rip), %xmm10 # 0x1eeba20
movq 0x20(%rsp), %r11
vmovaps 0x70(%rsp), %xmm12
jb 0x2239f5
vmovaps 0x300(%rsp), %xmm1
vbroadcastss 0x1cc8281(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x300(%rsp)
pushq $0x2
popq %rax
jmp 0x2239f5
pushq $0x2
popq %rax
movq 0x58(%rsp), %r9
jmp 0x224842
vmovaps 0x330(%rsp), %xmm0
vandps 0xe0(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc8242(%rip), %xmm1 # 0x1eecb84
movq 0xa8(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
addq $0x1a08, %rsp # imm = 0x1A08
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %r9, %rsi
movq %r14, %rdx
movq %r10, %rcx
addq $0x1a08, %rsp # imm = 0x1A08
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x25e8fe
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMvIntersectorKPluecker<4, 4, true>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x224b67
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x2249c1
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x224b79
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x224b67
movzbl %al, %ebp
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cfc49e(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1ccc5b5(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cc7cd3(%rip), %xmm7 # 0x1eec714
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm2, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vbroadcastss 0x1cfc4fc(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vblendvps %xmm8, %xmm6, %xmm2, %xmm2
vdivps %xmm3, %xmm7, %xmm3
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d35ec6(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d2fee8(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d35ea7(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d35e8f(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d35e8a(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc6f0c(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cc8059(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x2624ac
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x224b39
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2609ee
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMvIntersectorKPluecker<4, 4, true>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x226215
movq %rcx, %r10
movq %rdx, %r9
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x224bc3
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x226219
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r9), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x226215
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1a88, %rsp # imm = 0x1A88
vandps %xmm3, %xmm4, %xmm10
vmovaps (%r9), %xmm3
vmovaps %xmm3, 0x280(%rsp)
vmovaps 0x10(%r9), %xmm3
vmovaps %xmm3, 0x290(%rsp)
vmovaps 0x20(%r9), %xmm3
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps 0x40(%r9), %xmm3
vmovaps %xmm3, 0x2b0(%rsp)
vmovaps 0x50(%r9), %xmm4
vmovaps %xmm4, 0x2c0(%rsp)
vmovaps 0x60(%r9), %xmm5
vmovaps %xmm5, 0x2d0(%rsp)
vbroadcastss 0x1cfc265(%rip), %xmm8 # 0x1f20ec4
vandps %xmm3, %xmm8, %xmm6
vbroadcastss 0x1ccc37c(%rip), %xmm9 # 0x1ef0fe8
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cc7a99(%rip), %xmm11 # 0x1eec714
vdivps %xmm3, %xmm11, %xmm3
vandps %xmm4, %xmm8, %xmm7
vcmpltps %xmm9, %xmm7, %xmm7
vdivps %xmm4, %xmm11, %xmm4
vandps %xmm5, %xmm8, %xmm8
vcmpltps %xmm9, %xmm8, %xmm8
vbroadcastss 0x1cfc2c0(%rip), %xmm9 # 0x1f20f60
vblendvps %xmm6, %xmm9, %xmm3, %xmm3
vblendvps %xmm7, %xmm9, %xmm4, %xmm4
vdivps %xmm5, %xmm11, %xmm5
vblendvps %xmm8, %xmm9, %xmm5, %xmm5
vmovaps %xmm3, 0x2e0(%rsp)
vmovaps %xmm4, 0x2f0(%rsp)
vmovaps %xmm5, 0x300(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d35c85(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x310(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d2fca6(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d35c65(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d35c55(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d35c50(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x320(%rsp)
vmovaps %xmm4, 0x330(%rsp)
vmovaps 0x30(%r9), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cc6cd1(%rip), %xmm11 # 0x1eeba20
vblendvps %xmm10, %xmm3, %xmm11, %xmm1
vmovaps %xmm1, 0x340(%rsp)
vbroadcastss 0x1cc7e1d(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm10, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x350(%rsp)
vmovaps %xmm10, 0x380(%rsp)
vxorps %xmm0, %xmm10, %xmm0
vmovaps %xmm0, 0x90(%rsp)
testq %rax, %rax
je 0x224da5
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r11d, %r11d
cmpb $0x1, %al
adcq $0x2, %r11
jmp 0x224da9
pushq $0x3
popq %r11
leaq 0x80(%r9), %r13
leaq 0x3f0(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xb90(%rsp), %r12
vmovaps %xmm11, -0x20(%r12)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%r12)
leaq 0x1f2b19b(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vbroadcastss 0x1cfb104(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cfb0ff(%rip), %xmm12 # 0x1f1ff14
vpcmpeqd %xmm13, %xmm13, %xmm13
addq $-0x10, %r12
movq -0x8(%r15), %rbp
addq $-0x8, %r15
cmpq $-0x8, %rbp
je 0x2260d9
vmovaps (%r12), %xmm14
vcmpltps 0x350(%rsp), %xmm14, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x2260ea
movzbl %al, %r14d
popcntl %r14d, %ebx
xorl %eax, %eax
cmpq %r11, %rbx
jbe 0x2260ee
cmpq %r11, %rbx
jbe 0x2260dc
testb $0x8, %bpl
pushq $0x8
popq %rbx
jne 0x225003
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %rbx, %rbp
vmovaps %xmm11, %xmm14
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x224f9a
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x280(%rsp), %xmm1
vmovaps 0x290(%rsp), %xmm2
vmovaps 0x2a0(%rsp), %xmm3
vmovaps 0x2e0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x2f0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x300(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vminps %xmm1, %xmm5, %xmm0
vminps %xmm2, %xmm7, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vminps %xmm3, %xmm9, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vmaxps %xmm1, %xmm5, %xmm1
vmaxps %xmm2, %xmm7, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm9, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vmaxps 0x340(%rsp), %xmm0, %xmm2
vminps 0x350(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x224f9a
vblendvps %xmm1, %xmm0, %xmm11, %xmm0
cmpq $0x8, %rbp
je 0x224f93
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm14, (%r12)
addq $0x10, %r12
vmovaps %xmm0, %xmm14
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x224fb1
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x224e8a
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x224ffc
vmovaps 0x350(%rsp), %xmm0
vcmpnleps %xmm14, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r11
jae 0x224feb
testb %cl, %cl
je 0x2260dc
testb $0x8, %bpl
je 0x224e76
jmp 0x225003
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm14, (%r12)
addq $0x10, %r12
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x224fd7
cmpq $-0x8, %rbp
je 0x2260d9
movq %r13, 0x38(%rsp)
vmovaps 0x350(%rsp), %xmm0
vcmpnleps %xmm14, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x2261d6
movl %ebp, %edi
andl $0xf, %edi
vmovdqa 0x90(%rsp), %xmm0
addq $-0x8, %rdi
je 0x226096
andq $-0x10, %rbp
vpxor %xmm0, %xmm13, %xmm1
leaq 0xc0(%rbp), %rbx
xorl %eax, %eax
movq %rax, 0x1c0(%rsp)
imulq $0xe0, %rax, %rax
addq %rbp, %rax
movq %rax, 0x1c8(%rsp)
xorl %r13d, %r13d
vmovdqa %xmm1, 0x40(%rsp)
vmovdqa %xmm1, 0x1d0(%rsp)
movl (%rbx,%r13,4), %eax
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
cmpq %rcx, %rax
je 0x226062
vbroadcastss -0xc0(%rbx,%r13,4), %xmm14
vbroadcastss -0xb0(%rbx,%r13,4), %xmm9
vbroadcastss -0xa0(%rbx,%r13,4), %xmm3
vbroadcastss -0x90(%rbx,%r13,4), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vbroadcastss -0x80(%rbx,%r13,4), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vbroadcastss -0x70(%rbx,%r13,4), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vbroadcastss -0x60(%rbx,%r13,4), %xmm0
vmovaps %xmm0, 0x140(%rsp)
vbroadcastss -0x50(%rbx,%r13,4), %xmm0
vmovaps %xmm0, 0x130(%rsp)
vbroadcastss -0x40(%rbx,%r13,4), %xmm0
vmovaps %xmm0, 0x120(%rsp)
vbroadcastss -0x30(%rbx,%r13,4), %xmm12
vbroadcastss -0x20(%rbx,%r13,4), %xmm5
vbroadcastss -0x10(%rbx,%r13,4), %xmm7
movq 0x1c8(%rsp), %rcx
vmovaps 0xd0(%rcx), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps 0x10(%r9), %xmm6
vmovaps 0x20(%r9), %xmm11
vsubps %xmm6, %xmm9, %xmm4
vsubps %xmm11, %xmm3, %xmm3
vmovaps %xmm5, 0x3d0(%rsp)
vsubps %xmm6, %xmm5, %xmm5
vmovaps %xmm5, 0xa0(%rsp)
vmovaps %xmm7, 0x3c0(%rsp)
vsubps %xmm11, %xmm7, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vsubps %xmm4, %xmm5, %xmm2
vsubps %xmm3, %xmm0, %xmm1
vaddps %xmm4, %xmm5, %xmm7
vaddps %xmm3, %xmm0, %xmm8
vmulps %xmm1, %xmm7, %xmm5
vmulps %xmm2, %xmm8, %xmm10
vsubps %xmm5, %xmm10, %xmm13
vmovaps (%r9), %xmm15
vsubps %xmm15, %xmm14, %xmm0
vmovaps %xmm12, 0x110(%rsp)
vsubps %xmm15, %xmm12, %xmm5
vsubps %xmm0, %xmm5, %xmm9
vmulps %xmm8, %xmm9, %xmm8
vaddps %xmm0, %xmm5, %xmm10
vmovaps %xmm1, 0x160(%rsp)
vmulps %xmm1, %xmm10, %xmm12
vsubps %xmm8, %xmm12, %xmm12
vmovaps %xmm2, 0x180(%rsp)
vmulps %xmm2, %xmm10, %xmm8
vmovaps %xmm9, 0x150(%rsp)
vmulps %xmm7, %xmm9, %xmm7
vsubps %xmm8, %xmm7, %xmm7
vmovaps 0x60(%r9), %xmm14
vmulps %xmm7, %xmm14, %xmm7
vmovaps %xmm14, 0x20(%rsp)
vmovaps 0x50(%r9), %xmm9
vmulps %xmm12, %xmm9, %xmm12
vaddps %xmm7, %xmm12, %xmm7
vmovaps 0x40(%r9), %xmm1
vmovaps %xmm1, 0x1b0(%rsp)
vmulps %xmm1, %xmm13, %xmm13
vaddps %xmm7, %xmm13, %xmm12
vmovaps 0xf0(%rsp), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmovaps 0xe0(%rsp), %xmm2
vsubps %xmm11, %xmm2, %xmm2
vsubps %xmm1, %xmm4, %xmm13
vsubps %xmm2, %xmm3, %xmm7
vmovaps %xmm4, 0x1a0(%rsp)
vaddps %xmm1, %xmm4, %xmm4
vmovaps %xmm3, 0x190(%rsp)
vaddps %xmm2, %xmm3, %xmm3
vmulps %xmm7, %xmm4, %xmm6
vmulps %xmm3, %xmm13, %xmm8
vsubps %xmm6, %xmm8, %xmm6
vmovaps 0x100(%rsp), %xmm8
vsubps %xmm15, %xmm8, %xmm8
vsubps %xmm8, %xmm0, %xmm15
vmulps %xmm3, %xmm15, %xmm3
vmovaps %xmm0, 0x170(%rsp)
vaddps %xmm0, %xmm8, %xmm11
vmulps %xmm7, %xmm11, %xmm10
vsubps %xmm3, %xmm10, %xmm3
vmulps %xmm13, %xmm11, %xmm10
vmulps %xmm4, %xmm15, %xmm0
vsubps %xmm10, %xmm0, %xmm0
vmulps %xmm0, %xmm14, %xmm0
vmulps %xmm3, %xmm9, %xmm3
vaddps %xmm3, %xmm0, %xmm0
vmovaps 0x1b0(%rsp), %xmm14
vmulps %xmm6, %xmm14, %xmm3
vaddps %xmm0, %xmm3, %xmm11
vsubps %xmm5, %xmm8, %xmm3
vaddps %xmm5, %xmm8, %xmm5
vmovaps 0xa0(%rsp), %xmm4
vsubps %xmm4, %xmm1, %xmm0
vaddps %xmm4, %xmm1, %xmm1
vmovaps 0x70(%rsp), %xmm4
vsubps %xmm4, %xmm2, %xmm10
vaddps %xmm4, %xmm2, %xmm2
vmulps %xmm1, %xmm10, %xmm6
vmulps %xmm2, %xmm0, %xmm8
vsubps %xmm6, %xmm8, %xmm6
vmulps %xmm2, %xmm3, %xmm2
vmulps %xmm5, %xmm10, %xmm8
vsubps %xmm2, %xmm8, %xmm2
vmulps %xmm0, %xmm5, %xmm5
vmulps %xmm1, %xmm3, %xmm1
vsubps %xmm5, %xmm1, %xmm1
vmulps 0x20(%rsp), %xmm1, %xmm1
vmovaps %xmm9, 0x70(%rsp)
vmulps %xmm2, %xmm9, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm6, %xmm14, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm11, %xmm12, %xmm2
vaddps %xmm2, %xmm1, %xmm4
vminps %xmm11, %xmm12, %xmm2
vminps %xmm1, %xmm2, %xmm2
vbroadcastss 0x1cfbb79(%rip), %xmm5 # 0x1f20ec4
vandps %xmm5, %xmm4, %xmm6
vbroadcastss 0x1cfbb74(%rip), %xmm5 # 0x1f20ecc
vmovaps %xmm6, 0x3b0(%rsp)
vmulps %xmm5, %xmm6, %xmm5
vbroadcastss 0x1cfbb52(%rip), %xmm6 # 0x1f20ec0
vxorps %xmm6, %xmm5, %xmm6
vcmpnltps %xmm6, %xmm2, %xmm2
vmovaps %xmm12, 0xa0(%rsp)
vmaxps %xmm11, %xmm12, %xmm6
vmaxps %xmm1, %xmm6, %xmm1
vcmpleps %xmm5, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm9
vtestps 0x40(%rsp), %xmm9
je 0x225fe7
vmovaps %xmm4, 0x390(%rsp)
vmovaps %xmm11, 0x3a0(%rsp)
vmovaps 0x160(%rsp), %xmm11
vmulps %xmm11, %xmm13, %xmm1
vmovaps 0x180(%rsp), %xmm4
vmulps %xmm7, %xmm4, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm7, %xmm0, %xmm5
vmulps %xmm10, %xmm13, %xmm6
vsubps %xmm5, %xmm6, %xmm6
vbroadcastss 0x1cfbade(%rip), %xmm8 # 0x1f20ec4
vandps %xmm1, %xmm8, %xmm1
vandps %xmm5, %xmm8, %xmm5
vcmpltps %xmm5, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm12
vmulps %xmm10, %xmm15, %xmm1
vmulps %xmm11, %xmm15, %xmm2
vmovaps 0x150(%rsp), %xmm10
vmulps %xmm7, %xmm10, %xmm5
vsubps %xmm5, %xmm2, %xmm2
vmulps %xmm7, %xmm3, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm5, %xmm8, %xmm5
vandps %xmm1, %xmm8, %xmm1
vcmpltps %xmm1, %xmm5, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm6
vmulps %xmm3, %xmm13, %xmm1
vmulps %xmm13, %xmm10, %xmm2
vmulps %xmm4, %xmm15, %xmm3
vmulps %xmm0, %xmm15, %xmm0
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm3, %xmm8, %xmm3
vandps %xmm1, %xmm8, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm0, %xmm5
vmulps 0x20(%rsp), %xmm5, %xmm0
vmulps 0x70(%rsp), %xmm6, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm12, %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm1
vmulps 0x190(%rsp), %xmm5, %xmm0
vmulps 0x1a0(%rsp), %xmm6, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps 0x170(%rsp), %xmm12, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm1, %xmm3
vbroadcastss 0x1cc7264(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vmovaps 0x30(%r9), %xmm2
vmovaps 0x80(%r9), %xmm15
vcmpleps %xmm0, %xmm2, %xmm2
vcmpleps %xmm15, %xmm0, %xmm3
vandps %xmm3, %xmm2, %xmm2
vxorps %xmm14, %xmm14, %xmm14
vcmpneqps %xmm1, %xmm14, %xmm1
vandps %xmm2, %xmm1, %xmm2
vandps 0x40(%rsp), %xmm9, %xmm1
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vtestps %xmm1, %xmm2
vbroadcastss 0x1cc6516(%rip), %xmm11 # 0x1eeba20
je 0x225ff0
vmovaps %xmm12, %xmm9
vandps %xmm1, %xmm2, %xmm1
movq (%r10), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r14
vbroadcastss 0x34(%r14), %xmm2
vandps 0x90(%r9), %xmm2, %xmm2
vpcmpeqd %xmm2, %xmm14, %xmm2
vtestps %xmm1, %xmm2
vbroadcastss 0x1cfa9c8(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cfa9c3(%rip), %xmm12 # 0x1f1ff14
vpcmpeqd %xmm13, %xmm13, %xmm13
jb 0x22558e
movl 0x1e0(%rsp,%r13,4), %ecx
vandnps %xmm1, %xmm2, %xmm1
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x225a97
cmpq $0x0, 0x48(%r14)
jne 0x225a97
vmovdqa 0x40(%rsp), %xmm0
vpandn %xmm0, %xmm1, %xmm0
vmovdqa %xmm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %xmm0
vtestps %xmm0, %xmm0
je 0x225a6e
vmovaps (%r9), %xmm1
vmovaps 0x10(%r9), %xmm3
vmovaps 0x20(%r9), %xmm5
vmovaps 0x140(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm9
vmovaps 0x130(%rsp), %xmm0
vsubps %xmm3, %xmm0, %xmm4
vmovaps 0x120(%rsp), %xmm0
vsubps %xmm5, %xmm0, %xmm15
vmovaps 0x110(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps 0x3d0(%rsp), %xmm2
vsubps %xmm3, %xmm2, %xmm6
vmovaps 0x3c0(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm8
vmovaps 0x100(%rsp), %xmm2
vsubps %xmm1, %xmm2, %xmm13
vmovaps 0xf0(%rsp), %xmm2
vsubps %xmm3, %xmm2, %xmm14
vmovaps 0xe0(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm1
vmovaps %xmm1, 0x20(%rsp)
vsubps %xmm9, %xmm13, %xmm2
vsubps %xmm4, %xmm14, %xmm12
vsubps %xmm15, %xmm1, %xmm0
vaddps %xmm4, %xmm14, %xmm3
vaddps %xmm1, %xmm15, %xmm7
vmulps %xmm0, %xmm3, %xmm10
vmulps %xmm7, %xmm12, %xmm11
vsubps %xmm10, %xmm11, %xmm1
vaddps %xmm9, %xmm13, %xmm10
vmulps %xmm7, %xmm2, %xmm7
vmovaps %xmm0, 0x190(%rsp)
vmulps %xmm0, %xmm10, %xmm11
vsubps %xmm7, %xmm11, %xmm7
vmovaps %xmm12, 0x1a0(%rsp)
vmulps %xmm12, %xmm10, %xmm10
vmovaps %xmm2, 0x1b0(%rsp)
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm3, %xmm3
vmovaps 0x60(%r9), %xmm12
vmulps %xmm3, %xmm12, %xmm3
vmovaps 0x50(%r9), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmulps %xmm7, %xmm0, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vmovaps 0x40(%r9), %xmm5
vmulps %xmm1, %xmm5, %xmm7
vaddps %xmm3, %xmm7, %xmm11
vmovaps %xmm6, %xmm2
vsubps %xmm6, %xmm4, %xmm7
vmovaps %xmm8, %xmm6
vsubps %xmm8, %xmm15, %xmm1
vmovaps %xmm4, 0xe0(%rsp)
vaddps %xmm2, %xmm4, %xmm3
vmovaps %xmm2, %xmm8
vmovaps %xmm15, 0xa0(%rsp)
vaddps %xmm6, %xmm15, %xmm2
vmulps %xmm1, %xmm3, %xmm15
vmulps %xmm2, %xmm7, %xmm4
vsubps %xmm15, %xmm4, %xmm4
vmovaps 0x70(%rsp), %xmm15
vsubps %xmm15, %xmm9, %xmm0
vmulps %xmm2, %xmm0, %xmm2
vmovaps %xmm9, 0xf0(%rsp)
vaddps %xmm15, %xmm9, %xmm9
vmovaps %xmm1, 0x170(%rsp)
vmulps %xmm1, %xmm9, %xmm10
vsubps %xmm2, %xmm10, %xmm2
vmovaps %xmm7, 0x180(%rsp)
vmulps %xmm7, %xmm9, %xmm9
vmovaps %xmm0, 0x160(%rsp)
vmulps %xmm3, %xmm0, %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm3, %xmm12, %xmm3
vmovaps 0x100(%rsp), %xmm7
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmulps %xmm4, %xmm5, %xmm3
vaddps %xmm2, %xmm3, %xmm9
vsubps %xmm13, %xmm15, %xmm3
vaddps %xmm13, %xmm15, %xmm1
vsubps %xmm14, %xmm8, %xmm15
vaddps %xmm14, %xmm8, %xmm2
vmovaps 0x20(%rsp), %xmm0
vsubps %xmm0, %xmm6, %xmm10
vaddps %xmm0, %xmm6, %xmm4
vmulps %xmm2, %xmm10, %xmm0
vmulps %xmm4, %xmm15, %xmm6
vsubps %xmm0, %xmm6, %xmm0
vmulps %xmm4, %xmm3, %xmm4
vmulps %xmm1, %xmm10, %xmm6
vsubps %xmm4, %xmm6, %xmm4
vmovaps 0x40(%rsp), %xmm6
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm2, %xmm3, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm12, 0x20(%rsp)
vmulps %xmm1, %xmm12, %xmm1
vmulps %xmm4, %xmm7, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm5, 0x70(%rsp)
vmulps %xmm0, %xmm5, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm9, %xmm11, %xmm2
vaddps %xmm2, %xmm1, %xmm4
vminps %xmm9, %xmm11, %xmm2
vminps %xmm1, %xmm2, %xmm2
vmovaps %xmm4, 0x130(%rsp)
vbroadcastss 0x1cfb6d1(%rip), %xmm0 # 0x1f20ec4
vmovaps %xmm0, %xmm13
vandps %xmm0, %xmm4, %xmm5
vbroadcastss 0x1cfb6c8(%rip), %xmm4 # 0x1f20ecc
vmovaps %xmm5, 0x120(%rsp)
vmulps %xmm4, %xmm5, %xmm4
vbroadcastss 0x1cfb6a6(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm4, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm11, 0x140(%rsp)
vmaxps %xmm9, %xmm11, %xmm5
vmaxps %xmm1, %xmm5, %xmm1
vcmpleps %xmm4, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm0
vtestps %xmm6, %xmm0
je 0x225d2e
vmovaps %xmm7, %xmm12
vmovaps %xmm9, 0x110(%rsp)
vmovaps 0x190(%rsp), %xmm11
vmovaps %xmm0, 0x150(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmulps %xmm0, %xmm11, %xmm1
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x170(%rsp), %xmm8
vmulps %xmm8, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm8, %xmm15, %xmm4
vmulps %xmm0, %xmm10, %xmm5
vsubps %xmm4, %xmm5, %xmm5
vandps %xmm1, %xmm13, %xmm1
vandps %xmm4, %xmm13, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm14
vmovaps %xmm15, %xmm7
vmovaps 0x160(%rsp), %xmm15
vmulps %xmm10, %xmm15, %xmm1
vmulps %xmm11, %xmm15, %xmm2
vmovaps 0x1b0(%rsp), %xmm11
vmulps %xmm8, %xmm11, %xmm4
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm3, %xmm8, %xmm5
vsubps %xmm1, %xmm5, %xmm5
vandps %xmm4, %xmm13, %xmm4
vandps %xmm1, %xmm13, %xmm1
vcmpltps %xmm1, %xmm4, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm8
vmulps %xmm0, %xmm3, %xmm1
vmulps %xmm0, %xmm11, %xmm2
vmulps %xmm9, %xmm15, %xmm3
vmulps %xmm7, %xmm15, %xmm0
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm3, %xmm13, %xmm3
vandps %xmm1, %xmm13, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm0, %xmm5
vmulps 0x20(%rsp), %xmm5, %xmm0
vmulps %xmm8, %xmm12, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x70(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm1
vmulps 0xa0(%rsp), %xmm5, %xmm0
vmulps 0xe0(%rsp), %xmm8, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmovaps %xmm14, %xmm9
vmulps 0xf0(%rsp), %xmm14, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm1, %xmm3
vbroadcastss 0x1cc6d9a(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vmovaps 0x30(%r9), %xmm2
vmovaps 0x80(%r9), %xmm15
vcmpleps %xmm0, %xmm2, %xmm2
vcmpleps %xmm15, %xmm0, %xmm3
vandps %xmm3, %xmm2, %xmm2
vxorps %xmm14, %xmm14, %xmm14
vcmpneqps %xmm1, %xmm14, %xmm1
vandps %xmm2, %xmm1, %xmm2
vandps 0x150(%rsp), %xmm6, %xmm1
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vtestps %xmm1, %xmm2
je 0x225d2e
vandps %xmm1, %xmm2, %xmm1
movq (%r10), %rcx
movl (%rbx,%r13,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r14
vbroadcastss 0x34(%r14), %xmm2
vandps 0x90(%r9), %xmm2, %xmm2
vpcmpeqd %xmm2, %xmm14, %xmm2
vtestps %xmm1, %xmm2
vbroadcastss 0x1cc6015(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cfa4fc(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cfa4f7(%rip), %xmm12 # 0x1f1ff14
jb 0x225d49
movl 0x1e0(%rsp,%r13,4), %ecx
vandnps %xmm1, %xmm2, %xmm1
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x225d53
cmpq $0x0, 0x48(%r14)
jne 0x225d53
vpcmpeqd %xmm13, %xmm13, %xmm13
vmovdqa 0x40(%rsp), %xmm0
vpandn %xmm0, %xmm1, %xmm0
vmovdqa %xmm0, 0x40(%rsp)
vmovaps 0x40(%rsp), %xmm0
vtestps %xmm0, %xmm0
setne %al
jmp 0x225a70
xorl %eax, %eax
vmovdqa 0x1d0(%rsp), %xmm1
testb %al, %al
je 0x226062
leaq 0x1(%r13), %rax
cmpq $0x3, %r13
movq %rax, %r13
jb 0x225081
jmp 0x226062
vmovaps 0x390(%rsp), %xmm3
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cc6c63(%rip), %xmm7 # 0x1eec714
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1ccb522(%rip), %xmm3 # 0x1ef0fe8
vmovaps 0x3b0(%rsp), %xmm4
vcmpnltps %xmm3, %xmm4, %xmm3
vandps %xmm2, %xmm3, %xmm2
vmulps 0xa0(%rsp), %xmm2, %xmm3
vminps %xmm7, %xmm3, %xmm3
vmulps 0x3a0(%rsp), %xmm2, %xmm2
vminps %xmm7, %xmm2, %xmm2
vsubps %xmm3, %xmm7, %xmm4
vsubps %xmm2, %xmm7, %xmm7
vmovaps 0x370(%rsp), %xmm8
vblendvps %xmm8, %xmm4, %xmm3, %xmm3
vblendvps %xmm8, %xmm7, %xmm2, %xmm2
movq 0x8(%r10), %rdx
vmovd %eax, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovd %ecx, %xmm7
vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmovaps %xmm9, 0x1f0(%rsp)
vmovaps %xmm6, 0x200(%rsp)
vmovaps %xmm5, 0x210(%rsp)
vmovaps %xmm3, 0x220(%rsp)
vmovaps %xmm2, 0x230(%rsp)
vmovdqa %xmm7, 0x240(%rsp)
vmovdqa %xmm4, 0x250(%rsp)
leaq 0x260(%rsp), %rax
vcmptrueps %ymm2, %ymm2, %ymm2
vmovups %ymm2, (%rax)
vbroadcastss (%rdx), %xmm2
vmovaps %xmm2, 0x260(%rsp)
vbroadcastss 0x4(%rdx), %xmm2
vmovaps %xmm2, 0x270(%rsp)
vblendvps %xmm1, %xmm0, %xmm15, %xmm0
vmovaps %xmm0, 0x80(%r9)
vmovaps %xmm1, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r9, 0xc8(%rsp)
leaq 0x1f0(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
movq %r10, 0x68(%rsp)
movq %rsi, 0x10(%rsp)
movq %r8, 0x8(%rsp)
movq %r11, (%rsp)
movq %rdi, 0x60(%rsp)
vmovaps %xmm15, 0x20(%rsp)
je 0x225c71
leaq 0xb0(%rsp), %rdi
movq %r9, 0x58(%rsp)
vzeroupper
callq *%rax
vmovaps 0x20(%rsp), %xmm15
movq 0x60(%rsp), %rdi
vxorps %xmm14, %xmm14, %xmm14
vbroadcastss 0x1cfa2cd(%rip), %xmm12 # 0x1f1ff14
vbroadcastss 0x1cfa2c0(%rip), %xmm10 # 0x1f1ff10
movq (%rsp), %r11
vbroadcastss 0x1cc5dc3(%rip), %xmm11 # 0x1eeba20
movq 0x8(%rsp), %r8
movq 0x10(%rsp), %rsi
movq 0x58(%rsp), %r9
movq 0x68(%rsp), %r10
vmovdqa 0x80(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x22600c
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vpcmpeqd %xmm13, %xmm13, %xmm13
je 0x225cf9
testb $0x2, (%rcx)
jne 0x225ca3
testb $0x40, 0x3e(%r14)
je 0x225cf9
leaq 0xb0(%rsp), %rdi
movq %r9, %r14
vzeroupper
callq *%rax
vmovaps 0x20(%rsp), %xmm15
movq 0x60(%rsp), %rdi
vxorps %xmm14, %xmm14, %xmm14
vpcmpeqd %xmm13, %xmm13, %xmm13
vbroadcastss 0x1cfa243(%rip), %xmm12 # 0x1f1ff14
vbroadcastss 0x1cfa236(%rip), %xmm10 # 0x1f1ff10
movq (%rsp), %r11
vbroadcastss 0x1cc5d39(%rip), %xmm11 # 0x1eeba20
movq 0x8(%rsp), %r8
movq 0x10(%rsp), %rsi
movq %r14, %r9
movq 0x68(%rsp), %r10
vpcmpeqd 0x80(%rsp), %xmm14, %xmm1
vpxor %xmm1, %xmm13, %xmm0
movq 0xc8(%rsp), %rax
vbroadcastss 0x1cc6e6d(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x226019
vbroadcastss 0x1cc5ce9(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cfa1d0(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cfa1cb(%rip), %xmm12 # 0x1f1ff14
vpcmpeqd %xmm13, %xmm13, %xmm13
jmp 0x225a5e
vmovaps 0x130(%rsp), %xmm3
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cc69a7(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1ccb266(%rip), %xmm3 # 0x1ef0fe8
vmovaps 0x120(%rsp), %xmm4
vcmpnltps %xmm3, %xmm4, %xmm3
vandps %xmm2, %xmm3, %xmm2
vmulps 0x140(%rsp), %xmm2, %xmm3
vminps %xmm6, %xmm3, %xmm3
vmulps 0x110(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vsubps %xmm3, %xmm6, %xmm4
vsubps %xmm2, %xmm6, %xmm6
vmovaps 0x360(%rsp), %xmm7
vblendvps %xmm7, %xmm4, %xmm3, %xmm3
vblendvps %xmm7, %xmm6, %xmm2, %xmm2
movq 0x8(%r10), %rdx
vmovd %eax, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovd %ecx, %xmm6
vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps %xmm9, 0x1f0(%rsp)
vmovaps %xmm8, 0x200(%rsp)
vmovaps %xmm5, 0x210(%rsp)
vmovaps %xmm3, 0x220(%rsp)
vmovaps %xmm2, 0x230(%rsp)
vmovdqa %xmm6, 0x240(%rsp)
vmovdqa %xmm4, 0x250(%rsp)
leaq 0x260(%rsp), %rax
vcmptrueps %ymm2, %ymm2, %ymm2
vmovups %ymm2, (%rax)
vbroadcastss (%rdx), %xmm2
vmovaps %xmm2, 0x260(%rsp)
vbroadcastss 0x4(%rdx), %xmm2
vmovaps %xmm2, 0x270(%rsp)
vblendvps %xmm1, %xmm0, %xmm15, %xmm0
vmovaps %xmm0, 0x80(%r9)
vmovaps %xmm1, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r9, 0xc8(%rsp)
leaq 0x1f0(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
movq %r10, 0x68(%rsp)
movq %rsi, 0x10(%rsp)
movq %r8, 0x8(%rsp)
movq %r11, (%rsp)
movq %rdi, 0x60(%rsp)
vmovaps %xmm15, 0x20(%rsp)
je 0x225f2d
leaq 0xb0(%rsp), %rdi
movq %r9, 0x58(%rsp)
vzeroupper
callq *%rax
vmovaps 0x20(%rsp), %xmm15
movq 0x60(%rsp), %rdi
vxorps %xmm14, %xmm14, %xmm14
vbroadcastss 0x1cfa011(%rip), %xmm12 # 0x1f1ff14
vbroadcastss 0x1cfa004(%rip), %xmm10 # 0x1f1ff10
movq (%rsp), %r11
vbroadcastss 0x1cc5b07(%rip), %xmm11 # 0x1eeba20
movq 0x8(%rsp), %r8
movq 0x10(%rsp), %rsi
movq 0x58(%rsp), %r9
movq 0x68(%rsp), %r10
vmovdqa 0x80(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x226037
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vpcmpeqd %xmm13, %xmm13, %xmm13
je 0x225fb5
testb $0x2, (%rcx)
jne 0x225f5f
testb $0x40, 0x3e(%r14)
je 0x225fb5
leaq 0xb0(%rsp), %rdi
movq %r9, %r14
vzeroupper
callq *%rax
vmovaps 0x20(%rsp), %xmm15
movq 0x60(%rsp), %rdi
vxorps %xmm14, %xmm14, %xmm14
vpcmpeqd %xmm13, %xmm13, %xmm13
vbroadcastss 0x1cf9f87(%rip), %xmm12 # 0x1f1ff14
vbroadcastss 0x1cf9f7a(%rip), %xmm10 # 0x1f1ff10
movq (%rsp), %r11
vbroadcastss 0x1cc5a7d(%rip), %xmm11 # 0x1eeba20
movq 0x8(%rsp), %r8
movq 0x10(%rsp), %rsi
movq %r14, %r9
movq 0x68(%rsp), %r10
vpcmpeqd 0x80(%rsp), %xmm14, %xmm1
vpxor %xmm1, %xmm13, %xmm0
movq 0xc8(%rsp), %rax
vbroadcastss 0x1cc6bb1(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x226044
vbroadcastss 0x1cc5a30(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf9f17(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cf9f12(%rip), %xmm12 # 0x1f1ff14
vpcmpeqd %xmm13, %xmm13, %xmm13
jmp 0x22558e
vpcmpeqd %xmm0, %xmm14, %xmm0
vpcmpeqd %xmm13, %xmm13, %xmm13
vpxor %xmm0, %xmm13, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
movq 0x38(%rsp), %rax
vblendvps %xmm0, (%rax), %xmm15, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x22557e
vpcmpeqd %xmm0, %xmm14, %xmm0
vpcmpeqd %xmm13, %xmm13, %xmm13
vpxor %xmm0, %xmm13, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
movq 0x38(%rsp), %rax
vblendvps %xmm0, (%rax), %xmm15, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x225a4e
vmovdqa 0x40(%rsp), %xmm2
vpand %xmm1, %xmm2, %xmm0
vtestps %xmm1, %xmm2
movq 0x1c0(%rsp), %rax
je 0x226092
incq %rax
addq $0xe0, %rbx
vmovdqa %xmm0, %xmm1
cmpq %rdi, %rax
jb 0x225055
vpxor %xmm0, %xmm13, %xmm0
vpor 0x90(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0x90(%rsp)
vtestps %xmm13, %xmm0
movq 0x38(%rsp), %r13
jb 0x2260d9
vmovaps 0x350(%rsp), %xmm1
vbroadcastss 0x1cc6abe(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x350(%rsp)
xorl %eax, %eax
jmp 0x2260dc
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x224e1a
jmp 0x2261e3
pushq $0x2
jmp 0x2260db
vmovaps %xmm14, 0x20(%rsp)
movq %r13, 0x38(%rsp)
movq %r11, (%rsp)
movq %r9, 0x58(%rsp)
movq %rsi, 0x10(%rsp)
movq %r8, 0x8(%rsp)
bsfq %r14, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x40(%rsp)
leaq 0x1f(%rsp), %r8
movq %r10, %r13
pushq %r10
leaq 0x288(%rsp), %rax
pushq %rax
vzeroupper
callq 0x264e02
popq %rcx
popq %rdx
testb %al, %al
je 0x22614c
movq 0x40(%rsp), %rax
orl $-0x1, 0x90(%rsp,%rax,4)
leaq -0x1(%r14), %rax
andq %rax, %r14
movq %r13, %r10
movq 0x58(%rsp), %r9
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %r8
jne 0x22610c
vmovaps 0x90(%rsp), %xmm0
vpcmpeqd %xmm13, %xmm13, %xmm13
vtestps %xmm13, %xmm0
pushq $0x3
popq %rax
vbroadcastss 0x1cc589a(%rip), %xmm11 # 0x1eeba20
movq (%rsp), %r11
movq 0x38(%rsp), %r13
vbroadcastss 0x1cf9d78(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cf9d73(%rip), %xmm12 # 0x1f1ff14
vmovaps 0x20(%rsp), %xmm14
jb 0x224e60
vmovaps 0x350(%rsp), %xmm1
vbroadcastss 0x1cc69c5(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x350(%rsp)
pushq $0x2
popq %rax
jmp 0x224e60
pushq $0x2
popq %rax
movq 0x38(%rsp), %r13
jmp 0x2260dc
vmovaps 0x380(%rsp), %xmm0
vandps 0x90(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc6986(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r13)
addq $0x1a88, %rsp # imm = 0x1A88
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %r9, %rdx
movq %r10, %rcx
jmp 0x263460
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiIntersectorKPluecker<4, 4, true>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x226407
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x226261
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x226419
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x226407
movzbl %al, %ebp
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cfabfe(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1ccad15(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cc6433(%rip), %xmm7 # 0x1eec714
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm2, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vbroadcastss 0x1cfac5c(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vblendvps %xmm8, %xmm6, %xmm2, %xmm2
vdivps %xmm3, %xmm7, %xmm3
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d34626(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d2e648(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d34607(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d345ef(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d345ea(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc566c(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cc67b9(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x2675c4
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x2263d9
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x265b8c
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiIntersectorKPluecker<4, 4, true>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/// Occlusion (shadow/any-hit) query for a packet of K rays against an N-wide BVH.
/// Uses hybrid traversal: the whole packet descends together, but when the number
/// of still-active rays drops to 'switchThreshold' or below, each remaining ray is
/// traversed individually via occluded1(). Rays found occluded have their tfar set
/// to -inf on exit; no hit information beyond occlusion is recorded.
/// @param valid_i  per-lane activity mask (-1 = active lane)
/// @param This     accelerator wrapper; This->ptr is the BVH root structure
/// @param ray      K-wide ray packet (org, dir, tnear, tfar, time)
/// @param context  ray query context (coherency flags, optional user context)
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated, faster traversal path (BVH_AN1 only) */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
/* a lane is live only if its input mask is -1 AND tfar >= 0 (tfar < 0 marks already-occluded rays) */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get an empty interval [+inf, -inf] so node tests never pass for them */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* 'terminated' accumulates lanes that are occluded (or were inactive from the start) */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* slot 0 holds a sentinel (invalidNode) so popping it signals traversal completion */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* 'pop' labels the loop head; inner traversal jumps back here via goto */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active lanes: trace each remaining ray individually from this subtree */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* collapse tfar of occluded lanes so they fail all subsequent node tests */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are packed front-to-back; first empty slot ends the list */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
/* no child was hit: fetch the next node from the stack */
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
/* re-check utilization mid-descent; push current node and pop so the
single-ray switch at the loop head can take over */
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
/* occluded() returns the mask of newly occluded lanes among !terminated */
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* a primitive intersector may defer work by handing back a lazily-built subtree */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* write -inf into tfar of every lane that started valid and ended occluded */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1a78, %rsp # imm = 0x1A78
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x227a76
movq %rcx, %r10
movq %rdx, %r14
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x226474
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x227a8b
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r14), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x227a76
vandps %xmm3, %xmm4, %xmm10
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x240(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x250(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x260(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x270(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x280(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x290(%rsp)
vbroadcastss 0x1cfa9c5(%rip), %xmm8 # 0x1f20ec4
vandps %xmm3, %xmm8, %xmm6
vbroadcastss 0x1ccaadc(%rip), %xmm9 # 0x1ef0fe8
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cc61f9(%rip), %xmm11 # 0x1eec714
vdivps %xmm3, %xmm11, %xmm3
vandps %xmm4, %xmm8, %xmm7
vcmpltps %xmm9, %xmm7, %xmm7
vdivps %xmm4, %xmm11, %xmm4
vandps %xmm5, %xmm8, %xmm8
vcmpltps %xmm9, %xmm8, %xmm8
vbroadcastss 0x1cfaa20(%rip), %xmm9 # 0x1f20f60
vblendvps %xmm6, %xmm9, %xmm3, %xmm3
vblendvps %xmm7, %xmm9, %xmm4, %xmm4
vdivps %xmm5, %xmm11, %xmm5
vblendvps %xmm8, %xmm9, %xmm5, %xmm5
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps %xmm4, 0x2b0(%rsp)
vmovaps %xmm5, 0x2c0(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d343e5(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x2d0(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d2e406(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d343c5(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d343b5(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d343b0(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x2e0(%rsp)
vmovaps %xmm4, 0x2f0(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cc5431(%rip), %xmm11 # 0x1eeba20
vblendvps %xmm10, %xmm3, %xmm11, %xmm1
vmovaps %xmm1, 0x300(%rsp)
vbroadcastss 0x1cc657d(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm10, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x310(%rsp)
vmovaps %xmm10, 0x340(%rsp)
vxorps %xmm0, %xmm10, %xmm0
vmovaps %xmm0, 0xa0(%rsp)
testq %rax, %rax
je 0x226645
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r9d, %r9d
cmpb $0x1, %al
adcq $0x2, %r9
jmp 0x226649
pushq $0x3
popq %r9
leaq 0x80(%r14), %r11
leaq 0x3e0(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xb80(%rsp), %r12
vmovaps %xmm11, -0x20(%r12)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%r12)
leaq 0x1f298fb(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x330(%rsp)
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x320(%rsp)
vbroadcastss 0x1cf9864(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf985f(%rip), %xmm13 # 0x1f1ff14
addq $-0x10, %r12
movq -0x8(%r15), %rbp
addq $-0x8, %r15
cmpq $-0x8, %rbp
je 0x22795d
vmovaps (%r12), %xmm14
vcmpltps 0x310(%rsp), %xmm14, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22796e
movzbl %al, %r13d
popcntl %r13d, %ebx
xorl %eax, %eax
cmpq %r9, %rbx
jbe 0x227972
cmpq %r9, %rbx
jbe 0x227960
testb $0x8, %bpl
pushq $0x8
popq %rbx
jne 0x22689e
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %rbx, %rbp
vmovaps %xmm11, %xmm14
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x226835
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x240(%rsp), %xmm1
vmovaps 0x250(%rsp), %xmm2
vmovaps 0x260(%rsp), %xmm3
vmovaps 0x2a0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x2b0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x2c0(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vminps %xmm1, %xmm5, %xmm0
vminps %xmm2, %xmm7, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vminps %xmm3, %xmm9, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmulps %xmm0, %xmm12, %xmm0
vmaxps %xmm1, %xmm5, %xmm1
vmaxps %xmm2, %xmm7, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm9, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmulps %xmm1, %xmm13, %xmm1
vmaxps 0x300(%rsp), %xmm0, %xmm2
vminps 0x310(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x226835
vblendvps %xmm1, %xmm0, %xmm11, %xmm0
cmpq $0x8, %rbp
je 0x22682e
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm14, (%r12)
addq $0x10, %r12
vmovaps %xmm0, %xmm14
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x22684c
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x226725
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x226897
vmovaps 0x310(%rsp), %xmm0
vcmpnleps %xmm14, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r9
jae 0x226886
testb %cl, %cl
je 0x227960
testb $0x8, %bpl
je 0x226711
jmp 0x22689e
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm14, (%r12)
addq $0x10, %r12
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x226872
cmpq $-0x8, %rbp
je 0x22795d
vmovaps 0x310(%rsp), %xmm0
vcmpnleps %xmm14, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22796e
movl %ebp, %eax
andl $0xf, %eax
vmovaps 0xa0(%rsp), %xmm0
addq $-0x8, %rax
movq %rax, 0x1a0(%rsp)
je 0x22791b
andq $-0x10, %rbp
vxorps 0x1cc5532(%rip), %xmm0, %xmm2 # 0x1eebe20
addq $0x50, %rbp
xorl %edi, %edi
movq (%r10), %rax
movq %rax, 0x1a8(%rsp)
xorl %ebx, %ebx
vmovaps %xmm2, %xmm10
movq %rdi, 0x28(%rsp)
vmovaps %xmm2, 0x90(%rsp)
movl (%rbp,%rbx,4), %eax
cmpl $-0x1, %eax
je 0x2278ed
movl -0x10(%rbp,%rbx,4), %ecx
movq 0x1a8(%rsp), %rdx
movq 0x228(%rdx), %rdx
movq (%rdx,%rcx,8), %rdx
movl -0x50(%rbp,%rbx,4), %edi
vbroadcastss (%rdx,%rdi,4), %xmm4
vbroadcastss 0x4(%rdx,%rdi,4), %xmm1
vbroadcastss 0x8(%rdx,%rdi,4), %xmm2
movl -0x20(%rbp,%rbx,4), %edi
vbroadcastss (%rdx,%rdi,4), %xmm13
vbroadcastss 0x4(%rdx,%rdi,4), %xmm5
vbroadcastss 0x8(%rdx,%rdi,4), %xmm7
vmovaps 0x10(%r14), %xmm3
vmovaps 0x20(%r14), %xmm0
vsubps %xmm3, %xmm1, %xmm6
vsubps %xmm0, %xmm2, %xmm15
vmovaps %xmm5, 0xf0(%rsp)
vsubps %xmm3, %xmm5, %xmm8
vmovaps %xmm8, 0x70(%rsp)
vmovaps %xmm7, 0xe0(%rsp)
vsubps %xmm0, %xmm7, %xmm5
vmovaps %xmm5, 0x60(%rsp)
vsubps %xmm6, %xmm8, %xmm1
vsubps %xmm15, %xmm5, %xmm2
vaddps %xmm6, %xmm8, %xmm7
vmovaps %xmm10, 0x50(%rsp)
vaddps %xmm5, %xmm15, %xmm8
vmulps %xmm2, %xmm7, %xmm5
vmulps %xmm1, %xmm8, %xmm9
vsubps %xmm5, %xmm9, %xmm11
vmovaps (%r14), %xmm9
vsubps %xmm9, %xmm4, %xmm4
vmovaps %xmm13, 0x100(%rsp)
vsubps %xmm9, %xmm13, %xmm5
vsubps %xmm4, %xmm5, %xmm12
vmulps %xmm8, %xmm12, %xmm8
vaddps %xmm4, %xmm5, %xmm10
vmovaps %xmm2, 0x150(%rsp)
vmulps %xmm2, %xmm10, %xmm13
vsubps %xmm8, %xmm13, %xmm13
vmovaps %xmm1, 0x160(%rsp)
vmulps %xmm1, %xmm10, %xmm8
vmovaps %xmm12, 0x140(%rsp)
vmulps %xmm7, %xmm12, %xmm7
vsubps %xmm8, %xmm7, %xmm8
vmovaps 0x60(%r14), %xmm12
vmulps %xmm12, %xmm8, %xmm10
vmovaps 0x50(%r14), %xmm14
vmulps %xmm13, %xmm14, %xmm13
vaddps %xmm13, %xmm10, %xmm13
vmovaps 0x40(%r14), %xmm10
vmulps %xmm11, %xmm10, %xmm11
vaddps %xmm13, %xmm11, %xmm1
vmovaps %xmm1, 0x30(%rsp)
movl -0x40(%rbp,%rbx,4), %edi
vbroadcastss 0x4(%rdx,%rdi,4), %xmm1
vmovaps %xmm1, 0x3c0(%rsp)
vsubps %xmm3, %xmm1, %xmm1
vbroadcastss 0x8(%rdx,%rdi,4), %xmm2
vmovaps %xmm2, 0x3b0(%rsp)
vsubps %xmm0, %xmm2, %xmm2
vsubps %xmm1, %xmm6, %xmm11
vsubps %xmm2, %xmm15, %xmm13
vmovaps %xmm6, 0x190(%rsp)
vaddps %xmm1, %xmm6, %xmm0
vmovaps %xmm15, 0x180(%rsp)
vaddps %xmm2, %xmm15, %xmm3
vmulps %xmm0, %xmm13, %xmm6
vmulps %xmm3, %xmm11, %xmm7
vsubps %xmm6, %xmm7, %xmm6
vbroadcastss (%rdx,%rdi,4), %xmm7
vmovaps %xmm7, 0x3a0(%rsp)
vsubps %xmm9, %xmm7, %xmm7
vsubps %xmm7, %xmm4, %xmm15
vmulps %xmm3, %xmm15, %xmm3
vmovaps %xmm4, 0x170(%rsp)
vaddps %xmm7, %xmm4, %xmm9
vmulps %xmm13, %xmm9, %xmm8
vsubps %xmm3, %xmm8, %xmm3
vmovaps %xmm11, 0x120(%rsp)
vmulps %xmm11, %xmm9, %xmm8
vmulps %xmm0, %xmm15, %xmm0
vsubps %xmm8, %xmm0, %xmm0
vmovaps 0x50(%rsp), %xmm4
vmulps %xmm0, %xmm12, %xmm0
vmulps %xmm3, %xmm14, %xmm3
vaddps %xmm3, %xmm0, %xmm0
vmulps %xmm6, %xmm10, %xmm3
vaddps %xmm0, %xmm3, %xmm9
vsubps %xmm5, %xmm7, %xmm3
vaddps %xmm5, %xmm7, %xmm5
vmovaps 0x70(%rsp), %xmm6
vsubps %xmm6, %xmm1, %xmm0
vaddps %xmm6, %xmm1, %xmm1
vmovaps 0x60(%rsp), %xmm6
vsubps %xmm6, %xmm2, %xmm8
vaddps %xmm6, %xmm2, %xmm2
vmulps %xmm1, %xmm8, %xmm6
vmulps %xmm2, %xmm0, %xmm7
vsubps %xmm6, %xmm7, %xmm6
vmulps %xmm2, %xmm3, %xmm2
vmulps %xmm5, %xmm8, %xmm7
vsubps %xmm2, %xmm7, %xmm2
vmulps %xmm0, %xmm5, %xmm5
vmulps %xmm1, %xmm3, %xmm1
vsubps %xmm5, %xmm1, %xmm1
vmovaps %xmm12, 0x60(%rsp)
vmulps %xmm1, %xmm12, %xmm1
vbroadcastss 0x1cfa361(%rip), %xmm12 # 0x1f20ec4
vmovaps %xmm14, 0x70(%rsp)
vmulps %xmm2, %xmm14, %xmm2
vmovaps %xmm4, %xmm14
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm10, 0x130(%rsp)
vmulps %xmm6, %xmm10, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmovaps 0x30(%rsp), %xmm7
vaddps %xmm7, %xmm9, %xmm2
vaddps %xmm2, %xmm1, %xmm4
vminps %xmm9, %xmm7, %xmm2
vminps %xmm1, %xmm2, %xmm2
vandps %xmm4, %xmm12, %xmm10
vbroadcastss 0x1cfa322(%rip), %xmm5 # 0x1f20ecc
vmulps %xmm5, %xmm10, %xmm5
vbroadcastss 0x1cfa309(%rip), %xmm6 # 0x1f20ec0
vxorps %xmm6, %xmm5, %xmm6
vcmpnltps %xmm6, %xmm2, %xmm2
vmaxps %xmm9, %xmm7, %xmm6
vmaxps %xmm1, %xmm6, %xmm1
vcmpleps %xmm5, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm2
movl -0x30(%rbp,%rbx,4), %edi
vtestps %xmm14, %xmm2
vbroadcastss (%rdx,%rdi,4), %xmm1
vmovaps %xmm1, 0x390(%rsp)
vbroadcastss 0x4(%rdx,%rdi,4), %xmm1
vmovaps %xmm1, 0x380(%rsp)
vbroadcastss 0x8(%rdx,%rdi,4), %xmm1
vmovaps %xmm1, 0x370(%rsp)
je 0x2277ff
vmovaps %xmm10, 0x350(%rsp)
vmovaps %xmm4, 0x360(%rsp)
vmovaps 0x150(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm11
vmulps %xmm10, %xmm11, %xmm1
vmovaps 0x160(%rsp), %xmm4
vmovaps %xmm2, 0x110(%rsp)
vmulps %xmm4, %xmm13, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm0, %xmm13, %xmm5
vmulps %xmm8, %xmm11, %xmm6
vsubps %xmm5, %xmm6, %xmm6
vandps %xmm1, %xmm12, %xmm1
vandps %xmm5, %xmm12, %xmm5
vcmpltps %xmm5, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm7
vmulps %xmm8, %xmm15, %xmm1
vmulps %xmm10, %xmm15, %xmm2
vmovaps 0x140(%rsp), %xmm8
vmulps %xmm13, %xmm8, %xmm5
vsubps %xmm5, %xmm2, %xmm2
vmulps %xmm3, %xmm13, %xmm6
vsubps %xmm1, %xmm6, %xmm6
vandps %xmm5, %xmm12, %xmm5
vandps %xmm1, %xmm12, %xmm1
vcmpltps %xmm1, %xmm5, %xmm1
vblendvps %xmm1, %xmm2, %xmm6, %xmm6
vmulps %xmm3, %xmm11, %xmm1
vmulps %xmm11, %xmm8, %xmm2
vmulps %xmm4, %xmm15, %xmm3
vmulps %xmm0, %xmm15, %xmm0
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm3, %xmm12, %xmm3
vandps %xmm1, %xmm12, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
vmulps 0x60(%rsp), %xmm0, %xmm1
vmulps 0x70(%rsp), %xmm6, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x130(%rsp), %xmm7, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm1, %xmm2
vmulps 0x180(%rsp), %xmm0, %xmm1
vmulps 0x190(%rsp), %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmovaps %xmm7, 0x70(%rsp)
vmulps 0x170(%rsp), %xmm7, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm1, %xmm1
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm4
vbroadcastss 0x1cc59df(%rip), %xmm5 # 0x1eec714
vsubps %xmm4, %xmm5, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vmovaps 0x30(%r14), %xmm3
vmovaps 0x80(%r14), %xmm4
vcmpleps %xmm1, %xmm3, %xmm3
vmovaps %xmm4, 0x60(%rsp)
vcmpleps %xmm4, %xmm1, %xmm4
vandps %xmm4, %xmm3, %xmm3
vxorps %xmm15, %xmm15, %xmm15
vcmpneqps %xmm2, %xmm15, %xmm2
vandps %xmm3, %xmm2, %xmm3
vandps 0x110(%rsp), %xmm14, %xmm2
vpslld $0x1f, %xmm3, %xmm3
vpsrad $0x1f, %xmm3, %xmm3
vtestps %xmm2, %xmm3
movq 0x28(%rsp), %rdi
vmovaps %xmm14, %xmm10
je 0x227829
vmovaps %xmm9, %xmm8
vandps %xmm2, %xmm3, %xmm2
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %r13
vbroadcastss 0x34(%r13), %xmm3
vandps 0x90(%r14), %xmm3, %xmm3
vpcmpeqd %xmm3, %xmm15, %xmm3
vtestps %xmm2, %xmm3
vbroadcastss 0x1cc4c4a(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf9131(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf912c(%rip), %xmm13 # 0x1f1ff14
vpcmpeqd %xmm9, %xmm9, %xmm9
jb 0x226e12
vandnps %xmm2, %xmm3, %xmm2
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x2272d7
cmpq $0x0, 0x48(%r13)
jne 0x2272d7
vpandn %xmm10, %xmm2, %xmm10
vtestps %xmm10, %xmm10
je 0x2272ae
vmovaps (%r14), %xmm1
vmovaps 0x10(%r14), %xmm3
vmovaps 0x20(%r14), %xmm5
vmovaps 0x390(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm9
vmovaps 0x380(%rsp), %xmm0
vsubps %xmm3, %xmm0, %xmm4
vmovaps 0x370(%rsp), %xmm0
vsubps %xmm5, %xmm0, %xmm15
vmovaps 0x100(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm14
vmovaps 0xf0(%rsp), %xmm2
vsubps %xmm3, %xmm2, %xmm13
vmovaps %xmm10, 0x50(%rsp)
vmovaps 0xe0(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm8
vmovaps 0x3a0(%rsp), %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps 0x3c0(%rsp), %xmm2
vsubps %xmm3, %xmm2, %xmm6
vmovaps 0x3b0(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm5
vmovaps %xmm5, 0x30(%rsp)
vsubps %xmm9, %xmm1, %xmm2
vsubps %xmm4, %xmm6, %xmm12
vsubps %xmm15, %xmm5, %xmm0
vaddps %xmm4, %xmm6, %xmm3
vaddps %xmm5, %xmm15, %xmm7
vmulps %xmm0, %xmm3, %xmm10
vmulps %xmm7, %xmm12, %xmm11
vsubps %xmm10, %xmm11, %xmm5
vaddps %xmm1, %xmm9, %xmm10
vmulps %xmm7, %xmm2, %xmm7
vmovaps %xmm0, 0x150(%rsp)
vmulps %xmm0, %xmm10, %xmm11
vsubps %xmm7, %xmm11, %xmm7
vmovaps %xmm12, 0x160(%rsp)
vmulps %xmm12, %xmm10, %xmm10
vmovaps %xmm2, 0x170(%rsp)
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm3, %xmm3
vmovaps 0x60(%r14), %xmm12
vmulps %xmm3, %xmm12, %xmm3
vmovaps 0x50(%r14), %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmulps %xmm7, %xmm0, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vmovaps 0x40(%r14), %xmm0
vmulps %xmm5, %xmm0, %xmm7
vaddps %xmm3, %xmm7, %xmm11
vmovaps %xmm13, %xmm2
vsubps %xmm13, %xmm4, %xmm7
vmovaps %xmm8, %xmm5
vsubps %xmm8, %xmm15, %xmm13
vmovaps %xmm4, 0x190(%rsp)
vaddps %xmm2, %xmm4, %xmm3
vmovaps %xmm2, %xmm8
vmovaps %xmm15, 0x180(%rsp)
vaddps %xmm5, %xmm15, %xmm2
vmulps %xmm3, %xmm13, %xmm15
vmulps %xmm2, %xmm7, %xmm4
vsubps %xmm15, %xmm4, %xmm4
vsubps %xmm14, %xmm9, %xmm15
vmulps %xmm2, %xmm15, %xmm2
vmovaps %xmm9, 0x70(%rsp)
vaddps %xmm14, %xmm9, %xmm9
vmulps %xmm13, %xmm9, %xmm10
vsubps %xmm2, %xmm10, %xmm2
vmovaps %xmm7, 0x130(%rsp)
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm3, %xmm15, %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm3, %xmm12, %xmm3
vmovaps 0x60(%rsp), %xmm10
vmulps %xmm2, %xmm10, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmulps %xmm4, %xmm0, %xmm3
vaddps %xmm2, %xmm3, %xmm9
vsubps %xmm1, %xmm14, %xmm3
vaddps %xmm1, %xmm14, %xmm1
vsubps %xmm6, %xmm8, %xmm14
vaddps %xmm6, %xmm8, %xmm2
vmovaps 0x30(%rsp), %xmm4
vsubps %xmm4, %xmm5, %xmm7
vaddps %xmm4, %xmm5, %xmm4
vmulps %xmm7, %xmm2, %xmm5
vmulps %xmm4, %xmm14, %xmm6
vsubps %xmm5, %xmm6, %xmm5
vmulps %xmm4, %xmm3, %xmm4
vmulps %xmm7, %xmm1, %xmm6
vsubps %xmm4, %xmm6, %xmm4
vmovaps 0x50(%rsp), %xmm6
vmulps %xmm1, %xmm14, %xmm1
vmulps %xmm2, %xmm3, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm12, 0x30(%rsp)
vmulps %xmm1, %xmm12, %xmm1
vmovaps %xmm10, %xmm12
vmulps %xmm4, %xmm10, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm0, 0x140(%rsp)
vmulps %xmm5, %xmm0, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm9, %xmm11, %xmm2
vaddps %xmm2, %xmm1, %xmm4
vminps %xmm9, %xmm11, %xmm2
vminps %xmm1, %xmm2, %xmm2
vbroadcastss 0x1cf9e78(%rip), %xmm10 # 0x1f20ec4
vmovaps %xmm4, 0x100(%rsp)
vandps %xmm4, %xmm10, %xmm5
vbroadcastss 0x1cf9e6a(%rip), %xmm4 # 0x1f20ecc
vmovaps %xmm5, 0xf0(%rsp)
vmulps %xmm4, %xmm5, %xmm4
vbroadcastss 0x1cf9e48(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm4, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm11, 0x110(%rsp)
vmaxps %xmm9, %xmm11, %xmm5
vmaxps %xmm1, %xmm5, %xmm1
vcmpleps %xmm4, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm0
vtestps %xmm6, %xmm0
je 0x227871
vmovaps %xmm9, 0xe0(%rsp)
vmovaps 0x150(%rsp), %xmm11
vmovaps %xmm0, 0x120(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmulps %xmm0, %xmm11, %xmm1
vmovaps %xmm10, %xmm9
vmovaps 0x160(%rsp), %xmm10
vmulps %xmm13, %xmm10, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm13, %xmm14, %xmm4
vmulps %xmm7, %xmm0, %xmm5
vsubps %xmm4, %xmm5, %xmm5
vandps %xmm1, %xmm9, %xmm1
vandps %xmm4, %xmm9, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vmovaps %xmm14, %xmm8
vblendvps %xmm1, %xmm2, %xmm5, %xmm14
vmulps %xmm7, %xmm15, %xmm1
vmulps %xmm11, %xmm15, %xmm2
vmovaps 0x170(%rsp), %xmm11
vmulps %xmm13, %xmm11, %xmm4
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm3, %xmm13, %xmm5
vsubps %xmm1, %xmm5, %xmm5
vandps %xmm4, %xmm9, %xmm4
vandps %xmm1, %xmm9, %xmm1
vcmpltps %xmm1, %xmm4, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm7
vmulps %xmm0, %xmm3, %xmm1
vmulps %xmm0, %xmm11, %xmm2
vmulps %xmm10, %xmm15, %xmm3
vmulps %xmm8, %xmm15, %xmm0
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm3, %xmm9, %xmm3
vandps %xmm1, %xmm9, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm0, %xmm5
vmulps 0x30(%rsp), %xmm5, %xmm0
vmulps %xmm7, %xmm12, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x140(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm1
vmulps 0x180(%rsp), %xmm5, %xmm0
vmulps 0x190(%rsp), %xmm7, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps 0x70(%rsp), %xmm14, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm1, %xmm3
vbroadcastss 0x1cc5552(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vmovaps 0x30(%r14), %xmm2
vmovaps 0x80(%r14), %xmm15
vcmpleps %xmm0, %xmm2, %xmm2
vcmpleps %xmm15, %xmm0, %xmm3
vandps %xmm3, %xmm2, %xmm2
vxorps %xmm9, %xmm9, %xmm9
vcmpneqps %xmm1, %xmm9, %xmm1
vandps %xmm2, %xmm1, %xmm2
vandps 0x120(%rsp), %xmm6, %xmm1
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vtestps %xmm1, %xmm2
vmovaps %xmm6, %xmm10
je 0x22789e
vandps %xmm1, %xmm2, %xmm1
movq (%r10), %rcx
movl -0x10(%rbp,%rbx,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r13
vbroadcastss 0x34(%r13), %xmm2
vandps 0x90(%r14), %xmm2, %xmm2
vpcmpeqd %xmm2, %xmm9, %xmm2
vtestps %xmm1, %xmm2
vbroadcastss 0x1cc47c9(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf8cb0(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf8cab(%rip), %xmm13 # 0x1f1ff14
jb 0x2278b9
vmovaps %xmm14, %xmm8
movl (%rbp,%rbx,4), %ecx
vandnps %xmm1, %xmm2, %xmm1
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x227567
cmpq $0x0, 0x48(%r13)
jne 0x227567
vmovaps 0x90(%rsp), %xmm2
vpandn %xmm10, %xmm1, %xmm10
vtestps %xmm10, %xmm10
setne %al
jmp 0x2272b9
xorl %eax, %eax
vmovaps 0x90(%rsp), %xmm2
testb %al, %al
je 0x2278ed
leaq 0x1(%rbx), %rax
cmpq $0x3, %rbx
movq %rax, %rbx
jb 0x226913
jmp 0x2278ed
vmovaps 0x360(%rsp), %xmm4
vrcpps %xmm4, %xmm3
vmulps %xmm3, %xmm4, %xmm4
vbroadcastss 0x1cc5423(%rip), %xmm7 # 0x1eec714
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1cc9ce2(%rip), %xmm4 # 0x1ef0fe8
vmovaps 0x350(%rsp), %xmm5
vcmpnltps %xmm4, %xmm5, %xmm4
vandps %xmm3, %xmm4, %xmm3
vmulps 0x30(%rsp), %xmm3, %xmm4
vminps %xmm7, %xmm4, %xmm4
vmulps %xmm3, %xmm8, %xmm3
vminps %xmm7, %xmm3, %xmm3
vsubps %xmm4, %xmm7, %xmm5
vsubps %xmm3, %xmm7, %xmm7
vmovaps 0x330(%rsp), %xmm8
vblendvps %xmm8, %xmm5, %xmm4, %xmm4
vblendvps %xmm8, %xmm7, %xmm3, %xmm3
movq 0x8(%r10), %rdx
vmovd %ecx, %xmm5
vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmovd %eax, %xmm7
vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmovaps 0x70(%rsp), %xmm8
vmovaps %xmm8, 0x1b0(%rsp)
vmovaps %xmm6, 0x1c0(%rsp)
vmovaps %xmm0, 0x1d0(%rsp)
vmovaps %xmm4, 0x1e0(%rsp)
vmovaps %xmm3, 0x1f0(%rsp)
vmovdqa %xmm7, 0x200(%rsp)
vmovdqa %xmm5, 0x210(%rsp)
leaq 0x220(%rsp), %rax
vcmptrueps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rax)
vbroadcastss (%rdx), %xmm0
vmovaps %xmm0, 0x220(%rsp)
vbroadcastss 0x4(%rdx), %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovaps 0x60(%rsp), %xmm0
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm2, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x1b0(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r10, 0x18(%rsp)
movq %r8, 0x10(%rsp)
movq %r9, 0x8(%rsp)
movq %r11, (%rsp)
je 0x2274a9
leaq 0xb0(%rsp), %rdi
movq %rsi, 0x48(%rsp)
vzeroupper
callq *%rax
movq 0x28(%rsp), %rdi
vxorps %xmm15, %xmm15, %xmm15
vpcmpeqd %xmm9, %xmm9, %xmm9
vbroadcastss 0x1cf8a95(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cf8a88(%rip), %xmm12 # 0x1f1ff10
movq (%rsp), %r11
movq 0x8(%rsp), %r9
vbroadcastss 0x1cc4586(%rip), %xmm11 # 0x1eeba20
movq 0x10(%rsp), %r8
movq 0x48(%rsp), %rsi
movq 0x18(%rsp), %r10
vmovdqa 0x80(%rsp), %xmm0
vptest %xmm0, %xmm0
vmovaps 0x50(%rsp), %xmm10
je 0x227849
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x227532
testb $0x2, (%rcx)
jne 0x2274dc
testb $0x40, 0x3e(%r13)
je 0x227532
leaq 0xb0(%rsp), %rdi
movq %rsi, %r13
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm10
movq 0x28(%rsp), %rdi
vxorps %xmm15, %xmm15, %xmm15
vpcmpeqd %xmm9, %xmm9, %xmm9
vbroadcastss 0x1cf8a0a(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cf89fd(%rip), %xmm12 # 0x1f1ff10
movq (%rsp), %r11
movq 0x8(%rsp), %r9
vbroadcastss 0x1cc44fb(%rip), %xmm11 # 0x1eeba20
movq 0x10(%rsp), %r8
movq %r13, %rsi
movq 0x18(%rsp), %r10
vpcmpeqd 0x80(%rsp), %xmm15, %xmm1
vpxor %xmm1, %xmm9, %xmm0
movq 0xc8(%rsp), %rax
vbroadcastss 0x1cc5634(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x227851
vmovaps 0x100(%rsp), %xmm3
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cc5193(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cc9a52(%rip), %xmm3 # 0x1ef0fe8
vmovaps 0xf0(%rsp), %xmm4
vcmpnltps %xmm3, %xmm4, %xmm3
vandps %xmm2, %xmm3, %xmm2
vmulps 0x110(%rsp), %xmm2, %xmm3
vminps %xmm6, %xmm3, %xmm4
vmulps 0xe0(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm3
vsubps %xmm4, %xmm6, %xmm14
vsubps %xmm3, %xmm6, %xmm6
vmovaps 0x320(%rsp), %xmm2
vblendvps %xmm2, %xmm14, %xmm4, %xmm14
vblendvps %xmm2, %xmm6, %xmm3, %xmm2
movq 0x8(%r10), %rdx
vmovd %eax, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovd %ecx, %xmm6
vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps %xmm8, 0x1b0(%rsp)
vmovaps %xmm7, 0x1c0(%rsp)
vmovaps %xmm5, 0x1d0(%rsp)
vmovaps %xmm14, 0x1e0(%rsp)
vmovaps %xmm2, 0x1f0(%rsp)
vmovdqa %xmm6, 0x200(%rsp)
vmovdqa %xmm4, 0x210(%rsp)
leaq 0x220(%rsp), %rax
vcmptrueps %ymm2, %ymm2, %ymm2
vmovups %ymm2, (%rax)
vbroadcastss (%rdx), %xmm2
vmovaps %xmm2, 0x220(%rsp)
vbroadcastss 0x4(%rdx), %xmm2
vmovaps %xmm2, 0x230(%rsp)
vblendvps %xmm1, %xmm0, %xmm15, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm1, 0x80(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x1b0(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x48(%r13), %rax
testq %rax, %rax
movq %r10, 0x18(%rsp)
movq %r8, 0x10(%rsp)
movq %r9, 0x8(%rsp)
movq %r11, (%rsp)
vmovaps %xmm15, 0x30(%rsp)
je 0x227742
leaq 0xb0(%rsp), %rdi
movq %rsi, 0x48(%rsp)
vzeroupper
callq *%rax
vmovaps 0x30(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm10
movq 0x28(%rsp), %rdi
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x1cf87fc(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cf87ef(%rip), %xmm12 # 0x1f1ff10
movq (%rsp), %r11
movq 0x8(%rsp), %r9
vbroadcastss 0x1cc42ed(%rip), %xmm11 # 0x1eeba20
movq 0x10(%rsp), %r8
movq 0x48(%rsp), %rsi
movq 0x18(%rsp), %r10
vmovdqa 0x80(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x2278c7
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x2277c6
testb $0x2, (%rcx)
jne 0x22776f
testb $0x40, 0x3e(%r13)
je 0x2277c6
leaq 0xb0(%rsp), %rdi
movq %rsi, %r13
vzeroupper
callq *%rax
vmovaps 0x30(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm10
movq 0x28(%rsp), %rdi
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x1cf8776(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cf8769(%rip), %xmm12 # 0x1f1ff10
movq (%rsp), %r11
movq 0x8(%rsp), %r9
vbroadcastss 0x1cc4267(%rip), %xmm11 # 0x1eeba20
movq 0x10(%rsp), %r8
movq %r13, %rsi
movq 0x18(%rsp), %r10
vpcmpeqd 0x80(%rsp), %xmm9, %xmm1
vpxor 0x1cc4649(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0xc8(%rsp), %rax
vbroadcastss 0x1cc539c(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x2278d3
vbroadcastss 0x1cc4218(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf86ff(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf86fa(%rip), %xmm13 # 0x1f1ff14
movq 0x28(%rsp), %rdi
vmovaps %xmm14, %xmm10
jmp 0x226e12
vbroadcastss 0x1cc41ee(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf86d5(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf86d0(%rip), %xmm13 # 0x1f1ff14
jmp 0x226e12
vpcmpeqd %xmm0, %xmm15, %xmm0
vpxor %xmm0, %xmm9, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm2
vmovaps 0x60(%rsp), %xmm1
vblendvps %xmm0, (%r11), %xmm1, %xmm0
vmovaps %xmm0, (%r11)
jmp 0x226e0d
vbroadcastss 0x1cc41a6(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf868d(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf8688(%rip), %xmm13 # 0x1f1ff14
vmovaps 0x90(%rsp), %xmm2
vmovaps %xmm6, %xmm10
jmp 0x2272a4
vbroadcastss 0x1cc4179(%rip), %xmm11 # 0x1eeba20
vbroadcastss 0x1cf8660(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf865b(%rip), %xmm13 # 0x1f1ff14
vmovaps 0x90(%rsp), %xmm2
jmp 0x2272a4
vpcmpeqd %xmm0, %xmm9, %xmm0
vpxor 0x1cc454d(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
vblendvps %xmm0, (%r11), %xmm15, %xmm0
vmovaps %xmm0, (%r11)
jmp 0x227296
vandps %xmm2, %xmm10, %xmm0
vtestps %xmm2, %xmm10
je 0x227911
incq %rdi
addq $0x60, %rbp
vmovaps %xmm0, %xmm2
cmpq 0x1a0(%rsp), %rdi
jb 0x2268f4
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm0, %xmm0
jmp 0x22791f
vpcmpeqd %xmm1, %xmm1, %xmm1
vorps 0xa0(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0xa0(%rsp)
vtestps %xmm1, %xmm0
jb 0x22795d
vmovaps 0x310(%rsp), %xmm1
vbroadcastss 0x1cc523a(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x310(%rsp)
xorl %eax, %eax
jmp 0x227960
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x2266b5
jmp 0x227a56
pushq $0x2
jmp 0x22795f
vmovaps %xmm14, 0x30(%rsp)
movq %r11, (%rsp)
movq %r9, 0x8(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x48(%rsp)
movq %r8, 0x10(%rsp)
bsfq %r13, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x50(%rsp)
leaq 0x27(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x248(%rsp), %rax
pushq %rax
vzeroupper
callq 0x26a086
popq %rcx
popq %rdx
testb %al, %al
je 0x2279d0
movq 0x50(%rsp), %rax
orl $-0x1, 0xa0(%rsp,%rax,4)
leaq -0x1(%r13), %rax
andq %rax, %r13
movq 0x18(%rsp), %r10
movq 0x48(%rsp), %rsi
movq 0x10(%rsp), %r8
jne 0x227990
vmovaps 0xa0(%rsp), %xmm0
vtestps 0x1cc4426(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
vbroadcastss 0x1cc401a(%rip), %xmm11 # 0x1eeba20
movq 0x8(%rsp), %r9
movq (%rsp), %r11
vbroadcastss 0x1cf84f8(%rip), %xmm12 # 0x1f1ff10
vbroadcastss 0x1cf84f3(%rip), %xmm13 # 0x1f1ff14
vmovaps 0x30(%rsp), %xmm14
jb 0x2266fb
vmovaps 0x310(%rsp), %xmm1
vbroadcastss 0x1cc5145(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x310(%rsp)
pushq $0x2
popq %rax
jmp 0x2266fb
vmovaps 0x340(%rsp), %xmm0
vandps 0xa0(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc5113(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r11)
addq $0x1a78, %rsp # imm = 0x1A78
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %r14, %rdx
movq %r10, %rcx
addq $0x1a78, %rsp # imm = 0x1A78
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2686c4
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiMBIntersectorKMoeller<4, 4, true>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Packet (K-wide) closest-hit BVH traversal with hybrid chunk/single-ray
 * switching.  The packet is first filtered to the active lanes, then rays
 * are processed in groups of compatible direction-sign octants so chunk
 * traversal stays coherent; whenever the number of active lanes drops to
 * 'switchThreshold' or below, traversal falls back to per-ray single
 * traversal (intersect1).  Hits are written into 'ray' by the leaf
 * primitive intersector.
 *
 * valid_i  - per-lane activity mask (-1 marks an active lane); read-only
 * This     - intersector set whose 'ptr' member holds the BVH
 * ray      - K-wide ray packet; hit fields and tfar are updated in place
 * context  - ray query context (user context / coherency flags)
 */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated, faster traversal path (BVH_AN1 only) */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into the SoA traversal structure (org, dir, reciprocal dir, ...) */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
/* clamp the ray interval to non-negative t */
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* deactivate invalid lanes by giving them an empty [+inf,-inf] interval */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* pure single-ray mode: trace each active lane independently */
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* octant = 3-bit code of the ray direction signs; invalid lanes get -1 */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the exactly-opposite octant: all three sign bits differ */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: peel off one octant-compatible ray group per iteration */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
/* popcount of the 3-bit XOR = number of direction-sign axes that differ */
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
/* group rays whose octant differs in at most one sign bit from the seed ray */
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark the selected lanes as consumed */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node; stack_node[0] is an invalidNode
   sentinel so popping it terminates the traversal loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active lanes for SIMD to pay off: trace them one by one */
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* single-ray hits may have shortened ray.tfar; propagate to the packet */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
/* inner down-traversal: descend until we reach a leaf */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits: partially sort the
   top stack entries so nearer nodes are popped first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* push current node back and re-pop it, which re-triggers the
   single-ray switch test at the top of the loop */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink the traversal interval to the closest hit found so far */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* a leaf intersector may defer a subtree (lazy build); push it with
   -inf distance so it is processed next */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %rbx
cmpq $0x8, 0x70(%rbx)
je 0x227c95
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x227c95
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
movzbl %al, %ebp
vmovaps (%rdx), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%rdx), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%rdx), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%rdx), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%rdx), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%rdx), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cf9395(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1cc94ac(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vandps %xmm4, %xmm2, %xmm5
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vrcpps %xmm1, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cc4b9e(%rip), %xmm5 # 0x1eec714
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d32d96(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d2cdb8(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d32d77(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d32d5f(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d32d5a(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%rdx), %xmm1
vmovaps 0x80(%rdx), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc3dde(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cc4f2b(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r13
movq 0x70(%rbx), %rdx
movq %r12, %rdi
movq %rbx, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x26af5e
popq %rax
popq %rcx
andq %r13, %rbp
jne 0x227c67
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiMBIntersectorKMoeller<4, 4, true>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Packet (K-wide) occlusion (any-hit / shadow) BVH traversal.  Unlike
 * intersect(), a lane terminates as soon as any occluder is found; the
 * whole traversal stops early once every lane is terminated.  On exit,
 * ray.tfar is set to -inf for every lane found occluded (the caller
 * tests tfar < 0 for "in shadow").
 *
 * valid_i  - per-lane activity mask (-1 marks an active lane); read-only
 * This     - intersector set whose 'ptr' member holds the BVH
 * ray      - K-wide ray packet; only tfar is written (to -inf if occluded)
 * context  - ray query context (user context / coherency flags)
 */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated, faster traversal path (BVH_AN1 only) */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays (tfar < 0 marks occluded) */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into the SoA traversal structure (org, dir, reciprocal dir, ...) */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
/* clamp the ray interval to non-negative t; deactivate invalid lanes by
   giving them an empty [+inf,-inf] interval */
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid lanes start out terminated so they never trigger work */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node; stack_node[0] is an invalidNode
   sentinel so popping it terminates the traversal loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active lanes for SIMD to pay off: trace them one by one */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
/* early out once every lane has found an occluder */
if (all(terminated)) break;
/* empty the interval of terminated lanes so they cull all nodes */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
/* inner down-traversal: descend until we reach a leaf */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child
   (no distance ordering here — any hit suffices for occlusion) */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* push current node back and re-pop it, which re-triggers the
   single-ray switch test at the top of the loop */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
/* early out once every lane has found an occluder */
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* a leaf intersector may defer a subtree (lazy build); push it with
   -inf distance so it is processed next */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* flag occluded lanes for the caller by writing tfar = -inf */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x19f8, %rsp # imm = 0x19F8
movq %rsi, %rax
movq (%rsi), %rsi
cmpq $0x8, 0x70(%rsi)
je 0x229455
movq %rdx, %r14
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%rdx), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x229455
movq %rcx, %r10
movq %rax, 0x128(%rsp)
vandps %xmm3, %xmm4, %xmm8
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x220(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x230(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x240(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x250(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x260(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x270(%rsp)
vbroadcastss 0x1cf9162(%rip), %xmm9 # 0x1f20ec4
vandps %xmm3, %xmm9, %xmm6
vbroadcastss 0x1cc9279(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm3, %xmm3
vandps %xmm4, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm4, %xmm4
vandps %xmm5, %xmm9, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vblendvps %xmm6, %xmm7, %xmm5, %xmm5
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1cc496b(%rip), %xmm7 # 0x1eec714
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm6, %xmm3
vrcpps %xmm4, %xmm6
vmulps %xmm6, %xmm4, %xmm4
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm6, %xmm5
vmovaps %xmm3, 0x280(%rsp)
vmovaps %xmm4, 0x290(%rsp)
vmovaps %xmm5, 0x2a0(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d32b5e(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x2b0(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d2cb7f(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d32b3e(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vmovaps %xmm3, 0x2c0(%rsp)
vcmpnltps %xmm2, %xmm5, %xmm3
vbroadcastss 0x1d32b25(%rip), %xmm4 # 0x1f5a96c
vbroadcastss 0x1d32b20(%rip), %xmm5 # 0x1f5a970
vblendvps %xmm3, %xmm4, %xmm5, %xmm3
vmovaps %xmm3, 0x2d0(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cc3baa(%rip), %xmm1 # 0x1eeba20
vblendvps %xmm8, %xmm3, %xmm1, %xmm1
vmovaps %xmm1, 0x2e0(%rsp)
vbroadcastss 0x1cc4cf6(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm8, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x2f0(%rsp)
vmovaps %xmm8, 0x320(%rsp)
vxorps %xmm0, %xmm8, %xmm0
vmovaps %xmm0, 0x110(%rsp)
cmpq $0x0, 0x8(%rcx)
je 0x227ece
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r8d, %r8d
cmpb $0x1, %al
adcq $0x2, %r8
jmp 0x227ed2
pushq $0x3
popq %r8
leaq 0x80(%r14), %rax
movq %rax, 0x108(%rsp)
leaq 0x360(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xb00(%rsp), %r12
vbroadcastss 0x1cc3b1e(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, -0x20(%r12)
movq 0x70(%rsi), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%r12)
leaq 0x1f28061(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x310(%rsp)
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x300(%rsp)
addq $-0x10, %r12
movq -0x8(%r15), %r13
addq $-0x8, %r15
cmpq $-0x8, %r13
je 0x229344
vmovaps (%r12), %xmm1
vcmpltps 0x2f0(%rsp), %xmm1, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x229355
movzbl %al, %ebp
popcntl %ebp, %ebx
xorl %eax, %eax
cmpq %r8, %rbx
jbe 0x229359
cmpq %r8, %rbx
pushq $0x8
popq %r9
jbe 0x229347
testb $0x8, %r13b
jne 0x2281fb
vmovaps 0x2f0(%rsp), %xmm0
movq %r13, %rax
andq $-0x10, %rax
andl $0x7, %r13d
movl %r13d, %ecx
vcmpnleps %xmm1, %xmm0, %xmm0
xorl %edx, %edx
movq %r9, %r13
vbroadcastss 0x1cc3a5e(%rip), %xmm1 # 0x1eeba20
movq (%rax,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x228165
vmovaps %xmm1, %xmm15
vbroadcastss 0x80(%rax,%rdx,4), %xmm2
vbroadcastss 0x20(%rax,%rdx,4), %xmm3
vmovaps 0x70(%r14), %xmm1
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa0(%rax,%rdx,4), %xmm3
vbroadcastss 0x40(%rax,%rdx,4), %xmm4
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0xc0(%rax,%rdx,4), %xmm4
vbroadcastss 0x60(%rax,%rdx,4), %xmm5
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0x90(%rax,%rdx,4), %xmm5
vbroadcastss 0x30(%rax,%rdx,4), %xmm6
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0xb0(%rax,%rdx,4), %xmm6
vbroadcastss 0x50(%rax,%rdx,4), %xmm7
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0xd0(%rax,%rdx,4), %xmm7
vbroadcastss 0x70(%rax,%rdx,4), %xmm8
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmovaps 0x220(%rsp), %xmm8
vmovaps 0x230(%rsp), %xmm9
vmovaps 0x240(%rsp), %xmm10
vmovaps 0x280(%rsp), %xmm11
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm12
vsubps %xmm9, %xmm3, %xmm2
vmovaps 0x290(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm13
vsubps %xmm10, %xmm4, %xmm2
vmovaps 0x2a0(%rsp), %xmm4
vmulps %xmm4, %xmm2, %xmm14
vsubps %xmm8, %xmm5, %xmm2
vmulps %xmm2, %xmm11, %xmm5
vsubps %xmm9, %xmm6, %xmm2
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm7, %xmm2
vmulps %xmm4, %xmm2, %xmm4
vpminsd %xmm5, %xmm12, %xmm2
vpminsd %xmm3, %xmm13, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpminsd %xmm4, %xmm14, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpmaxsd %xmm5, %xmm12, %xmm5
vpmaxsd %xmm3, %xmm13, %xmm3
vpminsd %xmm3, %xmm5, %xmm5
vpmaxsd %xmm4, %xmm14, %xmm4
vpmaxsd 0x2e0(%rsp), %xmm2, %xmm3
vpminsd %xmm4, %xmm5, %xmm4
vpminsd 0x2f0(%rsp), %xmm4, %xmm4
cmpl $0x6, %ecx
je 0x22817e
vcmpleps %xmm4, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm1
vpslld $0x1f, %xmm1, %xmm1
vtestps %xmm1, %xmm1
je 0x2281ab
vbroadcastss 0x1cc38db(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
cmpq $0x8, %r13
je 0x228162
movq %r13, (%r15)
addq $0x8, %r15
vmovaps %xmm15, (%r12)
addq $0x10, %r12
movq %rdi, %r13
cmpq $0x8, %rdi
je 0x2281b1
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x227fc2
jmp 0x2281b1
vcmpleps %xmm4, %xmm3, %xmm3
vbroadcastss 0xe0(%rax,%rdx,4), %xmm4
vcmpleps %xmm1, %xmm4, %xmm4
vbroadcastss 0xf0(%rax,%rdx,4), %xmm5
vcmpltps %xmm5, %xmm1, %xmm1
vandps %xmm1, %xmm4, %xmm1
vandps %xmm3, %xmm1, %xmm1
jmp 0x22812c
vmovaps %xmm15, %xmm1
jmp 0x228165
xorl %eax, %eax
cmpq $0x8, %r13
je 0x2281f4
vmovaps 0x2f0(%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r8
jae 0x2281e3
testb %cl, %cl
jne 0x227f8e
jmp 0x229347
movq %r13, (%r15)
addq $0x8, %r15
vmovaps %xmm1, (%r12)
addq $0x10, %r12
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x2281d6
cmpq $-0x8, %r13
je 0x229344
vmovaps 0x2f0(%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x229355
movl %r13d, %eax
andl $0xf, %eax
vmovaps 0x110(%rsp), %xmm0
addq $-0x8, %rax
movq %rax, 0x1f0(%rsp)
je 0x229302
movq %r8, 0x100(%rsp)
andq $-0x10, %r13
vxorps 0x1cc3bcd(%rip), %xmm0, %xmm4 # 0x1eebe20
xorl %eax, %eax
movq %rax, 0x1f8(%rsp)
imulq $0x60, %rax, %r11
addq %r13, %r11
vmovmskps %xmm4, %eax
movq %rax, 0x28(%rsp)
xorl %ebp, %ebp
vmovaps %xmm4, %xmm1
vmovaps %xmm4, 0xe0(%rsp)
movl 0x50(%r11,%rbp,4), %eax
cmpl $-0x1, %eax
je 0x2292c8
vmovaps %xmm1, 0x30(%rsp)
movq (%r10), %rdx
movl 0x40(%r11,%rbp,4), %ecx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rbx
vbroadcastss 0x2c(%rbx), %xmm0
vmovss 0x30(%rbx), %xmm1
vbroadcastss 0x28(%rbx), %xmm2
vmovaps 0x70(%r14), %xmm3
vsubps %xmm0, %xmm3, %xmm3
vsubss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vdivps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vroundps $0x1, %xmm0, %xmm1
vaddss 0x1cc86ef(%rip), %xmm2, %xmm2 # 0x1ef09cc
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vminps %xmm2, %xmm1, %xmm1
vmaxps 0x1cc3722(%rip), %xmm1, %xmm1 # 0x1eeba10
vsubps %xmm1, %xmm0, %xmm0
vcvtps2dq %xmm1, %xmm1
vmovapd %xmm1, 0x60(%rsp)
movq 0x28(%rsp), %rdx
bsfq %rdx, %rdx
movslq 0x60(%rsp,%rdx,4), %r8
vmovd %r8d, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vpcmpeqd %xmm1, %xmm2, %xmm1
vtestps %xmm4, %xmm1
jae 0x228793
movq 0xe0(%rbx), %rdi
imulq $0x38, %r8, %r8
movq (%rdi,%r8), %rdx
movq 0x38(%rdi,%r8), %r9
movl (%r11,%rbp,4), %edi
movl 0x10(%r11,%rbp,4), %r8d
vbroadcastss (%rdx,%rdi,4), %xmm2
vbroadcastss 0x4(%rdx,%rdi,4), %xmm3
vbroadcastss 0x8(%rdx,%rdi,4), %xmm4
vbroadcastss (%r9,%rdi,4), %xmm5
vbroadcastss 0x4(%r9,%rdi,4), %xmm6
vbroadcastss 0x8(%r9,%rdi,4), %xmm7
vbroadcastss 0x1cc43a3(%rip), %xmm1 # 0x1eec714
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulps %xmm7, %xmm0, %xmm7
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm7, %xmm4, %xmm14
vbroadcastss (%rdx,%r8,4), %xmm5
vbroadcastss 0x4(%rdx,%r8,4), %xmm6
vbroadcastss 0x8(%rdx,%r8,4), %xmm7
vbroadcastss (%r9,%r8,4), %xmm8
vbroadcastss 0x4(%r9,%r8,4), %xmm9
vbroadcastss 0x8(%r9,%r8,4), %xmm10
vmulps %xmm0, %xmm8, %xmm8
vmulps %xmm0, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm5, %xmm8, %xmm11
vmulps %xmm6, %xmm1, %xmm5
vaddps %xmm5, %xmm9, %xmm12
vmulps %xmm7, %xmm1, %xmm5
vaddps %xmm5, %xmm10, %xmm13
movl 0x20(%r11,%rbp,4), %edi
vbroadcastss (%rdx,%rdi,4), %xmm5
vbroadcastss 0x4(%rdx,%rdi,4), %xmm6
vbroadcastss 0x8(%rdx,%rdi,4), %xmm7
vbroadcastss (%r9,%rdi,4), %xmm8
vbroadcastss 0x4(%r9,%rdi,4), %xmm9
vbroadcastss 0x8(%r9,%rdi,4), %xmm10
vmulps %xmm0, %xmm8, %xmm8
vmulps %xmm0, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vmovaps %xmm5, 0x1e0(%rsp)
vmulps %xmm6, %xmm1, %xmm5
vaddps %xmm5, %xmm9, %xmm5
vmovaps %xmm5, 0x1d0(%rsp)
vmulps %xmm7, %xmm1, %xmm5
vaddps %xmm5, %xmm10, %xmm5
vmovaps %xmm5, 0x210(%rsp)
movl 0x30(%r11,%rbp,4), %edi
vbroadcastss (%r9,%rdi,4), %xmm5
vbroadcastss 0x4(%r9,%rdi,4), %xmm6
vbroadcastss 0x8(%r9,%rdi,4), %xmm7
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulps %xmm7, %xmm0, %xmm0
vbroadcastss (%rdx,%rdi,4), %xmm7
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm5, %xmm7, %xmm8
vbroadcastss 0x4(%rdx,%rdi,4), %xmm5
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm6
vbroadcastss 0x8(%rdx,%rdi,4), %xmm5
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmovaps %xmm11, 0xb0(%rsp)
vsubps %xmm11, %xmm2, %xmm7
vmovaps %xmm12, 0xc0(%rsp)
vsubps %xmm12, %xmm3, %xmm9
vmovaps %xmm13, 0x50(%rsp)
vsubps %xmm13, %xmm14, %xmm11
vmovaps %xmm8, 0xa0(%rsp)
vsubps %xmm2, %xmm8, %xmm12
vmovaps %xmm6, 0xd0(%rsp)
vsubps %xmm3, %xmm6, %xmm13
vmovaps %xmm0, 0x340(%rsp)
vsubps %xmm14, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm1
vmulps %xmm11, %xmm13, %xmm5
vsubps %xmm1, %xmm5, %xmm1
vmovaps %xmm1, 0xf0(%rsp)
vmulps %xmm11, %xmm12, %xmm1
vmulps %xmm0, %xmm7, %xmm5
vsubps %xmm1, %xmm5, %xmm4
vmovaps %xmm7, 0x10(%rsp)
vmulps %xmm7, %xmm13, %xmm5
vmovaps %xmm9, 0x1c0(%rsp)
vmulps %xmm9, %xmm12, %xmm6
vsubps %xmm5, %xmm6, %xmm9
vsubps (%r14), %xmm2, %xmm6
vsubps 0x10(%r14), %xmm3, %xmm7
vsubps 0x20(%r14), %xmm14, %xmm8
vmovaps 0x50(%r14), %xmm2
vmovaps 0x60(%r14), %xmm10
vmulps %xmm2, %xmm8, %xmm3
vmulps %xmm7, %xmm10, %xmm14
vsubps %xmm3, %xmm14, %xmm3
vmovaps 0x40(%r14), %xmm1
vmulps %xmm6, %xmm10, %xmm14
vmulps %xmm1, %xmm8, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vmulps %xmm1, %xmm7, %xmm15
vmulps %xmm2, %xmm6, %xmm5
vsubps %xmm15, %xmm5, %xmm15
vmovaps %xmm9, 0x200(%rsp)
vmulps %xmm10, %xmm9, %xmm5
vmovaps %xmm4, 0x330(%rsp)
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vmovaps 0xf0(%rsp), %xmm5
vmulps %xmm1, %xmm5, %xmm1
vaddps %xmm2, %xmm1, %xmm10
vmulps %xmm0, %xmm15, %xmm0
vmulps %xmm14, %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cf8901(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm10, %xmm13
vmulps %xmm3, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm13, %xmm2
vcmpnltps 0x1cc3438(%rip), %xmm2, %xmm12 # 0x1eeba10
vmovaps 0x30(%rsp), %xmm1
vtestps %xmm1, %xmm12
jne 0x228bba
vmovaps 0xb0(%rsp), %xmm0
vmovaps 0xa0(%rsp), %xmm3
vmovaps 0xd0(%rsp), %xmm6
vtestps %xmm1, %xmm1
vmovaps 0xc0(%rsp), %xmm2
vmovaps 0x50(%rsp), %xmm5
je 0x22876a
vmovaps 0x1e0(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm12
vmovaps 0x1d0(%rsp), %xmm8
vsubps %xmm6, %xmm8, %xmm4
vmovaps 0x210(%rsp), %xmm9
vsubps 0x340(%rsp), %xmm9, %xmm10
vsubps %xmm7, %xmm0, %xmm3
vsubps %xmm8, %xmm2, %xmm11
vsubps %xmm9, %xmm5, %xmm15
vmulps %xmm4, %xmm15, %xmm0
vmovaps %xmm1, 0x30(%rsp)
vmulps %xmm11, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0xc0(%rsp)
vmulps %xmm3, %xmm10, %xmm1
vmulps %xmm15, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm6
vmovaps %xmm12, 0x50(%rsp)
vmulps %xmm11, %xmm12, %xmm2
vmovaps %xmm4, 0xf0(%rsp)
vmulps %xmm3, %xmm4, %xmm5
vsubps %xmm2, %xmm5, %xmm5
vsubps (%r14), %xmm7, %xmm7
vsubps 0x10(%r14), %xmm8, %xmm8
vsubps 0x20(%r14), %xmm9, %xmm4
vmovaps 0x50(%r14), %xmm9
vmovaps 0x60(%r14), %xmm0
vmulps %xmm4, %xmm9, %xmm12
vmulps %xmm0, %xmm8, %xmm13
vsubps %xmm12, %xmm13, %xmm12
vmovaps 0x40(%r14), %xmm1
vmulps %xmm0, %xmm7, %xmm13
vmulps %xmm1, %xmm4, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vmulps %xmm1, %xmm8, %xmm14
vmulps %xmm7, %xmm9, %xmm2
vsubps %xmm14, %xmm2, %xmm14
vmovaps %xmm5, 0xa0(%rsp)
vmulps %xmm0, %xmm5, %xmm0
vmovaps %xmm6, 0xb0(%rsp)
vmulps %xmm6, %xmm9, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmovaps 0xc0(%rsp), %xmm2
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm9
vmulps %xmm14, %xmm15, %xmm0
vmulps %xmm13, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cf8793(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm9, %xmm11
vmulps %xmm3, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmovaps 0x30(%rsp), %xmm1
vxorps %xmm0, %xmm11, %xmm3
vcmpnltps 0x1cc32c4(%rip), %xmm3, %xmm15 # 0x1eeba10
vtestps %xmm1, %xmm15
jne 0x228c06
vmovaps 0xe0(%rsp), %xmm4
vtestps %xmm1, %xmm1
setne %al
jmp 0x228775
xorl %eax, %eax
vmovaps 0xe0(%rsp), %xmm4
testb %al, %al
je 0x2292c8
leaq 0x1(%rbp), %rax
cmpq $0x3, %rbp
movq %rax, %rbp
jb 0x22827c
jmp 0x2292c8
movq %r11, 0x10(%rsp)
movq %rsi, (%rsp)
movq %r10, 0x8(%rsp)
cmpq $0x0, 0x28(%rsp)
je 0x22884a
movq 0xe0(%rbx), %rsi
movq %rsi, 0x50(%rsp)
addq $0x38, %rsi
movq %rsi, 0xc0(%rsp)
movq 0x10(%rsp), %rdi
movl (%rdi,%rbp,4), %r8d
movq 0x28(%rsp), %r9
movq %rdx, %r10
movslq 0x60(%rsp,%r10,4), %r11
imulq $0x38, %r11, %r11
movq 0x50(%rsp), %rsi
movq (%rsi,%r11), %rdi
movq 0xc0(%rsp), %rsi
movq (%rsi,%r11), %r11
vmovups (%rdi,%r8,4), %xmm1
vmovdqu (%r11,%r8,4), %xmm2
vmovss %xmm1, 0x130(%rsp,%r10,4)
vextractps $0x1, %xmm1, 0x140(%rsp,%r10,4)
vextractps $0x2, %xmm1, 0x150(%rsp,%r10,4)
vmovd %xmm2, 0x70(%rsp,%r10,4)
vpextrd $0x1, %xmm2, 0x80(%rsp,%r10,4)
vpextrd $0x2, %xmm2, 0x90(%rsp,%r10,4)
btcq %r10, %r9
bsfq %r9, %r10
testq %r9, %r9
jne 0x2287d6
vbroadcastss 0x1cc3ec1(%rip), %xmm1 # 0x1eec714
vsubps %xmm0, %xmm1, %xmm1
vmulps 0x130(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0xf0(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0xb0(%rsp)
vmulps 0x150(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0xa0(%rsp)
vmulps 0x70(%rsp), %xmm0, %xmm2
vmovaps %xmm2, 0xd0(%rsp)
vmulps 0x80(%rsp), %xmm0, %xmm2
vmovaps %xmm2, 0x1e0(%rsp)
vmulps 0x90(%rsp), %xmm0, %xmm14
cmpq $0x0, 0x28(%rsp)
je 0x228961
movq 0xe0(%rbx), %rsi
movq %rsi, 0x50(%rsp)
addq $0x38, %rsi
movq %rsi, 0xc0(%rsp)
movq 0x10(%rsp), %rdi
movl 0x10(%rdi,%rbp,4), %r8d
movq 0x28(%rsp), %r9
movq %rdx, %r10
movslq 0x60(%rsp,%r10,4), %r11
imulq $0x38, %r11, %r11
movq 0x50(%rsp), %rsi
movq (%rsi,%r11), %rdi
movq 0xc0(%rsp), %rsi
movq (%rsi,%r11), %r11
vmovups (%rdi,%r8,4), %xmm5
vmovups (%r11,%r8,4), %xmm6
vmovss %xmm5, 0x130(%rsp,%r10,4)
vextractps $0x1, %xmm5, 0x140(%rsp,%r10,4)
vextractps $0x2, %xmm5, 0x150(%rsp,%r10,4)
vmovss %xmm6, 0x70(%rsp,%r10,4)
vextractps $0x1, %xmm6, 0x80(%rsp,%r10,4)
vextractps $0x2, %xmm6, 0x90(%rsp,%r10,4)
btcq %r10, %r9
bsfq %r9, %r10
testq %r9, %r9
jne 0x2288ed
vmulps 0x130(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x1d0(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm9
vmulps 0x150(%rsp), %xmm1, %xmm10
vmulps 0x70(%rsp), %xmm0, %xmm11
vmulps 0x80(%rsp), %xmm0, %xmm12
vmulps 0x90(%rsp), %xmm0, %xmm13
cmpq $0x0, 0x28(%rsp)
je 0x228a47
movq 0xe0(%rbx), %rsi
movq %rsi, 0x50(%rsp)
addq $0x38, %rsi
movq %rsi, 0xc0(%rsp)
movq 0x10(%rsp), %rdi
movl 0x20(%rdi,%rbp,4), %r8d
movq 0x28(%rsp), %r9
movq %rdx, %r10
movslq 0x60(%rsp,%r10,4), %r11
imulq $0x38, %r11, %r11
movq 0x50(%rsp), %rsi
movq (%rsi,%r11), %rdi
movq 0xc0(%rsp), %rsi
movq (%rsi,%r11), %r11
vmovups (%rdi,%r8,4), %xmm5
vmovups (%r11,%r8,4), %xmm6
vmovss %xmm5, 0x130(%rsp,%r10,4)
vextractps $0x1, %xmm5, 0x140(%rsp,%r10,4)
vextractps $0x2, %xmm5, 0x150(%rsp,%r10,4)
vmovss %xmm6, 0x70(%rsp,%r10,4)
vextractps $0x1, %xmm6, 0x80(%rsp,%r10,4)
vextractps $0x2, %xmm6, 0x90(%rsp,%r10,4)
btcq %r10, %r9
bsfq %r9, %r10
testq %r9, %r9
jne 0x2289d3
vmulps 0x130(%rsp), %xmm1, %xmm5
vmulps 0x140(%rsp), %xmm1, %xmm6
vmulps 0x150(%rsp), %xmm1, %xmm4
vmulps 0x70(%rsp), %xmm0, %xmm7
vmulps 0x80(%rsp), %xmm0, %xmm8
vmulps 0x90(%rsp), %xmm0, %xmm15
cmpq $0x0, 0x28(%rsp)
je 0x228b01
movq 0xe0(%rbx), %rsi
leaq 0x38(%rsi), %rdi
movq 0x10(%rsp), %r8
movl 0x30(%r8,%rbp,4), %r8d
movq 0x28(%rsp), %r9
movslq 0x60(%rsp,%rdx,4), %r10
imulq $0x38, %r10, %r10
movq (%rsi,%r10), %r11
movq (%rdi,%r10), %r10
vmovups (%r11,%r8,4), %xmm2
vmovups (%r10,%r8,4), %xmm3
vmovss %xmm2, 0x130(%rsp,%rdx,4)
vextractps $0x1, %xmm2, 0x140(%rsp,%rdx,4)
vextractps $0x2, %xmm2, 0x150(%rsp,%rdx,4)
vmovss %xmm3, 0x70(%rsp,%rdx,4)
vextractps $0x1, %xmm3, 0x80(%rsp,%rdx,4)
vextractps $0x2, %xmm3, 0x90(%rsp,%rdx,4)
btcq %rdx, %r9
bsfq %r9, %rdx
testq %r9, %r9
jne 0x228a9c
vmovaps 0xd0(%rsp), %xmm2
vaddps 0xf0(%rsp), %xmm2, %xmm2
vmovaps 0x1e0(%rsp), %xmm3
vaddps 0xb0(%rsp), %xmm3, %xmm3
vaddps 0xa0(%rsp), %xmm14, %xmm14
vaddps 0x1d0(%rsp), %xmm11, %xmm11
vaddps %xmm12, %xmm9, %xmm12
vaddps %xmm13, %xmm10, %xmm13
vaddps %xmm7, %xmm5, %xmm5
vmovaps %xmm5, 0x1e0(%rsp)
vaddps %xmm6, %xmm8, %xmm5
vmovaps %xmm5, 0x1d0(%rsp)
vmulps 0x130(%rsp), %xmm1, %xmm5
vmulps 0x140(%rsp), %xmm1, %xmm6
vmulps 0x150(%rsp), %xmm1, %xmm1
vmulps 0x70(%rsp), %xmm0, %xmm7
vaddps %xmm4, %xmm15, %xmm8
vmovaps %xmm8, 0x210(%rsp)
vaddps %xmm7, %xmm5, %xmm8
vmulps 0x80(%rsp), %xmm0, %xmm5
vaddps %xmm5, %xmm6, %xmm6
vmulps 0x90(%rsp), %xmm0, %xmm0
vaddps %xmm0, %xmm1, %xmm0
movq 0x8(%rsp), %r10
movq (%rsp), %rsi
movq 0x10(%rsp), %r11
jmp 0x2284a2
vmovaps %xmm8, %xmm4
vmovaps 0x330(%rsp), %xmm8
vandps %xmm1, %xmm12, %xmm12
vmulps %xmm15, %xmm11, %xmm0
vmulps 0x1c0(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x10(%rsp), %xmm3, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm13, %xmm3
vcmpnltps 0x1cc2e1c(%rip), %xmm3, %xmm9 # 0x1eeba10
vtestps %xmm12, %xmm9
jne 0x228c52
vmovdqa 0x30(%rsp), %xmm1
jmp 0x2285e9
vmovaps %xmm4, %xmm5
vandps %xmm1, %xmm15, %xmm15
vmulps %xmm14, %xmm10, %xmm0
vmulps 0xf0(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x50(%rsp), %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm4
vcmpnltps 0x1cc2dd9(%rip), %xmm4, %xmm10 # 0x1eeba10
vtestps %xmm15, %xmm10
jne 0x228c9f
vmovaps 0xe0(%rsp), %xmm4
vmovaps 0x30(%rsp), %xmm1
jmp 0x228760
vmovaps %xmm5, %xmm14
movq %r11, 0x10(%rsp)
movq %rsi, (%rsp)
movq %r10, 0x8(%rsp)
vbroadcastss 0x1cf8257(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm10, %xmm5
vandps %xmm12, %xmm9, %xmm9
vsubps %xmm2, %xmm5, %xmm0
vcmpnltps %xmm3, %xmm0, %xmm11
vtestps %xmm9, %xmm11
jne 0x228cf1
movq 0x8(%rsp), %r10
movq (%rsp), %rsi
vmovdqa 0x30(%rsp), %xmm1
movq 0x10(%rsp), %r11
jmp 0x2285e9
movq %r11, 0x10(%rsp)
movq %rsi, (%rsp)
movq %r10, %rbx
vbroadcastss 0x1cf8210(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm9, %xmm6
vandps %xmm15, %xmm10, %xmm10
vsubps %xmm3, %xmm6, %xmm0
vcmpnltps %xmm4, %xmm0, %xmm12
vtestps %xmm10, %xmm12
jne 0x228dc1
movq %rbx, %r10
movq (%rsp), %rsi
vmovaps 0xe0(%rsp), %xmm4
vmovdqa 0x30(%rsp), %xmm1
movq 0x10(%rsp), %r11
jmp 0x228760
vandps %xmm9, %xmm11, %xmm9
vmulps 0x200(%rsp), %xmm4, %xmm0
vmulps %xmm7, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm6, %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm13, %xmm4
vmulps 0x30(%r14), %xmm5, %xmm0
vcmpltps %xmm4, %xmm0, %xmm0
vmovaps 0x80(%r14), %xmm1
vmovaps %xmm1, 0x1c0(%rsp)
vmulps %xmm1, %xmm5, %xmm1
vcmpleps %xmm1, %xmm4, %xmm1
vandps %xmm0, %xmm1, %xmm6
vtestps %xmm9, %xmm6
je 0x228c86
vandps %xmm6, %xmm9, %xmm6
vcmpneqps 0x1cc2cbb(%rip), %xmm10, %xmm0 # 0x1eeba10
vtestps %xmm6, %xmm0
je 0x228c86
vandps %xmm6, %xmm0, %xmm6
vbroadcastss 0x34(%rbx), %xmm0
vandps 0x90(%r14), %xmm0, %xmm0
vpcmpeqd 0x1cc2c95(%rip), %xmm0, %xmm0 # 0x1eeba10
vtestps %xmm6, %xmm0
jb 0x228c86
vandnps %xmm6, %xmm0, %xmm6
movq 0x8(%rsp), %rdx
movq 0x10(%rdx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x228ec6
cmpq $0x0, 0x48(%rbx)
jne 0x228ec6
vmovdqa 0x30(%rsp), %xmm1
vpandn %xmm1, %xmm6, %xmm1
movq 0x8(%rsp), %r10
movq (%rsp), %rsi
jmp 0x228c95
vmovaps %xmm2, %xmm13
vandps %xmm10, %xmm12, %xmm10
vmulps 0xa0(%rsp), %xmm5, %xmm0
vmulps 0xb0(%rsp), %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm7, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm11, %xmm5
vmulps 0x30(%r14), %xmm6, %xmm0
vcmpltps %xmm5, %xmm0, %xmm0
vmovaps 0x80(%r14), %xmm1
vmovaps %xmm1, 0xf0(%rsp)
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm5, %xmm1
vandps %xmm0, %xmm1, %xmm7
vtestps %xmm10, %xmm7
je 0x228cd1
vandps %xmm7, %xmm10, %xmm7
vcmpneqps 0x1cc2be2(%rip), %xmm9, %xmm8 # 0x1eeba10
vtestps %xmm7, %xmm8
je 0x228cd1
vandps %xmm7, %xmm8, %xmm7
movq (%rbx), %rcx
movq 0x10(%rsp), %rax
movl 0x40(%rax,%rbp,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rcx
movq %rcx, 0x50(%rsp)
vbroadcastss 0x34(%rcx), %xmm0
vandps 0x90(%r14), %xmm0, %xmm0
vpcmpeqd 0x1cc2ba0(%rip), %xmm0, %xmm8 # 0x1eeba10
vtestps %xmm7, %xmm8
jb 0x228cd1
movq 0x10(%rsp), %rcx
movl 0x50(%rcx,%rbp,4), %ecx
vandnps %xmm7, %xmm8, %xmm7
movq 0x10(%rbx), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x22908a
movq 0x50(%rsp), %rdx
cmpq $0x0, 0x48(%rdx)
jne 0x22908a
vmovdqa 0x30(%rsp), %xmm1
vpandn %xmm1, %xmm7, %xmm1
movq %rbx, %r10
movq (%rsp), %rsi
vmovaps 0xe0(%rsp), %xmm4
jmp 0x228ce7
vrcpps %xmm5, %xmm0
vmulps %xmm0, %xmm5, %xmm1
vbroadcastss 0x1cc383d(%rip), %xmm5 # 0x1eec714
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm2, %xmm0, %xmm1
vminps %xmm5, %xmm1, %xmm1
vmulps %xmm3, %xmm0, %xmm2
vminps %xmm5, %xmm2, %xmm2
vsubps %xmm1, %xmm5, %xmm3
vsubps %xmm2, %xmm5, %xmm5
vmovaps 0x310(%rsp), %xmm7
vblendvps %xmm7, %xmm3, %xmm1, %xmm1
vblendvps %xmm7, %xmm5, %xmm2, %xmm2
movq 0x8(%rsp), %rsi
movq 0x8(%rsi), %rdx
vmovd %ecx, %xmm3
vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovd %eax, %xmm5
vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmovaps %xmm14, 0x130(%rsp)
vmovaps %xmm8, 0x140(%rsp)
vmovaps 0x200(%rsp), %xmm7
vmovaps %xmm7, 0x150(%rsp)
vmovaps %xmm1, 0x160(%rsp)
vmovaps %xmm2, 0x170(%rsp)
vmovdqa %xmm5, 0x180(%rsp)
vmovdqa %xmm3, 0x190(%rsp)
vmulps %xmm4, %xmm0, %xmm0
leaq 0x1a0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x1a0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x1b0(%rsp)
vmovaps 0x1c0(%rsp), %xmm1
vblendvps %xmm6, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm6, 0x60(%rsp)
leaq 0x60(%rsp), %rax
movq %rax, 0x70(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x78(%rsp)
movq 0x8(%rsi), %rax
movq %rax, 0x80(%rsp)
movq %r14, 0x88(%rsp)
leaq 0x130(%rsp), %rax
movq %rax, 0x90(%rsp)
movl $0x4, 0x98(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
je 0x229018
leaq 0x70(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x60(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x229254
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x229050
testb $0x2, (%rcx)
jne 0x229046
testb $0x40, 0x3e(%rbx)
je 0x229050
leaq 0x70(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x60(%rsp), %xmm0, %xmm1
vpxor 0x1cc2dbe(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x88(%rsp), %rax
vbroadcastss 0x1cc3b11(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x229264
vrcpps %xmm6, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vbroadcastss 0x1cc3679(%rip), %xmm6 # 0x1eec714
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm3, %xmm0, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps %xmm4, %xmm0, %xmm2
vminps %xmm6, %xmm2, %xmm2
vsubps %xmm1, %xmm6, %xmm3
vsubps %xmm2, %xmm6, %xmm4
vmovaps 0x300(%rsp), %xmm6
vblendvps %xmm6, %xmm3, %xmm1, %xmm1
vblendvps %xmm6, %xmm4, %xmm2, %xmm2
movq 0x8(%rbx), %rdx
vmovd %eax, %xmm3
vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovd %ecx, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovaps %xmm13, 0x130(%rsp)
vmovaps 0xb0(%rsp), %xmm6
vmovaps %xmm6, 0x140(%rsp)
vmovaps 0xa0(%rsp), %xmm6
vmovaps %xmm6, 0x150(%rsp)
vmovaps %xmm1, 0x160(%rsp)
vmovaps %xmm2, 0x170(%rsp)
vmovdqa %xmm4, 0x180(%rsp)
vmovdqa %xmm3, 0x190(%rsp)
vmulps %xmm5, %xmm0, %xmm0
leaq 0x1a0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x1a0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x1b0(%rsp)
vmovaps 0xf0(%rsp), %xmm1
vblendvps %xmm7, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm7, 0x60(%rsp)
leaq 0x60(%rsp), %rax
movq %rax, 0x70(%rsp)
movq 0x50(%rsp), %rcx
movq 0x18(%rcx), %rax
movq %rax, 0x78(%rsp)
movq 0x8(%rbx), %rax
movq %rax, 0x80(%rsp)
movq %r14, 0x88(%rsp)
leaq 0x130(%rsp), %rax
movq %rax, 0x90(%rsp)
movl $0x4, 0x98(%rsp)
movq 0x48(%rcx), %rax
testq %rax, %rax
je 0x2291e5
leaq 0x70(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x60(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x22928e
movq 0x10(%rbx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x22921d
testb $0x2, (%rcx)
jne 0x229213
movq 0x50(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x22921d
leaq 0x70(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x60(%rsp), %xmm0, %xmm1
vpxor 0x1cc2bf1(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x88(%rsp), %rax
vbroadcastss 0x1cc3944(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x22929e
vpcmpeqd 0x1cc27b4(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc2bbc(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm6
movq 0x108(%rsp), %rax
vmovaps 0x1c0(%rsp), %xmm1
vblendvps %xmm0, (%rax), %xmm1, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x228da9
vpcmpeqd 0x1cc277a(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc2b82(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm7
movq 0x108(%rsp), %rax
vmovaps 0xf0(%rsp), %xmm1
vblendvps %xmm0, (%rax), %xmm1, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x228ea7
vandps %xmm4, %xmm1, %xmm0
vtestps %xmm4, %xmm1
movq 0x1f8(%rsp), %rax
je 0x2292f0
incq %rax
vmovaps %xmm0, %xmm4
cmpq 0x1f0(%rsp), %rax
jb 0x228255
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm0, %xmm0
movq 0x100(%rsp), %r8
jmp 0x229306
vpcmpeqd %xmm1, %xmm1, %xmm1
vorps 0x110(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vtestps %xmm1, %xmm0
jb 0x229344
vmovaps 0x2f0(%rsp), %xmm1
vbroadcastss 0x1cc3853(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x2f0(%rsp)
xorl %eax, %eax
jmp 0x229347
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x227f3d
jmp 0x22942d
pushq $0x2
jmp 0x229346
vmovaps %xmm1, 0xc0(%rsp)
movq %r8, 0x100(%rsp)
movq 0x128(%rsp), %rdi
movq %r10, 0x8(%rsp)
movq %rsi, (%rsp)
bsfq %rbp, %rcx
movq %r13, %rdx
movq %rcx, 0x30(%rsp)
leaq 0x4f(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x228(%rsp), %rax
pushq %rax
vzeroupper
callq 0x26c1b2
popq %rcx
popq %rdx
testb %al, %al
je 0x2293b5
movq 0x30(%rsp), %rax
orl $-0x1, 0x110(%rsp,%rax,4)
leaq -0x1(%rbp), %rax
andq %rax, %rbp
movq 0x8(%rsp), %r10
movq 0x128(%rsp), %rdi
movq (%rsp), %rsi
jne 0x22937b
vmovaps 0x110(%rsp), %xmm0
vtestps 0x1cc2a3f(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x100(%rsp), %r8
vmovaps 0xc0(%rsp), %xmm1
jb 0x227f81
vmovaps 0x2f0(%rsp), %xmm1
vbroadcastss 0x1cc3777(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps 0xc0(%rsp), %xmm1
vmovaps %xmm0, 0x2f0(%rsp)
pushq $0x2
popq %rax
jmp 0x227f81
vmovaps 0x320(%rsp), %xmm0
vandps 0x110(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc373c(%rip), %xmm1 # 0x1eecb84
movq 0x108(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
addq $0x19f8, %rsp # imm = 0x19F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiMBIntersectorKPluecker<4, 4, true>>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/// Packet (K-wide) first-hit intersection entry point of the hybrid BVH traverser.
/// Traverses the BVH for up to K rays simultaneously ("chunk" mode) using a shared
/// node stack, and switches to per-ray traversal (intersect1) whenever the number
/// of still-active rays drops to or below a switch threshold.
/// NOTE(review): N, K, types, robust, PrimitiveIntersectorK and single are class
/// template parameters; their declaration is outside this excerpt.
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
/* pack the active lanes into a bit mask, one bit per ray */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* pure per-ray mode: trace each active ray independently and return */
if (single)
{
/* inactive lanes get an empty interval [+inf,-inf] so they never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* octant = per-ray direction-sign code; invalid rays are tagged 0xffffffff */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^0x7 is the exactly-opposite sign combination in all three axes */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: one chunk traversal per group of rays with similar octants */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
/* popcount of the per-axis sign differences relative to the reference ray */
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
/* group rays whose octant differs from the reference ray in at most one axis */
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark the selected rays as processed so they are not selected again */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* sentinel entry: popping invalidNode terminates the traversal loop */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active rays for chunk mode: finish them one at a time */
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* intersect1 may have found hits: tighten the packet's far distances */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
/* inner loop: descend through interior nodes */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 2 or more hits: bubble the nearest
pushed entries toward the stack top so they are popped first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* push cur back and re-pop it, entering the single-ray path above */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink the traversal far distances with any new hits found in the leaf */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* lazily-built subtree (e.g. instancing): push it for later traversal;
-inf near distance guarantees it is not culled */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %rbx
cmpq $0x8, 0x70(%rbx)
je 0x229630
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x229630
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
movzbl %al, %ebp
vmovaps (%rdx), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%rdx), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%rdx), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%rdx), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%rdx), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%rdx), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cf79d3(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1cc7aea(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cc3208(%rip), %xmm7 # 0x1eec714
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm2, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vbroadcastss 0x1cf7a31(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vblendvps %xmm8, %xmm6, %xmm2, %xmm2
vdivps %xmm3, %xmm7, %xmm3
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d313fb(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d2b41d(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d313dc(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d313c4(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d313bf(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%rdx), %xmm1
vmovaps 0x80(%rdx), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc2443(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cc3590(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r13
movq 0x70(%rbx), %rdx
movq %r12, %rdi
movq %rbx, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x26d170
popq %rax
popq %rcx
andq %r13, %rbp
jne 0x229602
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::avx::ArrayIntersectorK_1<4, embree::avx::QuadMiMBIntersectorKPluecker<4, 4, true>>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/// Packet (K-wide) occlusion (shadow-ray) entry point of the hybrid BVH traverser.
/// Like intersect(), but only answers "is anything hit" per ray: rays accumulate
/// into a 'terminated' mask, traversal stops early once all rays are occluded,
/// and occluded rays are flagged by writing -inf into ray.tfar at the end.
/// NOTE(review): N, K, types, robust, PrimitiveIntersectorK and single are class
/// template parameters; their declaration is outside this excerpt.
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get an empty interval [+inf,-inf] so they never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid rays start out as already terminated */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* sentinel entry: popping invalidNode terminates the traversal loop */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active rays for chunk mode: finish them one at a time */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
/* early out once every ray in the packet is occluded */
if (all(terminated)) break;
/* terminated rays get an empty interval so they stop traversing */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
/* inner loop: descend through interior nodes */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* push cur back and re-pop it, entering the single-ray path above */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
/* early out once every ray in the packet is occluded */
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* lazily-built subtree (e.g. instancing): push it for later traversal;
-inf near distance guarantees it is not culled */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* flag occluded rays by writing -inf into tfar (only originally valid rays) */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1a88, %rsp # imm = 0x1A88
movq %rsi, %rax
movq (%rsi), %rsi
cmpq $0x8, 0x70(%rsi)
je 0x22b1b5
movq %rdx, %r14
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%rdx), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x22b1b5
movq %rcx, %r10
movq %rax, 0x138(%rsp)
vandps %xmm3, %xmm4, %xmm10
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x260(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x270(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x280(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x290(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x2a0(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x2b0(%rsp)
vbroadcastss 0x1cf77c8(%rip), %xmm8 # 0x1f20ec4
vandps %xmm3, %xmm8, %xmm6
vbroadcastss 0x1cc78df(%rip), %xmm9 # 0x1ef0fe8
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cc2ffc(%rip), %xmm11 # 0x1eec714
vdivps %xmm3, %xmm11, %xmm3
vandps %xmm4, %xmm8, %xmm7
vcmpltps %xmm9, %xmm7, %xmm7
vdivps %xmm4, %xmm11, %xmm4
vandps %xmm5, %xmm8, %xmm8
vcmpltps %xmm9, %xmm8, %xmm8
vdivps %xmm5, %xmm11, %xmm5
vbroadcastss 0x1cf781f(%rip), %xmm9 # 0x1f20f60
vblendvps %xmm6, %xmm9, %xmm3, %xmm3
vblendvps %xmm7, %xmm9, %xmm4, %xmm4
vblendvps %xmm8, %xmm9, %xmm5, %xmm5
vmovaps %xmm3, 0x2c0(%rsp)
vmovaps %xmm4, 0x2d0(%rsp)
vmovaps %xmm5, 0x2e0(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d311e8(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x2f0(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d2b209(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d311c8(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vmovaps %xmm3, 0x300(%rsp)
vcmpnltps %xmm2, %xmm5, %xmm3
vbroadcastss 0x1d311af(%rip), %xmm4 # 0x1f5a96c
vbroadcastss 0x1d311aa(%rip), %xmm5 # 0x1f5a970
vblendvps %xmm3, %xmm4, %xmm5, %xmm3
vmovaps %xmm3, 0x310(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cc2234(%rip), %xmm1 # 0x1eeba20
vblendvps %xmm10, %xmm3, %xmm1, %xmm1
vmovaps %xmm1, 0x320(%rsp)
vbroadcastss 0x1cc3380(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm10, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x330(%rsp)
vmovaps %xmm10, 0x360(%rsp)
vxorps %xmm0, %xmm10, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
cmpq $0x0, 0x8(%rcx)
je 0x229844
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r8d, %r8d
cmpb $0x1, %al
adcq $0x2, %r8
jmp 0x229848
pushq $0x3
popq %r8
leaq 0x80(%r14), %rax
movq %rax, 0xd8(%rsp)
leaq 0x3f0(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xb90(%rsp), %r12
vbroadcastss 0x1cc21a8(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, -0x20(%r12)
movq 0x70(%rsi), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%r12)
leaq 0x1f266eb(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x350(%rsp)
vmovaps 0xf0(%rax), %xmm0
vmovaps %xmm0, 0x340(%rsp)
addq $-0x10, %r12
movq -0x8(%r15), %rbp
addq $-0x8, %r15
cmpq $-0x8, %rbp
je 0x22b0ab
vmovaps (%r12), %xmm1
vcmpltps 0x330(%rsp), %xmm1, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22b0bc
movzbl %al, %r13d
popcntl %r13d, %ebx
xorl %eax, %eax
cmpq %r8, %rbx
jbe 0x22b0c0
cmpq %r8, %rbx
pushq $0x8
popq %r9
jbe 0x22b0ae
testb $0x8, %bpl
jne 0x229b8b
vmovaps 0x330(%rsp), %xmm0
movq %rbp, %rax
andq $-0x10, %rax
andl $0x7, %ebp
movl %ebp, %ecx
vcmpnleps %xmm1, %xmm0, %xmm0
xorl %edx, %edx
movq %r9, %rbp
vbroadcastss 0x1cc20e8(%rip), %xmm1 # 0x1eeba20
movq (%rax,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x229af5
vmovaps %xmm1, %xmm15
vbroadcastss 0x80(%rax,%rdx,4), %xmm2
vbroadcastss 0x20(%rax,%rdx,4), %xmm3
vmovaps 0x70(%r14), %xmm1
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa0(%rax,%rdx,4), %xmm3
vbroadcastss 0x40(%rax,%rdx,4), %xmm4
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0xc0(%rax,%rdx,4), %xmm4
vbroadcastss 0x60(%rax,%rdx,4), %xmm5
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0x90(%rax,%rdx,4), %xmm5
vbroadcastss 0x30(%rax,%rdx,4), %xmm6
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0xb0(%rax,%rdx,4), %xmm6
vbroadcastss 0x50(%rax,%rdx,4), %xmm7
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0xd0(%rax,%rdx,4), %xmm7
vbroadcastss 0x70(%rax,%rdx,4), %xmm8
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmovaps 0x260(%rsp), %xmm8
vmovaps 0x270(%rsp), %xmm9
vmovaps 0x280(%rsp), %xmm10
vmovaps 0x2c0(%rsp), %xmm11
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm12
vsubps %xmm9, %xmm3, %xmm2
vmovaps 0x2d0(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm13
vsubps %xmm10, %xmm4, %xmm2
vmovaps 0x2e0(%rsp), %xmm4
vmulps %xmm4, %xmm2, %xmm14
vsubps %xmm8, %xmm5, %xmm2
vmulps %xmm2, %xmm11, %xmm5
vsubps %xmm9, %xmm6, %xmm2
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm7, %xmm2
vmulps %xmm4, %xmm2, %xmm4
vpminsd %xmm5, %xmm12, %xmm2
vpminsd %xmm3, %xmm13, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpminsd %xmm4, %xmm14, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vbroadcastss 0x1cf649c(%rip), %xmm6 # 0x1f1ff10
vmulps %xmm6, %xmm2, %xmm2
vpmaxsd %xmm5, %xmm12, %xmm5
vpmaxsd %xmm3, %xmm13, %xmm3
vpminsd %xmm3, %xmm5, %xmm3
vpmaxsd %xmm4, %xmm14, %xmm4
vpminsd %xmm4, %xmm3, %xmm4
vpmaxsd 0x320(%rsp), %xmm2, %xmm3
vbroadcastss 0x1cf6470(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm4, %xmm4
vpminsd 0x330(%rsp), %xmm4, %xmm4
cmpl $0x6, %ecx
je 0x229b0e
vcmpleps %xmm4, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm1
vpslld $0x1f, %xmm1, %xmm1
vtestps %xmm1, %xmm1
je 0x229b3b
vbroadcastss 0x1cc1f4b(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
cmpq $0x8, %rbp
je 0x229af2
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm15, (%r12)
addq $0x10, %r12
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x229b41
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x229938
jmp 0x229b41
vcmpleps %xmm4, %xmm3, %xmm3
vbroadcastss 0xe0(%rax,%rdx,4), %xmm4
vcmpleps %xmm1, %xmm4, %xmm4
vbroadcastss 0xf0(%rax,%rdx,4), %xmm5
vcmpltps %xmm5, %xmm1, %xmm1
vandps %xmm1, %xmm4, %xmm1
vandps %xmm3, %xmm1, %xmm1
jmp 0x229abc
vmovaps %xmm15, %xmm1
jmp 0x229af5
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x229b84
vmovaps 0x330(%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r8
jae 0x229b73
testb %cl, %cl
jne 0x229906
jmp 0x22b0ae
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm1, (%r12)
addq $0x10, %r12
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x229b66
cmpq $-0x8, %rbp
je 0x22b0ab
vmovaps 0x330(%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22b0bc
movl %ebp, %eax
andl $0xf, %eax
vmovaps 0xf0(%rsp), %xmm0
addq $-0x8, %rax
movq %rax, 0x240(%rsp)
je 0x22b069
movq %r8, 0xd0(%rsp)
andq $-0x10, %rbp
vxorps 0x1cc223e(%rip), %xmm0, %xmm4 # 0x1eebe20
xorl %eax, %eax
movq %r10, 0x40(%rsp)
movq %rsi, 0x38(%rsp)
movq %rax, 0x248(%rsp)
imulq $0x60, %rax, %r11
addq %rbp, %r11
vmovmskps %xmm4, %eax
movq %rax, 0x30(%rsp)
xorl %r13d, %r13d
vmovaps %xmm4, 0x60(%rsp)
vmovaps %xmm4, 0xe0(%rsp)
movl 0x50(%r11,%r13,4), %eax
cmpl $-0x1, %eax
je 0x22b029
movq (%r10), %rdx
movl 0x40(%r11,%r13,4), %ecx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rbx
vbroadcastss 0x2c(%rbx), %xmm0
vmovss 0x30(%rbx), %xmm1
vbroadcastss 0x28(%rbx), %xmm2
vmovaps 0x70(%r14), %xmm3
vsubps %xmm0, %xmm3, %xmm3
vsubss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vdivps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vroundps $0x1, %xmm0, %xmm1
vaddss 0x1cc6d59(%rip), %xmm2, %xmm2 # 0x1ef09cc
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vminps %xmm2, %xmm1, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vcvtps2dq %xmm1, %xmm1
vmovapd %xmm1, 0x70(%rsp)
movq 0x30(%rsp), %rdx
bsfq %rdx, %rdx
movslq 0x70(%rsp,%rdx,4), %r8
vmovd %r8d, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vpcmpeqd %xmm1, %xmm2, %xmm1
vtestps %xmm4, %xmm1
jae 0x22aba3
movq 0xe0(%rbx), %rdi
imulq $0x38, %r8, %r8
movq (%rdi,%r8), %rdx
movq 0x38(%rdi,%r8), %r9
movl (%r11,%r13,4), %edi
movl 0x10(%r11,%r13,4), %r8d
vbroadcastss (%rdx,%rdi,4), %xmm2
vbroadcastss 0x4(%rdx,%rdi,4), %xmm3
vbroadcastss 0x8(%rdx,%rdi,4), %xmm4
vbroadcastss (%r9,%rdi,4), %xmm5
vbroadcastss 0x4(%r9,%rdi,4), %xmm6
vbroadcastss 0x8(%r9,%rdi,4), %xmm7
vbroadcastss 0x1cc2a0d(%rip), %xmm1 # 0x1eec714
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulps %xmm7, %xmm0, %xmm7
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm5, %xmm2, %xmm12
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm6, %xmm3, %xmm14
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vbroadcastss (%rdx,%r8,4), %xmm5
vbroadcastss 0x4(%rdx,%r8,4), %xmm6
vbroadcastss 0x8(%rdx,%r8,4), %xmm7
vbroadcastss (%r9,%r8,4), %xmm8
vbroadcastss 0x4(%r9,%r8,4), %xmm9
vbroadcastss 0x8(%r9,%r8,4), %xmm10
vmulps %xmm0, %xmm8, %xmm8
vmulps %xmm0, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm5, %xmm8, %xmm2
vmovaps %xmm2, 0x20(%rsp)
vmulps %xmm6, %xmm1, %xmm5
vaddps %xmm5, %xmm9, %xmm2
vmovaps %xmm2, 0x10(%rsp)
vmulps %xmm7, %xmm1, %xmm5
vaddps %xmm5, %xmm10, %xmm2
vmovaps %xmm2, 0x50(%rsp)
movl 0x20(%r11,%r13,4), %edi
vbroadcastss (%rdx,%rdi,4), %xmm5
vbroadcastss 0x4(%rdx,%rdi,4), %xmm6
vbroadcastss 0x8(%rdx,%rdi,4), %xmm7
vbroadcastss (%r9,%rdi,4), %xmm8
vbroadcastss 0x4(%r9,%rdi,4), %xmm9
vbroadcastss 0x8(%r9,%rdi,4), %xmm10
vmulps %xmm0, %xmm8, %xmm8
vmulps %xmm0, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vmovaps %xmm5, 0x1e0(%rsp)
vmulps %xmm6, %xmm1, %xmm5
vaddps %xmm5, %xmm9, %xmm5
vmovaps %xmm5, 0x1d0(%rsp)
vmulps %xmm7, %xmm1, %xmm5
vaddps %xmm5, %xmm10, %xmm5
vmovaps %xmm5, 0x250(%rsp)
movl 0x30(%r11,%r13,4), %edi
vbroadcastss (%r9,%rdi,4), %xmm5
vbroadcastss 0x4(%r9,%rdi,4), %xmm6
vbroadcastss 0x8(%r9,%rdi,4), %xmm7
vmulps %xmm5, %xmm0, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulps %xmm7, %xmm0, %xmm0
vbroadcastss (%rdx,%rdi,4), %xmm7
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm5, %xmm7, %xmm8
vbroadcastss 0x4(%rdx,%rdi,4), %xmm5
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm6
vbroadcastss 0x8(%rdx,%rdi,4), %xmm5
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm1
vmovaps (%r14), %xmm0
vmovaps 0x10(%r14), %xmm11
vmovaps 0x20(%r14), %xmm15
vsubps %xmm0, %xmm12, %xmm7
vsubps %xmm11, %xmm14, %xmm2
vsubps %xmm15, %xmm4, %xmm3
vmovaps %xmm8, 0x3d0(%rsp)
vsubps %xmm0, %xmm8, %xmm14
vmovaps %xmm0, %xmm12
vmovaps %xmm6, 0x3c0(%rsp)
vsubps %xmm11, %xmm6, %xmm4
vmovaps %xmm4, 0xc0(%rsp)
vmovaps %xmm1, 0x3b0(%rsp)
vsubps %xmm15, %xmm1, %xmm5
vmovaps %xmm5, 0xb0(%rsp)
vsubps %xmm7, %xmm14, %xmm1
vsubps %xmm2, %xmm4, %xmm6
vsubps %xmm3, %xmm5, %xmm0
vaddps %xmm2, %xmm4, %xmm4
vaddps %xmm3, %xmm5, %xmm8
vmulps %xmm0, %xmm4, %xmm9
vmulps %xmm6, %xmm8, %xmm10
vsubps %xmm9, %xmm10, %xmm5
vaddps %xmm7, %xmm14, %xmm9
vmulps %xmm1, %xmm8, %xmm8
vmovaps %xmm0, 0x210(%rsp)
vmulps %xmm0, %xmm9, %xmm10
vsubps %xmm8, %xmm10, %xmm10
vmovaps %xmm6, 0x220(%rsp)
vmulps %xmm6, %xmm9, %xmm8
vmovaps %xmm1, 0x230(%rsp)
vmulps %xmm4, %xmm1, %xmm4
vsubps %xmm8, %xmm4, %xmm4
vmovaps 0x60(%r14), %xmm13
vmulps %xmm4, %xmm13, %xmm4
vmovaps 0x50(%r14), %xmm0
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm4, %xmm10, %xmm4
vmovaps 0x40(%r14), %xmm10
vmulps %xmm5, %xmm10, %xmm5
vaddps %xmm4, %xmm5, %xmm9
vmovaps 0x10(%rsp), %xmm1
vsubps %xmm11, %xmm1, %xmm5
vmovaps 0x50(%rsp), %xmm1
vsubps %xmm15, %xmm1, %xmm6
vsubps %xmm5, %xmm2, %xmm15
vsubps %xmm6, %xmm3, %xmm11
vmovaps %xmm2, 0x110(%rsp)
vaddps %xmm5, %xmm2, %xmm4
vmovaps %xmm3, 0x100(%rsp)
vaddps %xmm6, %xmm3, %xmm1
vmulps %xmm4, %xmm11, %xmm2
vmulps %xmm1, %xmm15, %xmm3
vsubps %xmm2, %xmm3, %xmm2
vmovaps 0x20(%rsp), %xmm3
vsubps %xmm12, %xmm3, %xmm3
vsubps %xmm3, %xmm7, %xmm12
vmulps %xmm1, %xmm12, %xmm1
vmovaps %xmm7, 0x120(%rsp)
vaddps %xmm3, %xmm7, %xmm7
vmulps %xmm7, %xmm11, %xmm8
vsubps %xmm1, %xmm8, %xmm1
vmovaps %xmm15, 0x1f0(%rsp)
vmulps %xmm7, %xmm15, %xmm7
vmulps %xmm4, %xmm12, %xmm4
vsubps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm13, %xmm4
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vmulps %xmm2, %xmm10, %xmm2
vaddps %xmm1, %xmm2, %xmm7
vsubps %xmm14, %xmm3, %xmm4
vaddps %xmm3, %xmm14, %xmm1
vmovaps 0xc0(%rsp), %xmm2
vsubps %xmm2, %xmm5, %xmm15
vaddps %xmm2, %xmm5, %xmm2
vmovaps 0xb0(%rsp), %xmm3
vsubps %xmm3, %xmm6, %xmm14
vaddps %xmm3, %xmm6, %xmm3
vmulps %xmm2, %xmm14, %xmm5
vmulps %xmm3, %xmm15, %xmm6
vsubps %xmm5, %xmm6, %xmm5
vmulps %xmm3, %xmm4, %xmm3
vmulps %xmm1, %xmm14, %xmm6
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm2, %xmm4, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm13, 0xc0(%rsp)
vmulps %xmm1, %xmm13, %xmm1
vmovaps %xmm0, 0xb0(%rsp)
vmulps %xmm3, %xmm0, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm10, 0x200(%rsp)
vmulps %xmm5, %xmm10, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm7, %xmm9, %xmm2
vaddps %xmm2, %xmm1, %xmm0
vminps %xmm7, %xmm9, %xmm2
vminps %xmm1, %xmm2, %xmm2
vbroadcastss 0x1cf6e70(%rip), %xmm6 # 0x1f20ec4
vandps %xmm6, %xmm0, %xmm8
vbroadcastss 0x1cf6e6b(%rip), %xmm3 # 0x1f20ecc
vmulps %xmm3, %xmm8, %xmm3
vbroadcastss 0x1cf6e52(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm3, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm9, 0x3a0(%rsp)
vmaxps %xmm7, %xmm9, %xmm5
vmaxps %xmm1, %xmm5, %xmm1
vcmpleps %xmm3, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm13
vtestps 0x60(%rsp), %xmm13
je 0x22a734
vmovaps %xmm8, 0x370(%rsp)
vmovaps %xmm0, 0x380(%rsp)
vmovaps %xmm7, 0x390(%rsp)
vmovaps 0x210(%rsp), %xmm9
vmovaps 0x1f0(%rsp), %xmm0
vmulps %xmm0, %xmm9, %xmm1
vmovaps 0x220(%rsp), %xmm8
vmulps %xmm11, %xmm8, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm11, %xmm15, %xmm3
vmulps %xmm0, %xmm14, %xmm5
vsubps %xmm3, %xmm5, %xmm5
vandps %xmm6, %xmm1, %xmm1
vandps %xmm6, %xmm3, %xmm3
vcmpltps %xmm3, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm7
vmulps %xmm14, %xmm12, %xmm1
vmulps %xmm9, %xmm12, %xmm2
vmovaps 0x230(%rsp), %xmm9
vmulps %xmm11, %xmm9, %xmm3
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm4, %xmm11, %xmm5
vsubps %xmm1, %xmm5, %xmm5
vandps %xmm6, %xmm3, %xmm3
vandps %xmm6, %xmm1, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm11
vmulps %xmm0, %xmm4, %xmm1
vmulps %xmm0, %xmm9, %xmm2
vmulps %xmm8, %xmm12, %xmm3
vmovaps 0x60(%rsp), %xmm8
vmulps %xmm15, %xmm12, %xmm0
vmovaps %xmm7, %xmm12
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm6, %xmm3, %xmm3
vandps %xmm6, %xmm1, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm0, %xmm0
vmulps 0xc0(%rsp), %xmm0, %xmm1
vmulps 0xb0(%rsp), %xmm11, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x200(%rsp), %xmm7, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm1, %xmm2
vmulps 0x100(%rsp), %xmm0, %xmm1
vmulps 0x110(%rsp), %xmm11, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmulps 0x120(%rsp), %xmm7, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm1, %xmm1
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm4
vbroadcastss 0x1cc2546(%rip), %xmm5 # 0x1eec714
vsubps %xmm4, %xmm5, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vmovaps 0x30(%r14), %xmm3
vmovaps 0x80(%r14), %xmm7
vcmpleps %xmm1, %xmm3, %xmm3
vcmpleps %xmm7, %xmm1, %xmm4
vandps %xmm4, %xmm3, %xmm3
vxorps %xmm10, %xmm10, %xmm10
vcmpneqps %xmm2, %xmm10, %xmm2
vandps %xmm3, %xmm2, %xmm3
vandps %xmm8, %xmm13, %xmm2
vpslld $0x1f, %xmm3, %xmm3
vpsrad $0x1f, %xmm3, %xmm3
vtestps %xmm2, %xmm3
je 0x22a734
vandps %xmm2, %xmm3, %xmm2
vbroadcastss 0x34(%rbx), %xmm3
vandps 0x90(%r14), %xmm3, %xmm3
vpcmpeqd %xmm3, %xmm10, %xmm3
vtestps %xmm2, %xmm3
vpcmpeqd %xmm9, %xmm9, %xmm9
jb 0x22a734
vandnps %xmm2, %xmm3, %xmm2
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x22a74b
cmpq $0x0, 0x48(%rbx)
jne 0x22a74b
vmovaps 0x20(%rsp), %xmm6
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x50(%rsp), %xmm10
vmovdqa 0x60(%rsp), %xmm0
vpandn %xmm0, %xmm2, %xmm0
vmovdqa %xmm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %xmm0
vtestps %xmm0, %xmm0
je 0x22a70b
vmovaps (%r14), %xmm1
vmovaps 0x10(%r14), %xmm3
vmovaps 0x20(%r14), %xmm5
vmovaps 0x1e0(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm9
vmovaps 0x1d0(%rsp), %xmm0
vsubps %xmm3, %xmm0, %xmm15
vmovaps 0x250(%rsp), %xmm0
vsubps %xmm5, %xmm0, %xmm4
vmovaps 0x3d0(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x3c0(%rsp), %xmm2
vsubps %xmm3, %xmm2, %xmm0
vmovaps %xmm0, 0xb0(%rsp)
vmovaps 0x3b0(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm8
vsubps %xmm1, %xmm6, %xmm1
vsubps %xmm3, %xmm7, %xmm6
vsubps %xmm5, %xmm10, %xmm5
vmovaps %xmm5, 0x50(%rsp)
vsubps %xmm9, %xmm1, %xmm2
vsubps %xmm15, %xmm6, %xmm12
vsubps %xmm4, %xmm5, %xmm0
vaddps %xmm6, %xmm15, %xmm3
vaddps %xmm4, %xmm5, %xmm7
vmulps %xmm0, %xmm3, %xmm10
vmulps %xmm7, %xmm12, %xmm11
vsubps %xmm10, %xmm11, %xmm14
vaddps %xmm1, %xmm9, %xmm10
vmulps %xmm7, %xmm2, %xmm7
vmovaps %xmm0, 0x220(%rsp)
vmulps %xmm0, %xmm10, %xmm11
vsubps %xmm7, %xmm11, %xmm7
vmovaps %xmm12, 0x230(%rsp)
vmulps %xmm12, %xmm10, %xmm10
vmovaps %xmm2, 0x100(%rsp)
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm3, %xmm3
vmovaps 0x60(%r14), %xmm0
vmulps %xmm0, %xmm3, %xmm3
vmovaps 0x50(%r14), %xmm11
vmulps %xmm7, %xmm11, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vmovaps 0x40(%r14), %xmm13
vmulps %xmm14, %xmm13, %xmm7
vaddps %xmm3, %xmm7, %xmm2
vmovaps %xmm2, 0x20(%rsp)
vmovaps 0xb0(%rsp), %xmm5
vsubps %xmm5, %xmm15, %xmm7
vsubps %xmm8, %xmm4, %xmm10
vmovaps %xmm15, 0x120(%rsp)
vaddps %xmm5, %xmm15, %xmm3
vmovaps %xmm4, 0x110(%rsp)
vaddps %xmm4, %xmm8, %xmm2
vbroadcastss 0x1cf6aeb(%rip), %xmm14 # 0x1f20ec4
vmulps %xmm3, %xmm10, %xmm15
vmulps %xmm2, %xmm7, %xmm4
vsubps %xmm15, %xmm4, %xmm12
vmovaps 0x10(%rsp), %xmm4
vsubps %xmm4, %xmm9, %xmm15
vmulps %xmm2, %xmm15, %xmm2
vmovaps %xmm9, 0xc0(%rsp)
vaddps %xmm4, %xmm9, %xmm9
vmovaps %xmm10, 0x200(%rsp)
vmulps %xmm10, %xmm9, %xmm10
vsubps %xmm2, %xmm10, %xmm2
vmovaps 0x20(%rsp), %xmm10
vmovaps %xmm7, 0x210(%rsp)
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm3, %xmm15, %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm0, %xmm3, %xmm3
vmulps %xmm2, %xmm11, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vmulps %xmm12, %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm12
vsubps %xmm1, %xmm4, %xmm3
vaddps %xmm1, %xmm4, %xmm1
vsubps %xmm6, %xmm5, %xmm9
vaddps %xmm6, %xmm5, %xmm2
vmovaps 0x50(%rsp), %xmm4
vsubps %xmm4, %xmm8, %xmm7
vaddps %xmm4, %xmm8, %xmm4
vmulps %xmm7, %xmm2, %xmm5
vmulps %xmm4, %xmm9, %xmm6
vsubps %xmm5, %xmm6, %xmm5
vmulps %xmm4, %xmm3, %xmm4
vmulps %xmm7, %xmm1, %xmm6
vsubps %xmm4, %xmm6, %xmm4
vmovaps %xmm9, %xmm6
vmulps %xmm1, %xmm9, %xmm1
vmulps %xmm2, %xmm3, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm0, 0x10(%rsp)
vmulps %xmm0, %xmm1, %xmm1
vmovaps %xmm11, 0x50(%rsp)
vmulps %xmm4, %xmm11, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm5, %xmm13, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm12, %xmm10, %xmm2
vaddps %xmm2, %xmm1, %xmm9
vminps %xmm12, %xmm10, %xmm2
vminps %xmm1, %xmm2, %xmm2
vandps %xmm14, %xmm9, %xmm5
vbroadcastss 0x1cf6a02(%rip), %xmm4 # 0x1f20ecc
vmovaps %xmm5, 0x1f0(%rsp)
vmulps %xmm4, %xmm5, %xmm4
vbroadcastss 0x1cf69e0(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm4, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm12, 0x1e0(%rsp)
vmaxps %xmm12, %xmm10, %xmm5
vmaxps %xmm1, %xmm5, %xmm1
vcmpleps %xmm4, %xmm1, %xmm1
vorps %xmm1, %xmm2, %xmm0
vtestps 0x60(%rsp), %xmm0
je 0x22a973
vmovaps %xmm13, %xmm11
vmovaps %xmm9, 0x1d0(%rsp)
vmovaps 0x220(%rsp), %xmm9
vmovaps %xmm0, 0xb0(%rsp)
vmovaps 0x210(%rsp), %xmm0
vmulps %xmm0, %xmm9, %xmm1
vmovaps 0x230(%rsp), %xmm10
vmovaps 0x200(%rsp), %xmm8
vmulps %xmm8, %xmm10, %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmulps %xmm6, %xmm8, %xmm4
vmulps %xmm7, %xmm0, %xmm5
vsubps %xmm4, %xmm5, %xmm5
vandps %xmm1, %xmm14, %xmm1
vandps %xmm4, %xmm14, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm12
vmulps %xmm7, %xmm15, %xmm1
vmulps %xmm9, %xmm15, %xmm2
vmovaps 0x100(%rsp), %xmm13
vmulps %xmm8, %xmm13, %xmm4
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm3, %xmm8, %xmm5
vsubps %xmm1, %xmm5, %xmm5
vandps %xmm4, %xmm14, %xmm4
vandps %xmm1, %xmm14, %xmm1
vcmpltps %xmm1, %xmm4, %xmm1
vblendvps %xmm1, %xmm2, %xmm5, %xmm9
vmulps %xmm0, %xmm3, %xmm1
vmulps %xmm0, %xmm13, %xmm2
vmulps %xmm10, %xmm15, %xmm3
vmulps %xmm6, %xmm15, %xmm0
vsubps %xmm3, %xmm2, %xmm2
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm3, %xmm14, %xmm3
vandps %xmm1, %xmm14, %xmm1
vcmpltps %xmm1, %xmm3, %xmm1
vblendvps %xmm1, %xmm2, %xmm0, %xmm5
vmulps 0x10(%rsp), %xmm5, %xmm0
vmulps 0x50(%rsp), %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm12, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm1
vmulps 0x110(%rsp), %xmm5, %xmm0
vmulps 0x120(%rsp), %xmm9, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps 0xc0(%rsp), %xmm12, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vrcpps %xmm1, %xmm2
vmulps %xmm2, %xmm1, %xmm3
vbroadcastss 0x1cc20e5(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vmovaps 0x30(%r14), %xmm2
vmovaps 0x80(%r14), %xmm11
vcmpleps %xmm0, %xmm2, %xmm2
vcmpleps %xmm11, %xmm0, %xmm3
vandps %xmm3, %xmm2, %xmm2
vxorps %xmm3, %xmm3, %xmm3
vcmpneqps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm2
vmovaps 0xb0(%rsp), %xmm1
vandps 0x60(%rsp), %xmm1, %xmm1
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vtestps %xmm1, %xmm2
je 0x22a973
vandps %xmm1, %xmm2, %xmm1
movq (%r10), %rcx
movl 0x40(%r11,%r13,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rbx
vbroadcastss 0x34(%rbx), %xmm2
vandps 0x90(%r14), %xmm2, %xmm2
vpcmpeqd %xmm3, %xmm2, %xmm2
vtestps %xmm1, %xmm2
vmovaps 0xe0(%rsp), %xmm4
jb 0x22a6fb
movl 0x50(%r11,%r13,4), %ecx
vandnps %xmm1, %xmm2, %xmm1
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x22a981
cmpq $0x0, 0x48(%rbx)
jne 0x22a981
vmovdqa 0x60(%rsp), %xmm0
vpandn %xmm0, %xmm1, %xmm0
vmovdqa %xmm0, 0x60(%rsp)
vmovaps 0x60(%rsp), %xmm0
vtestps %xmm0, %xmm0
setne %al
jmp 0x22a716
xorl %eax, %eax
vmovaps 0xe0(%rsp), %xmm4
testb %al, %al
je 0x22b029
leaq 0x1(%r13), %rax
cmpq $0x3, %r13
movq %rax, %r13
jb 0x229c18
jmp 0x22b029
vmovaps 0x20(%rsp), %xmm6
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x50(%rsp), %xmm10
jmp 0x22a28a
vmovaps 0x380(%rsp), %xmm4
vrcpps %xmm4, %xmm3
vmulps %xmm3, %xmm4, %xmm4
vbroadcastss 0x1cc1faf(%rip), %xmm6 # 0x1eec714
vsubps %xmm4, %xmm6, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1cc686e(%rip), %xmm4 # 0x1ef0fe8
vmovaps 0x370(%rsp), %xmm5
vcmpnltps %xmm4, %xmm5, %xmm4
vandps %xmm3, %xmm4, %xmm3
vmulps 0x3a0(%rsp), %xmm3, %xmm4
vminps %xmm6, %xmm4, %xmm4
vmulps 0x390(%rsp), %xmm3, %xmm3
vminps %xmm6, %xmm3, %xmm3
vsubps %xmm4, %xmm6, %xmm5
vsubps %xmm3, %xmm6, %xmm6
vmovaps 0x350(%rsp), %xmm8
vblendvps %xmm8, %xmm5, %xmm4, %xmm4
vblendvps %xmm8, %xmm6, %xmm3, %xmm3
movq 0x8(%r10), %rdx
vmovd %ecx, %xmm5
vpshufd $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmovd %eax, %xmm6
vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps %xmm12, 0x140(%rsp)
vmovaps %xmm11, 0x150(%rsp)
vmovaps %xmm0, 0x160(%rsp)
vmovaps %xmm4, 0x170(%rsp)
vmovaps %xmm3, 0x180(%rsp)
vmovdqa %xmm6, 0x190(%rsp)
vmovdqa %xmm5, 0x1a0(%rsp)
leaq 0x1b0(%rsp), %rax
vcmptrueps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rax)
vbroadcastss (%rdx), %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
vbroadcastss 0x4(%rdx), %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vblendvps %xmm2, %xmm1, %xmm7, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm2, 0x70(%rsp)
leaq 0x70(%rsp), %rax
movq %rax, 0x80(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x88(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x90(%rsp)
movq %r14, 0x98(%rsp)
leaq 0x140(%rsp), %rax
movq %rax, 0xa0(%rsp)
movl $0x4, 0xa8(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %r11, 0x8(%rsp)
vmovaps %xmm7, 0xb0(%rsp)
je 0x22a8e9
leaq 0x80(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0xb0(%rsp), %xmm7
movq 0x8(%rsp), %r11
vxorps %xmm10, %xmm10, %xmm10
vpcmpeqd %xmm9, %xmm9, %xmm9
movq 0x38(%rsp), %rsi
movq 0x40(%rsp), %r10
vmovdqa 0x70(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x22afcf
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x22a941
testb $0x2, (%rcx)
jne 0x22a912
testb $0x40, 0x3e(%rbx)
je 0x22a941
leaq 0x80(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0xb0(%rsp), %xmm7
movq 0x8(%rsp), %r11
vxorps %xmm10, %xmm10, %xmm10
vpcmpeqd %xmm9, %xmm9, %xmm9
movq 0x38(%rsp), %rsi
movq 0x40(%rsp), %r10
vpcmpeqd 0x70(%rsp), %xmm10, %xmm1
vpxor %xmm1, %xmm9, %xmm0
movq 0x98(%rsp), %rax
vbroadcastss 0x1cc2228(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x22afd7
vmovaps 0xe0(%rsp), %xmm4
jmp 0x22a6fb
vmovaps 0x1d0(%rsp), %xmm3
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cc1d79(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cc6638(%rip), %xmm3 # 0x1ef0fe8
vmovaps 0x1f0(%rsp), %xmm4
vcmpnltps %xmm3, %xmm4, %xmm3
vandps %xmm2, %xmm3, %xmm2
vmulps 0x20(%rsp), %xmm2, %xmm3
vminps %xmm6, %xmm3, %xmm3
vmulps 0x1e0(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vsubps %xmm3, %xmm6, %xmm4
vsubps %xmm2, %xmm6, %xmm6
vmovaps 0x340(%rsp), %xmm7
vblendvps %xmm7, %xmm4, %xmm3, %xmm3
vblendvps %xmm7, %xmm6, %xmm2, %xmm2
movq 0x8(%r10), %rdx
vmovd %eax, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovd %ecx, %xmm6
vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovaps %xmm12, 0x140(%rsp)
vmovaps %xmm9, 0x150(%rsp)
vmovaps %xmm5, 0x160(%rsp)
vmovaps %xmm3, 0x170(%rsp)
vmovaps %xmm2, 0x180(%rsp)
vmovdqa %xmm6, 0x190(%rsp)
vmovdqa %xmm4, 0x1a0(%rsp)
leaq 0x1b0(%rsp), %rax
vcmptrueps %ymm2, %ymm2, %ymm2
vmovups %ymm2, (%rax)
vbroadcastss (%rdx), %xmm2
vmovaps %xmm2, 0x1b0(%rsp)
vbroadcastss 0x4(%rdx), %xmm2
vmovaps %xmm2, 0x1c0(%rsp)
vblendvps %xmm1, %xmm0, %xmm11, %xmm0
vmovaps %xmm0, 0x80(%r14)
vmovaps %xmm1, 0x70(%rsp)
leaq 0x70(%rsp), %rax
movq %rax, 0x80(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x88(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x90(%rsp)
movq %r14, 0x98(%rsp)
leaq 0x140(%rsp), %rax
movq %rax, 0xa0(%rsp)
movl $0x4, 0xa8(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %r11, 0x8(%rsp)
vmovaps %xmm11, 0x10(%rsp)
je 0x22ab0c
leaq 0x80(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x10(%rsp), %xmm11
movq 0x8(%rsp), %r11
movq 0x38(%rsp), %rsi
movq 0x40(%rsp), %r10
vmovdqa 0x70(%rsp), %xmm0
vptest %xmm0, %xmm0
vmovaps 0xe0(%rsp), %xmm4
je 0x22aff8
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x22ab69
testb $0x2, (%rcx)
jne 0x22ab3e
testb $0x40, 0x3e(%rbx)
je 0x22ab69
leaq 0x80(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x10(%rsp), %xmm11
movq 0x8(%rsp), %r11
vmovaps 0xe0(%rsp), %xmm4
movq 0x38(%rsp), %rsi
movq 0x40(%rsp), %r10
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x70(%rsp), %xmm0, %xmm1
vpxor 0x1cc12a5(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x98(%rsp), %rax
vbroadcastss 0x1cc1ff8(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x22b008
movq %r11, 0x8(%rsp)
cmpq $0x0, 0x30(%rsp)
je 0x22ac4e
movq 0xe0(%rbx), %rsi
movq %rsi, 0x10(%rsp)
addq $0x38, %rsi
movq %rsi, 0x20(%rsp)
movq 0x8(%rsp), %rdi
movl (%rdi,%r13,4), %r8d
movq 0x30(%rsp), %r9
movq %rdx, %r10
movslq 0x70(%rsp,%r10,4), %r11
imulq $0x38, %r11, %r11
movq 0x10(%rsp), %rsi
movq (%rsi,%r11), %rdi
movq 0x20(%rsp), %rsi
movq (%rsi,%r11), %r11
vmovups (%rdi,%r8,4), %xmm1
vmovdqu (%r11,%r8,4), %xmm2
vmovss %xmm1, 0x140(%rsp,%r10,4)
vextractps $0x1, %xmm1, 0x150(%rsp,%r10,4)
vextractps $0x2, %xmm1, 0x160(%rsp,%r10,4)
vmovd %xmm2, 0x80(%rsp,%r10,4)
vpextrd $0x1, %xmm2, 0x90(%rsp,%r10,4)
vpextrd $0x2, %xmm2, 0xa0(%rsp,%r10,4)
btcq %r10, %r9
bsfq %r9, %r10
testq %r9, %r9
jne 0x22abda
vbroadcastss 0x1cc1abd(%rip), %xmm1 # 0x1eec714
vsubps %xmm0, %xmm1, %xmm1
vmulps 0x140(%rsp), %xmm1, %xmm12
vmulps 0x150(%rsp), %xmm1, %xmm14
vmulps 0x160(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x50(%rsp)
vmulps 0x80(%rsp), %xmm0, %xmm2
vmovaps %xmm2, 0xb0(%rsp)
vmulps 0x90(%rsp), %xmm0, %xmm2
vmovaps %xmm2, 0xc0(%rsp)
vmulps 0xa0(%rsp), %xmm0, %xmm4
cmpq $0x0, 0x30(%rsp)
je 0x22ad50
movq 0xe0(%rbx), %rsi
movq %rsi, 0x10(%rsp)
addq $0x38, %rsi
movq %rsi, 0x20(%rsp)
movq 0x8(%rsp), %rdi
movl 0x10(%rdi,%r13,4), %r8d
movq 0x30(%rsp), %r9
movq %rdx, %r10
movslq 0x70(%rsp,%r10,4), %r11
imulq $0x38, %r11, %r11
movq 0x10(%rsp), %rsi
movq (%rsi,%r11), %rdi
movq 0x20(%rsp), %rsi
movq (%rsi,%r11), %r11
vmovups (%rdi,%r8,4), %xmm5
vmovups (%r11,%r8,4), %xmm6
vmovss %xmm5, 0x140(%rsp,%r10,4)
vextractps $0x1, %xmm5, 0x150(%rsp,%r10,4)
vextractps $0x2, %xmm5, 0x160(%rsp,%r10,4)
vmovss %xmm6, 0x80(%rsp,%r10,4)
vextractps $0x1, %xmm6, 0x90(%rsp,%r10,4)
vextractps $0x2, %xmm6, 0xa0(%rsp,%r10,4)
btcq %r10, %r9
bsfq %r9, %r10
testq %r9, %r9
jne 0x22acdc
vmulps 0x140(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x120(%rsp)
vmulps 0x150(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x110(%rsp)
vmulps 0x160(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x100(%rsp)
vmulps 0x80(%rsp), %xmm0, %xmm11
vmulps 0x90(%rsp), %xmm0, %xmm9
vmulps 0xa0(%rsp), %xmm0, %xmm13
cmpq $0x0, 0x30(%rsp)
je 0x22ae48
movq 0xe0(%rbx), %rsi
movq %rsi, 0x10(%rsp)
addq $0x38, %rsi
movq %rsi, 0x20(%rsp)
movq 0x8(%rsp), %rdi
movl 0x20(%rdi,%r13,4), %r8d
movq 0x30(%rsp), %r9
movq %rdx, %r10
movslq 0x70(%rsp,%r10,4), %r11
imulq $0x38, %r11, %r11
movq 0x10(%rsp), %rsi
movq (%rsi,%r11), %rdi
movq 0x20(%rsp), %rsi
movq (%rsi,%r11), %r11
vmovups (%rdi,%r8,4), %xmm5
vmovups (%r11,%r8,4), %xmm6
vmovss %xmm5, 0x140(%rsp,%r10,4)
vextractps $0x1, %xmm5, 0x150(%rsp,%r10,4)
vextractps $0x2, %xmm5, 0x160(%rsp,%r10,4)
vmovss %xmm6, 0x80(%rsp,%r10,4)
vextractps $0x1, %xmm6, 0x90(%rsp,%r10,4)
vextractps $0x2, %xmm6, 0xa0(%rsp,%r10,4)
btcq %r10, %r9
bsfq %r9, %r10
testq %r9, %r9
jne 0x22add4
vmulps 0x140(%rsp), %xmm1, %xmm5
vmulps 0x150(%rsp), %xmm1, %xmm6
vmulps 0x160(%rsp), %xmm1, %xmm10
vmulps 0x80(%rsp), %xmm0, %xmm7
vmulps 0x90(%rsp), %xmm0, %xmm8
vmulps 0xa0(%rsp), %xmm0, %xmm15
cmpq $0x0, 0x30(%rsp)
je 0x22af0c
movq 0xe0(%rbx), %rsi
leaq 0x38(%rsi), %rdi
movq 0x8(%rsp), %r8
movl 0x30(%r8,%r13,4), %r8d
movq 0x30(%rsp), %r9
movslq 0x70(%rsp,%rdx,4), %r10
imulq $0x38, %r10, %r10
movq (%rsi,%r10), %r11
movq (%rdi,%r10), %r10
vmovups (%r11,%r8,4), %xmm2
vmovups (%r10,%r8,4), %xmm3
vmovss %xmm2, 0x140(%rsp,%rdx,4)
vextractps $0x1, %xmm2, 0x150(%rsp,%rdx,4)
vextractps $0x2, %xmm2, 0x160(%rsp,%rdx,4)
vmovss %xmm3, 0x80(%rsp,%rdx,4)
vextractps $0x1, %xmm3, 0x90(%rsp,%rdx,4)
vextractps $0x2, %xmm3, 0xa0(%rsp,%rdx,4)
btcq %rdx, %r9
bsfq %r9, %rdx
testq %r9, %r9
jne 0x22aea4
vaddps 0xb0(%rsp), %xmm12, %xmm12
vaddps 0xc0(%rsp), %xmm14, %xmm14
vaddps 0x50(%rsp), %xmm4, %xmm4
vaddps 0x120(%rsp), %xmm11, %xmm2
vmovaps %xmm2, 0x20(%rsp)
vaddps 0x110(%rsp), %xmm9, %xmm2
vmovaps %xmm2, 0x10(%rsp)
vaddps 0x100(%rsp), %xmm13, %xmm2
vmovaps %xmm2, 0x50(%rsp)
vaddps %xmm7, %xmm5, %xmm5
vmovaps %xmm5, 0x1e0(%rsp)
vaddps %xmm6, %xmm8, %xmm5
vmovaps %xmm5, 0x1d0(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm5
vmulps 0x150(%rsp), %xmm1, %xmm6
vmulps 0x160(%rsp), %xmm1, %xmm1
vmulps 0x80(%rsp), %xmm0, %xmm7
vaddps %xmm15, %xmm10, %xmm8
vmovaps %xmm8, 0x250(%rsp)
vaddps %xmm7, %xmm5, %xmm8
vmulps 0x90(%rsp), %xmm0, %xmm5
vaddps %xmm5, %xmm6, %xmm6
vmulps 0xa0(%rsp), %xmm0, %xmm0
vaddps %xmm0, %xmm1, %xmm1
movq 0x40(%rsp), %r10
movq 0x38(%rsp), %rsi
movq 0x8(%rsp), %r11
jmp 0x229e4a
vpcmpeqd %xmm0, %xmm10, %xmm0
vpxor %xmm0, %xmm9, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm2
movq 0xd8(%rsp), %rax
vblendvps %xmm0, (%rax), %xmm7, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x22a268
vpcmpeqd 0x1cc0a10(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cc0e18(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm1
movq 0xd8(%rsp), %rax
vblendvps %xmm0, (%rax), %xmm11, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x22a6eb
vmovaps 0x60(%rsp), %xmm1
vandps %xmm4, %xmm1, %xmm0
vtestps %xmm4, %xmm1
movq 0x248(%rsp), %rax
je 0x22b057
incq %rax
vmovaps %xmm0, %xmm4
cmpq 0x240(%rsp), %rax
jb 0x229bee
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm0, %xmm0
movq 0xd0(%rsp), %r8
jmp 0x22b06d
vpcmpeqd %xmm1, %xmm1, %xmm1
vorps 0xf0(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vtestps %xmm1, %xmm0
jb 0x22b0ab
vmovaps 0x330(%rsp), %xmm1
vbroadcastss 0x1cc1aec(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x330(%rsp)
xorl %eax, %eax
jmp 0x22b0ae
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x2298b3
jmp 0x22b18d
pushq $0x2
jmp 0x22b0ad
vmovaps %xmm1, 0x20(%rsp)
movq %r8, 0xd0(%rsp)
movq 0x138(%rsp), %rdi
movq %r10, 0x40(%rsp)
movq %rsi, 0x38(%rsp)
bsfq %r13, %rcx
movq %rbp, %rdx
movq %rcx, 0x60(%rsp)
leaq 0x4f(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x268(%rsp), %rax
pushq %rax
vzeroupper
callq 0x26e66c
popq %rcx
popq %rdx
testb %al, %al
je 0x22b11a
movq 0x60(%rsp), %rax
orl $-0x1, 0xf0(%rsp,%rax,4)
leaq -0x1(%r13), %rax
andq %rax, %r13
movq 0x40(%rsp), %r10
movq 0x138(%rsp), %rdi
movq 0x38(%rsp), %rsi
jne 0x22b0e0
vmovaps 0xf0(%rsp), %xmm0
vtestps 0x1cc0cd9(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0xd0(%rsp), %r8
vmovaps 0x20(%rsp), %xmm1
jb 0x2298f9
vmovaps 0x330(%rsp), %xmm1
vbroadcastss 0x1cc1a14(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm0, 0x330(%rsp)
pushq $0x2
popq %rax
jmp 0x2298f9
vmovaps 0x360(%rsp), %xmm0
vandps 0xf0(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc19dc(%rip), %xmm1 # 0x1eecb84
movq 0xd8(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
addq $0x1a88, %rsp # imm = 0x1A88
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 257, false, embree::avx::VirtualCurveIntersectorK<4>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/// Occlusion ("any hit" / shadow-ray) query for a packet of K rays against an
/// N-wide BVH. Traverses with a shared chunk stack of (node ref, per-ray near
/// distance) entries and, on capable targets, switches to per-ray single-ray
/// traversal (occluded1) when only a few rays remain active. Rays found to be
/// occluded get ray.tfar overwritten with neg_inf at the end.
/// @param valid_i  per-ray input mask: a lane is active iff its value is -1
/// @param This     accelerator wrapper; This->ptr holds the BVH
/// @param ray      the ray packet (org, dir, tnear, tfar, time); tfar is the
///                 only field written back here
/// @param context  ray query context; its coherency flags tune the traversal
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray streams take a dedicated traversal routine instead */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
/* a lane is live iff its input mask is -1 and tfar >= 0 (negative tfar marks a ray as already occluded/disabled) */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get tnear=+inf / tfar=-inf so they fail every node test during traversal */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* lanes that were never valid count as already terminated for the all(terminated) early-outs below */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags: coherent contexts drop to single-ray mode sooner (<= 2 active rays) */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node; entry 0 is an invalidNode sentinel marking the stack bottom */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* main traversal loop; the 'pop' label is also the target of gotos from the inner down-traversal */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
/* hitting the sentinel means the stack is empty -> done */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* few rays left active: trace each remaining lane individually through this subtree */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* terminated rays get tfar=-inf so every subsequent node test culls them */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
/* down-traversal: descend through inner nodes, keeping the nearest hit child
and pushing any other hit children (with their entry distances) onto the stack */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are packed front-to-back; the first empty slot ends the list */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
/* lanes that missed this child get distance inf so they stay culled for it */
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
/* no child was hit: pop the next stack entry */
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
/* too few rays still active for this subtree: park it on the stack and
re-enter via 'pop', which lands in the single-ray switch above */
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
/* accumulate newly occluded lanes; only not-yet-terminated lanes are tested */
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* a primitive intersector may request deferred traversal of a sub-BVH;
push it with near=-inf so it is considered for all non-terminated rays */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* write back: lanes that were valid on entry and are now occluded get tfar = -inf */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18e8, %rsp # imm = 0x18E8
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x22bfbc
movq %rdx, %r9
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm4, %xmm4, %xmm4
vcmpnltps %xmm4, %xmm0, %xmm2
vandps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x60(%rsp)
vmovmskps %xmm1, %eax
testl %eax, %eax
je 0x22bfbc
movq %rcx, %r10
leaq 0x80(%r9), %r11
movzbl %al, %eax
leaq 0x180(%rsp), %rcx
vmovaps 0x40(%r9), %xmm2
vmovaps 0x50(%r9), %xmm3
vmovaps 0x60(%r9), %xmm1
vmulps %xmm1, %xmm1, %xmm5
vmulps %xmm3, %xmm3, %xmm6
vaddps %xmm6, %xmm5, %xmm5
vmulps %xmm2, %xmm2, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vrsqrtps %xmm5, %xmm6
vbroadcastss 0x1cc10ff(%rip), %xmm7 # 0x1eec718
vmulps %xmm7, %xmm6, %xmm7
vbroadcastss 0x1cc10f6(%rip), %xmm8 # 0x1eec71c
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vmulps %xmm6, %xmm6, %xmm6
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vmovaps %xmm5, -0x10(%rcx)
vbroadcastss 0x1cf5878(%rip), %xmm15 # 0x1f20ec0
vmovss 0x1cc10c8(%rip), %xmm5 # 0x1eec718
vmovss 0x1cc1528(%rip), %xmm6 # 0x1eecb80
bsfq %rax, %rdx
vmovss 0x40(%r9,%rdx,4), %xmm7
vinsertps $0x1c, 0x50(%r9,%rdx,4), %xmm7, %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r9,%rdx,4), %xmm7, %xmm8 # xmm8 = xmm7[0,1],mem[0],zero
vbroadcastss 0x170(%rsp,%rdx,4), %xmm7
vmulps %xmm7, %xmm8, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,0]
vmovshdup %xmm8, %xmm10 # xmm10 = xmm8[1,1,3,3]
vxorps %xmm15, %xmm10, %xmm10
vunpckhps %xmm4, %xmm8, %xmm11 # xmm11 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
vmovss %xmm10, %xmm4, %xmm10 # xmm10 = xmm10[0],xmm4[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm15, %xmm9, %xmm9
vinsertps $0x2a, %xmm8, %xmm9, %xmm9 # xmm9 = xmm9[0],zero,xmm8[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vdpps $0x7f, %xmm9, %xmm9, %xmm12
vcmpltps %xmm11, %xmm12, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vblendvps %xmm11, %xmm10, %xmm9, %xmm9
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vrsqrtss %xmm10, %xmm10, %xmm11
vmulss %xmm5, %xmm11, %xmm12
vmulss %xmm6, %xmm10, %xmm10
vmulss %xmm11, %xmm10, %xmm10
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm10, %xmm10
vsubss %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,2,0,3]
vmulps %xmm9, %xmm11, %xmm11
vmulps %xmm10, %xmm8, %xmm10
vsubps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm10, %xmm10, %xmm11
leaq -0x1(%rax), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm5, %xmm12, %xmm13
vmulss %xmm6, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm10
vmulps %xmm7, %xmm8, %xmm7
vunpcklps %xmm7, %xmm9, %xmm8 # xmm8 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
vunpckhps %xmm7, %xmm9, %xmm7 # xmm7 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
vunpcklps %xmm4, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
vunpckhps %xmm4, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm4[2],xmm10[3],xmm4[3]
vunpcklps %xmm10, %xmm7, %xmm7 # xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1]
vunpcklps %xmm9, %xmm8, %xmm10 # xmm10 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
vunpckhps %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
vmovaps %xmm10, (%rcx,%rdx)
vmovaps %xmm8, 0x10(%rcx,%rdx)
vmovaps %xmm7, 0x20(%rcx,%rdx)
andq %rdi, %rax
jne 0x22b658
vbroadcastss 0x1cf5728(%rip), %xmm9 # 0x1f20ec4
vbroadcastss 0x1cc5843(%rip), %xmm10 # 0x1ef0fe8
vandps %xmm3, %xmm9, %xmm4
vcmpltps %xmm10, %xmm4, %xmm4
vblendvps %xmm4, %xmm10, %xmm3, %xmm4
vbroadcastss 0x1cc0f56(%rip), %xmm11 # 0x1eec714
vrcpps %xmm4, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vsubps %xmm4, %xmm11, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm5, %xmm5
vxorps %xmm4, %xmm4, %xmm4
vcmpnltps %xmm4, %xmm5, %xmm6
vbroadcastss 0x1d291bc(%rip), %xmm7 # 0x1f549a0
vbroadcastss 0x1d2f17b(%rip), %xmm8 # 0x1f5a968
vblendvps %xmm6, %xmm7, %xmm8, %xmm6
vmovaps (%r9), %xmm7
vmovaps %xmm7, 0x80(%rsp)
vmovaps 0x10(%r9), %xmm7
vmovaps %xmm7, 0x90(%rsp)
vmovdqa 0x20(%r9), %xmm7
vmovdqa %xmm7, 0xa0(%rsp)
vmovaps %xmm2, 0xb0(%rsp)
vmovaps %xmm3, 0xc0(%rsp)
vmovaps %xmm1, 0xd0(%rsp)
vandps %xmm2, %xmm9, %xmm3
vcmpltps %xmm10, %xmm3, %xmm3
vblendvps %xmm3, %xmm10, %xmm2, %xmm2
vandps %xmm1, %xmm9, %xmm3
vcmpltps %xmm10, %xmm3, %xmm3
vblendvps %xmm3, %xmm10, %xmm1, %xmm1
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vsubps %xmm2, %xmm11, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vrcpps %xmm1, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vsubps %xmm1, %xmm11, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vmovaps %xmm2, 0xe0(%rsp)
vmovaps %xmm5, 0xf0(%rsp)
vmovaps %xmm1, 0x100(%rsp)
vcmpltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d2f0b9(%rip), %xmm3 # 0x1f5a964
vandps %xmm3, %xmm2, %xmm2
vmovaps %xmm2, 0x110(%rsp)
vmovaps %xmm6, 0x120(%rsp)
vcmpnltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d2f09d(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d2f098(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0x130(%rsp)
vmovaps 0x30(%r9), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm0, %xmm2
vbroadcastss 0x1cc0122(%rip), %xmm10 # 0x1eeba20
vmovaps 0x60(%rsp), %xmm3
vblendvps %xmm3, %xmm1, %xmm10, %xmm0
vmovaps %xmm0, 0x140(%rsp)
vbroadcastss 0x1cc1268(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x150(%rsp)
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0x30(%rsp)
cmpq $0x0, 0x8(%r10)
je 0x22b954
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r15d, %r15d
cmpb $0x1, %al
adcq $0x2, %r15
jmp 0x22b958
pushq $0x3
popq %r15
leaq 0x250(%rsp), %r12
movq $-0x8, -0x10(%r12)
leaq 0x9f0(%rsp), %rbx
vmovaps %xmm10, -0x20(%rbx)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm0, -0x10(%rbx)
leaq 0x1f245f5(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x160(%rsp)
movq %r10, 0x48(%rsp)
movq %r9, 0x40(%rsp)
addq $-0x10, %rbx
movq -0x8(%r12), %rbp
addq $-0x8, %r12
cmpq $-0x8, %rbp
je 0x22bec7
vmovaps (%rbx), %xmm11
vcmpltps 0x150(%rsp), %xmm11, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22becb
movzbl %al, %r14d
popcntl %r14d, %r13d
xorl %eax, %eax
cmpq %r15, %r13
jbe 0x22bed0
cmpq %r15, %r13
jbe 0x22beb9
testb $0x8, %bpl
pushq $0x8
popq %r14
jne 0x22bda1
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %r14, %rbp
vmovaps %xmm10, %xmm11
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x22bb2d
testb $0x7, %al
jne 0x22bb4d
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x80(%rsp), %xmm3
vmovaps 0x90(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm5
vmovaps 0xe0(%rsp), %xmm6
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm4, %xmm0, %xmm0
vmovaps 0xf0(%rsp), %xmm7
vmulps %xmm0, %xmm7, %xmm1
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm5, %xmm0, %xmm0
vmovaps 0x100(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm0
vbroadcastss 0x30(%rax,%rdx,4), %xmm9
vsubps %xmm3, %xmm9, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vbroadcastss 0x50(%rax,%rdx,4), %xmm6
vsubps %xmm4, %xmm6, %xmm4
vmulps %xmm4, %xmm7, %xmm4
vbroadcastss 0x70(%rax,%rdx,4), %xmm6
vsubps %xmm5, %xmm6, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vpminsd %xmm3, %xmm2, %xmm6
vpminsd %xmm4, %xmm1, %xmm7
vpmaxsd %xmm7, %xmm6, %xmm6
vpminsd %xmm5, %xmm0, %xmm7
vpmaxsd %xmm7, %xmm6, %xmm6
vpmaxsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm4, %xmm1, %xmm1
vpminsd %xmm1, %xmm2, %xmm1
vpmaxsd %xmm5, %xmm0, %xmm0
vpminsd %xmm0, %xmm1, %xmm0
vpmaxsd 0x140(%rsp), %xmm6, %xmm1
vpminsd 0x150(%rsp), %xmm0, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
vtestps %xmm0, %xmm0
je 0x22bb2d
vblendvps %xmm0, %xmm6, %xmm10, %xmm0
cmpq $0x8, %rbp
je 0x22bb26
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm11, (%rbx)
addq $0x10, %rbx
vmovaps %xmm0, %xmm11
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x22bd50
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x22ba12
jmp 0x22bd50
vbroadcastss 0x20(%rcx,%rdx,4), %xmm2
vbroadcastss 0x30(%rcx,%rdx,4), %xmm1
vbroadcastss 0x40(%rcx,%rdx,4), %xmm0
vbroadcastss 0x50(%rcx,%rdx,4), %xmm5
vbroadcastss 0x60(%rcx,%rdx,4), %xmm4
vbroadcastss 0x70(%rcx,%rdx,4), %xmm3
vbroadcastss 0x80(%rcx,%rdx,4), %xmm8
vbroadcastss 0x90(%rcx,%rdx,4), %xmm7
vbroadcastss 0xa0(%rcx,%rdx,4), %xmm6
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xd0(%rsp), %xmm10
vmovaps %xmm11, 0x20(%rsp)
vmulps %xmm10, %xmm8, %xmm11
vmulps %xmm7, %xmm10, %xmm12
vmulps %xmm5, %xmm9, %xmm13
vaddps %xmm11, %xmm13, %xmm11
vmulps %xmm4, %xmm9, %xmm13
vaddps %xmm12, %xmm13, %xmm12
vmovaps 0xb0(%rsp), %xmm13
vmulps %xmm6, %xmm10, %xmm10
vmulps %xmm3, %xmm9, %xmm9
vaddps %xmm10, %xmm9, %xmm9
vmulps %xmm2, %xmm13, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm1, %xmm13, %xmm11
vaddps %xmm12, %xmm11, %xmm11
vmulps %xmm0, %xmm13, %xmm12
vaddps %xmm9, %xmm12, %xmm9
vbroadcastss 0x1cf52c2(%rip), %xmm13 # 0x1f20ec4
vandps %xmm13, %xmm10, %xmm12
vbroadcastss 0x1cc53d8(%rip), %xmm14 # 0x1ef0fe8
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm10, %xmm10
vandps %xmm13, %xmm11, %xmm12
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm11, %xmm11
vandps %xmm13, %xmm9, %xmm12
vcmpltps %xmm14, %xmm12, %xmm12
vblendvps %xmm12, %xmm14, %xmm9, %xmm12
vbroadcastss 0xb0(%rcx,%rdx,4), %xmm13
vrcpps %xmm10, %xmm9
vmulps %xmm10, %xmm9, %xmm10
vbroadcastss 0x1cc0ab9(%rip), %xmm14 # 0x1eec714
vsubps %xmm10, %xmm14, %xmm10
vmulps %xmm10, %xmm9, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vrcpps %xmm11, %xmm10
vmulps %xmm11, %xmm10, %xmm11
vsubps %xmm11, %xmm14, %xmm11
vmulps %xmm11, %xmm10, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vrcpps %xmm12, %xmm11
vmulps %xmm12, %xmm11, %xmm12
vsubps %xmm12, %xmm14, %xmm12
vmulps %xmm12, %xmm11, %xmm12
vaddps %xmm12, %xmm11, %xmm11
vmovaps 0xa0(%rsp), %xmm12
vmulps %xmm12, %xmm8, %xmm8
vaddps %xmm8, %xmm13, %xmm8
vbroadcastss 0xc0(%rcx,%rdx,4), %xmm13
vmulps %xmm7, %xmm12, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmulps %xmm6, %xmm12, %xmm6
vbroadcastss 0xd0(%rcx,%rdx,4), %xmm12
vaddps %xmm6, %xmm12, %xmm6
vmovaps 0x90(%rsp), %xmm12
vmulps %xmm5, %xmm12, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vmulps %xmm4, %xmm12, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm3, %xmm12, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vmovaps 0x80(%rsp), %xmm6
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vxorps %xmm15, %xmm9, %xmm5
vmulps %xmm5, %xmm2, %xmm2
vmulps %xmm6, %xmm1, %xmm1
vaddps %xmm4, %xmm1, %xmm1
vxorps %xmm15, %xmm10, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vmulps %xmm6, %xmm0, %xmm0
vaddps %xmm3, %xmm0, %xmm0
vxorps %xmm15, %xmm11, %xmm3
vmulps %xmm3, %xmm0, %xmm0
vaddps %xmm2, %xmm9, %xmm3
vaddps %xmm1, %xmm10, %xmm4
vbroadcastss 0x1cbfcdf(%rip), %xmm10 # 0x1eeba20
vaddps %xmm0, %xmm11, %xmm5
vmovaps 0x20(%rsp), %xmm11
jmp 0x22bab8
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x22bd9a
vmovaps 0x150(%rsp), %xmm0
vcmpnleps %xmm11, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r15
jae 0x22bd8a
testb %cl, %cl
je 0x22beb9
testb $0x8, %bpl
je 0x22b9fe
jmp 0x22bda1
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm11, (%rbx)
addq $0x10, %rbx
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x22bd76
cmpq $-0x8, %rbp
je 0x22bec7
movq %r11, 0x8(%rsp)
movq %r8, 0x10(%rsp)
vmovaps 0x150(%rsp), %xmm0
vcmpnleps %xmm11, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22bf9a
andq $-0x10, %rbp
vmovaps 0x30(%rsp), %xmm1
movzbl (%rbp), %eax
movq %rsi, 0x18(%rsp)
movq 0x8(%rsi), %rcx
vmovaps 0x160(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps %xmm1, 0x50(%rsp)
vmovmskps %xmm1, %edx
xorl $0xf, %edx
je 0x22be53
movzbl %dl, %r14d
shll $0x6, %eax
addq %rcx, %rax
addq $0x18, %rax
movq %rax, 0x20(%rsp)
bsfq %r14, %r13
leaq 0x170(%rsp), %rdi
movq %r9, %rsi
movq %r13, %rdx
movq %r10, %rcx
movq %rbp, %r8
movq 0x20(%rsp), %rax
callq *(%rax)
testb %al, %al
je 0x22be40
orl $-0x1, 0x70(%rsp,%r13,4)
leaq -0x1(%r14), %rax
andq %rax, %r14
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r9
jne 0x22be17
vmovaps 0x50(%rsp), %xmm0
vorps 0x70(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vtestps 0x1cbffb2(%rip), %xmm0 # 0x1eebe20
jb 0x22be95
vmovaps 0x150(%rsp), %xmm1
vbroadcastss 0x1cc0d02(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x150(%rsp)
xorl %eax, %eax
jmp 0x22be98
pushq $0x3
popq %rax
movq 0x18(%rsp), %rsi
movq 0x10(%rsp), %r8
movq 0x8(%rsp), %r11
vbroadcastss 0x1cf5010(%rip), %xmm15 # 0x1f20ec0
vbroadcastss 0x1cbfb67(%rip), %xmm10 # 0x1eeba20
cmpl $0x3, %eax
jne 0x22b9a2
jmp 0x22bfa2
pushq $0x3
jmp 0x22becd
pushq $0x2
popq %rax
jmp 0x22beb9
vmovaps %xmm11, 0x20(%rsp)
movq %r11, 0x8(%rsp)
movq %rsi, 0x18(%rsp)
movq %r8, 0x10(%rsp)
bsfq %r14, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x50(%rsp)
leaq 0x170(%rsp), %r8
pushq %r10
leaq 0x88(%rsp), %rax
pushq %rax
callq 0x27004e
popq %rcx
popq %rdx
testb %al, %al
je 0x22bf1f
movq 0x50(%rsp), %rax
orl $-0x1, 0x30(%rsp,%rax,4)
leaq -0x1(%r14), %rax
andq %rax, %r14
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r9
movq 0x18(%rsp), %rsi
movq 0x10(%rsp), %r8
jne 0x22bee5
vmovaps 0x30(%rsp), %xmm0
vtestps 0x1cbfed5(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x8(%rsp), %r11
vbroadcastss 0x1cf4f64(%rip), %xmm15 # 0x1f20ec0
vbroadcastss 0x1cbfabb(%rip), %xmm10 # 0x1eeba20
vmovaps 0x20(%rsp), %xmm11
jb 0x22b9e7
vmovaps 0x150(%rsp), %xmm1
vbroadcastss 0x1cc0c01(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x150(%rsp)
pushq $0x2
popq %rax
jmp 0x22b9e7
pushq $0x2
popq %rax
jmp 0x22be9d
vmovaps 0x60(%rsp), %xmm0
vandps 0x30(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cc0bcd(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r11)
addq $0x18e8, %rsp # imm = 0x18E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16781328, false, embree::avx::VirtualCurveIntersectorK<4>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Hybrid packet/single-ray BVH traversal: intersects a packet of K rays
   with the BVH and records the closest hits in 'ray'. Active rays are
   selected by 'valid_i'; traversal runs in chunk (packet) mode and falls
   back to per-ray traversal (intersect1) when SIMD utilization drops to
   'switchThreshold' or below. NOTE(review): template parameters N, K,
   types, robust, single come from the enclosing template declaration,
   which is outside this snippet. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated faster code path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays: a lane is active iff its valid_i entry is -1 */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into the traversal-ray structure (precomputed reciprocal
   directions etc. live in TravRayK); tnear/tfar are clamped to >= 0 */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* pure single-ray mode: trace each active ray individually from the root;
   inactive lanes get an empty interval [+inf, -inf] so they never hit */
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* octant encodes the three direction-sign bits of each ray;
   inactive lanes are forced to the sentinel 0xffffffff */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant (all three sign bits flipped) */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: traverse one octant-compatible subset of rays per iteration
   (rays whose octant differs in at most one sign bit are grouped) */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark the selected rays as consumed for subsequent iterations */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node; stack_node[0] holds the sentinel
   invalidNode that terminates the pop loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few rays still active: finish this subtree per ray */
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* intersect1 may have shortened ray.tfar; propagate it */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
/* inner loop: descend until 'cur' is a leaf */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits:
   bubble the nearest pushed entries toward the stack top */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* push current node and re-enter the pop loop, which will
   take the single-ray switch above */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* primitive intersector updates ray.tfar on hit; shrink traversal tfar */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* lazy_node: some intersectors defer a subtree (e.g. instancing);
   push it with distance -inf so it is processed next */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq (%rsi), %rbx
cmpq $0x8, 0x70(%rbx)
je 0x22c372
vmovdqa (%rdi), %xmm3
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd %xmm0, %xmm3, %xmm0
vmovmskps %xmm0, %ebp
testl %ebp, %ebp
je 0x22c372
movq %rcx, %r13
movq %rdx, %r14
movq %rsi, %r12
leaq 0x10(%rsp), %rax
vmovaps 0x40(%rdx), %xmm2
vmovaps 0x50(%rdx), %xmm1
vmovaps 0x60(%rdx), %xmm0
vmulps %xmm0, %xmm0, %xmm4
vmulps %xmm1, %xmm1, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm2, %xmm2, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vrsqrtps %xmm4, %xmm5
vbroadcastss 0x1cc06d5(%rip), %xmm6 # 0x1eec718
vmulps %xmm6, %xmm5, %xmm6
vbroadcastss 0x1cc06cc(%rip), %xmm7 # 0x1eec71c
vmulps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vmulps %xmm5, %xmm5, %xmm5
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovaps %xmm4, -0x10(%rax)
vbroadcastss 0x1cf4e4e(%rip), %xmm4 # 0x1f20ec0
vxorps %xmm5, %xmm5, %xmm5
vmovss 0x1cc069a(%rip), %xmm6 # 0x1eec718
vmovss 0x1cc0afa(%rip), %xmm7 # 0x1eecb80
movq %rbp, %rcx
bsfq %rcx, %rdx
vmovss 0x40(%r14,%rdx,4), %xmm8
vinsertps $0x1c, 0x50(%r14,%rdx,4), %xmm8, %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r14,%rdx,4), %xmm8, %xmm9 # xmm9 = xmm8[0,1],mem[0],zero
vbroadcastss (%rsp,%rdx,4), %xmm8
vmulps %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmovshdup %xmm9, %xmm11 # xmm11 = xmm9[1,1,3,3]
vxorps %xmm4, %xmm11, %xmm11
vunpckhps %xmm5, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
vmovss %xmm11, %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm4, %xmm10, %xmm10
vinsertps $0x2a, %xmm9, %xmm10, %xmm10 # xmm10 = xmm10[0],zero,xmm9[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm10, %xmm13
vcmpltps %xmm12, %xmm13, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vblendvps %xmm12, %xmm11, %xmm10, %xmm10
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm6, %xmm12, %xmm13
vmulss %xmm7, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,2,0,3]
vmulps %xmm10, %xmm12, %xmm12
vmulps %xmm11, %xmm9, %xmm11
vsubps %xmm12, %xmm11, %xmm11
vshufps $0xc9, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3]
vdpps $0x7f, %xmm11, %xmm11, %xmm12
leaq -0x1(%rcx), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss %xmm6, %xmm13, %xmm14
vmulss %xmm7, %xmm12, %xmm12
vmulss %xmm13, %xmm12, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm13, %xmm12, %xmm12
vsubss %xmm12, %xmm14, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm11
vmulps %xmm9, %xmm8, %xmm8
vunpcklps %xmm8, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
vunpckhps %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
vunpcklps %xmm5, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
vunpckhps %xmm5, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
vunpcklps %xmm11, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
vunpcklps %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
vunpckhps %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vmovaps %xmm11, (%rax,%rdx)
vmovaps %xmm9, 0x10(%rax,%rdx)
vmovaps %xmm8, 0x20(%rax,%rdx)
andq %rdi, %rcx
jne 0x22c089
vpcmpeqd %xmm4, %xmm4, %xmm4
vpcmpeqd %xmm4, %xmm3, %xmm3
vmovaps (%r14), %xmm4
leaq 0xd0(%rsp), %rax
vmovaps %xmm4, (%rax)
vmovaps 0x10(%r14), %xmm4
vmovaps %xmm4, 0x10(%rax)
vmovaps 0x20(%r14), %xmm4
vmovaps %xmm4, 0x20(%rax)
vmovaps %xmm2, 0x30(%rax)
vmovaps %xmm1, 0x40(%rax)
vmovaps %xmm0, 0x50(%rax)
vbroadcastss 0x1cf4cbb(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm5
vbroadcastss 0x1cc4dd2(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vandps %xmm4, %xmm1, %xmm5
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm0, %xmm0
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cc04c4(%rip), %xmm5 # 0x1eec714
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vrcpps %xmm1, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vrcpps %xmm0, %xmm4
vmulps %xmm4, %xmm0, %xmm0
vsubps %xmm0, %xmm5, %xmm0
vmulps %xmm0, %xmm4, %xmm0
vaddps %xmm0, %xmm4, %xmm0
vmovaps %xmm2, 0x60(%rax)
vmovaps %xmm1, 0x70(%rax)
vmovaps %xmm0, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d2e6bc(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm2, %xmm2
vmovaps %xmm2, 0x90(%rax)
vcmpnltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d286de(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d2e69d(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm0, %xmm0
vbroadcastss 0x1d2e685(%rip), %xmm1 # 0x1f5a96c
vbroadcastss 0x1d2e680(%rip), %xmm2 # 0x1f5a970
vblendvps %xmm0, %xmm1, %xmm2, %xmm0
vmovaps %xmm0, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm0
vmovaps 0x80(%r14), %xmm1
vmaxps %xmm4, %xmm0, %xmm0
vmaxps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cbf702(%rip), %xmm2 # 0x1eeba20
vblendvps %xmm3, %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0xc0(%rax)
vbroadcastss 0x1cc084f(%rip), %xmm0 # 0x1eecb84
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r15
movq 0x70(%rbx), %rdx
movq %r12, %rdi
movq %rbx, %rsi
movq %rsp, %r8
movq %r14, %r9
pushq %r13
leaq 0xd8(%rsp), %rax
pushq %rax
callq 0x2705b4
popq %rax
popq %rcx
andq %r15, %rbp
jne 0x22c343
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16781328, false, embree::avx::VirtualCurveIntersectorK<4>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Hybrid packet/single-ray occlusion (shadow-ray) query: tests a packet of
   K rays for any hit. Unlike intersect(), it terminates a lane as soon as
   any occluder is found ('terminated' mask) and finally writes neg_inf into
   ray.tfar for every occluded ray. Falls back to per-ray traversal
   (occluded1) when SIMD utilization drops to 'switchThreshold' or below.
   NOTE(review): template parameters N, K, types, robust, single come from
   the enclosing template declaration, which is outside this snippet. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated faster code path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays:
   a negative tfar marks a ray that is already known to be occluded */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into the traversal-ray structure; inactive lanes get an empty
   interval [+inf, -inf] so they can never report a hit */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid lanes start out 'terminated' so they are ignored throughout */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node; stack_node[0] holds the sentinel
   invalidNode that terminates the pop loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few rays still active: finish this subtree per ray,
   marking lanes terminated as soon as an occluder is found */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* collapse tfar of terminated lanes so nodes stop testing them */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
/* inner loop: descend until 'cur' is a leaf */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* push current node and re-enter the pop loop, which will
   take the single-ray switch above */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
/* only rays not yet terminated are tested against the primitives */
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* lazy_node: some intersectors defer a subtree (e.g. instancing);
   push it with distance -inf so it is processed next */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* report occlusion: write neg_inf tfar for rays that were valid on entry
   and found occluded during traversal */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1938, %rsp # imm = 0x1938
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x22cfa4
movq %rdx, %r14
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm4, %xmm4, %xmm4
vcmpnltps %xmm4, %xmm0, %xmm2
vandps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x70(%rsp)
vmovmskps %xmm1, %eax
testl %eax, %eax
je 0x22cfa4
movq %rcx, %r10
leaq 0x80(%r14), %r9
movzbl %al, %eax
leaq 0x1d0(%rsp), %rcx
vmovaps 0x40(%r14), %xmm2
vmovaps 0x50(%r14), %xmm3
vmovaps 0x60(%r14), %xmm1
vmulps %xmm1, %xmm1, %xmm5
vmulps %xmm3, %xmm3, %xmm6
vaddps %xmm6, %xmm5, %xmm5
vmulps %xmm2, %xmm2, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vrsqrtps %xmm5, %xmm6
vbroadcastss 0x1cc02fb(%rip), %xmm7 # 0x1eec718
vmulps %xmm7, %xmm6, %xmm7
vbroadcastss 0x1cc02f2(%rip), %xmm8 # 0x1eec71c
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vmulps %xmm6, %xmm6, %xmm6
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vmovaps %xmm5, -0x10(%rcx)
vbroadcastss 0x1cf4a74(%rip), %xmm5 # 0x1f20ec0
vmovss 0x1cc02c4(%rip), %xmm6 # 0x1eec718
vmovss 0x1cc0724(%rip), %xmm7 # 0x1eecb80
bsfq %rax, %rdx
vmovss 0x40(%r14,%rdx,4), %xmm8
vinsertps $0x1c, 0x50(%r14,%rdx,4), %xmm8, %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r14,%rdx,4), %xmm8, %xmm9 # xmm9 = xmm8[0,1],mem[0],zero
vbroadcastss 0x1c0(%rsp,%rdx,4), %xmm8
vmulps %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmovshdup %xmm9, %xmm11 # xmm11 = xmm9[1,1,3,3]
vxorps %xmm5, %xmm11, %xmm11
vunpckhps %xmm4, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
vmovss %xmm11, %xmm4, %xmm11 # xmm11 = xmm11[0],xmm4[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm5, %xmm10, %xmm10
vinsertps $0x2a, %xmm9, %xmm10, %xmm10 # xmm10 = xmm10[0],zero,xmm9[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm10, %xmm13
vcmpltps %xmm12, %xmm13, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vblendvps %xmm12, %xmm11, %xmm10, %xmm10
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm6, %xmm12, %xmm13
vmulss %xmm7, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,2,0,3]
vmulps %xmm10, %xmm12, %xmm12
vmulps %xmm11, %xmm9, %xmm11
vsubps %xmm12, %xmm11, %xmm11
vshufps $0xc9, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3]
vdpps $0x7f, %xmm11, %xmm11, %xmm12
leaq -0x1(%rax), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss %xmm6, %xmm13, %xmm14
vmulss %xmm7, %xmm12, %xmm12
vmulss %xmm13, %xmm12, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm13, %xmm12, %xmm12
vsubss %xmm12, %xmm14, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm11
vmulps %xmm9, %xmm8, %xmm8
vunpcklps %xmm8, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
vunpckhps %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
vunpcklps %xmm4, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
vunpckhps %xmm4, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
vunpcklps %xmm11, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
vunpcklps %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
vunpckhps %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vmovaps %xmm11, (%rcx,%rdx)
vmovaps %xmm9, 0x10(%rcx,%rdx)
vmovaps %xmm8, 0x20(%rcx,%rdx)
andq %rdi, %rax
jne 0x22c45c
vbroadcastss 0x1cf4922(%rip), %xmm8 # 0x1f20ec4
vbroadcastss 0x1cc4a3d(%rip), %xmm9 # 0x1ef0fe8
vandps %xmm3, %xmm8, %xmm4
vcmpltps %xmm9, %xmm4, %xmm4
vblendvps %xmm4, %xmm9, %xmm3, %xmm4
vbroadcastss 0x1cc0150(%rip), %xmm10 # 0x1eec714
vrcpps %xmm4, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vsubps %xmm4, %xmm10, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm5, %xmm4
vxorps %xmm11, %xmm11, %xmm11
vcmpnltps %xmm11, %xmm4, %xmm5
vbroadcastss 0x1d283b4(%rip), %xmm6 # 0x1f549a0
vbroadcastss 0x1d2e373(%rip), %xmm7 # 0x1f5a968
vblendvps %xmm5, %xmm6, %xmm7, %xmm5
vmovaps (%r14), %xmm6
vmovaps %xmm6, 0x90(%rsp)
vmovaps 0x10(%r14), %xmm6
vmovaps %xmm6, 0xa0(%rsp)
vmovdqa 0x20(%r14), %xmm6
vmovdqa %xmm6, 0xb0(%rsp)
vmovaps %xmm2, 0xc0(%rsp)
vmovaps %xmm3, 0xd0(%rsp)
vmovaps %xmm1, 0xe0(%rsp)
vandps %xmm2, %xmm8, %xmm3
vcmpltps %xmm9, %xmm3, %xmm3
vblendvps %xmm3, %xmm9, %xmm2, %xmm2
vandps %xmm1, %xmm8, %xmm3
vcmpltps %xmm9, %xmm3, %xmm3
vblendvps %xmm3, %xmm9, %xmm1, %xmm1
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vsubps %xmm2, %xmm10, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vrcpps %xmm1, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vsubps %xmm1, %xmm10, %xmm1
vmulps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm3, %xmm1
vmovaps %xmm2, 0xf0(%rsp)
vmovaps %xmm4, 0x100(%rsp)
vmovaps %xmm1, 0x110(%rsp)
vcmpltps %xmm11, %xmm2, %xmm2
vbroadcastss 0x1d2e2b0(%rip), %xmm3 # 0x1f5a964
vandps %xmm3, %xmm2, %xmm2
vmovaps %xmm2, 0x120(%rsp)
vmovaps %xmm5, 0x130(%rsp)
vcmpnltps %xmm11, %xmm1, %xmm1
vbroadcastss 0x1d2e293(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d2e28e(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0x140(%rsp)
vmovaps 0x30(%r14), %xmm1
vmaxps %xmm11, %xmm1, %xmm1
vmaxps %xmm11, %xmm0, %xmm2
vbroadcastss 0x1cbf316(%rip), %xmm14 # 0x1eeba20
vmovaps 0x70(%rsp), %xmm3
vblendvps %xmm3, %xmm1, %xmm14, %xmm0
vmovaps %xmm0, 0x150(%rsp)
vbroadcastss 0x1cc045c(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x160(%rsp)
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0x50(%rsp)
cmpq $0x0, 0x8(%r10)
je 0x22c760
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r11d, %r11d
cmpb $0x1, %al
adcq $0x2, %r11
jmp 0x22c764
pushq $0x3
popq %r11
leaq 0x2a0(%rsp), %r12
movq $-0x8, -0x10(%r12)
leaq 0xa40(%rsp), %rbx
vmovaps %xmm14, -0x20(%rbx)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm0, -0x10(%rbx)
leaq 0x1f237e9(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x170(%rsp)
movq %r10, 0x68(%rsp)
addq $-0x10, %rbx
movq -0x8(%r12), %rbp
addq $-0x8, %r12
cmpq $-0x8, %rbp
je 0x22ceb2
vmovaps (%rbx), %xmm15
vcmpltps 0x160(%rsp), %xmm15, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22ceb6
movzbl %al, %r15d
popcntl %r15d, %r13d
xorl %eax, %eax
cmpq %r11, %r13
jbe 0x22cebb
cmpq %r11, %r13
jbe 0x22cea4
pushq $0x8
popq %r15
testb $0x8, %bpl
jne 0x22cd89
vmovaps 0x160(%rsp), %xmm0
movq %rbp, %rax
andq $-0x10, %rax
movl %ebp, %ecx
andl $0x7, %ecx
vcmpnleps %xmm15, %xmm0, %xmm0
vmovaps %xmm0, 0x40(%rsp)
xorl %edx, %edx
movq %r15, %rbp
vmovaps %xmm14, %xmm15
movq (%rax,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x22cd28
cmpl $0x6, %ecx
je 0x22c84c
cmpl $0x1, %ecx
jne 0x22c9d1
vbroadcastss 0x80(%rax,%rdx,4), %xmm0
vbroadcastss 0x20(%rax,%rdx,4), %xmm2
vmovaps 0x70(%r14), %xmm1
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa0(%rax,%rdx,4), %xmm2
vbroadcastss 0x40(%rax,%rdx,4), %xmm3
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0xc0(%rax,%rdx,4), %xmm3
vbroadcastss 0x60(%rax,%rdx,4), %xmm4
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0x90(%rax,%rdx,4), %xmm4
vbroadcastss 0x30(%rax,%rdx,4), %xmm5
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0xb0(%rax,%rdx,4), %xmm5
vbroadcastss 0x50(%rax,%rdx,4), %xmm6
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0xd0(%rax,%rdx,4), %xmm6
vbroadcastss 0x70(%rax,%rdx,4), %xmm7
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vmovaps 0x90(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm10
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vsubps %xmm8, %xmm2, %xmm2
vmovaps 0x100(%rsp), %xmm11
vmulps %xmm2, %xmm11, %xmm12
vsubps %xmm9, %xmm3, %xmm2
vmovaps 0x110(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm13
vsubps %xmm7, %xmm4, %xmm2
vmulps %xmm2, %xmm10, %xmm4
vsubps %xmm8, %xmm5, %xmm2
vmulps %xmm2, %xmm11, %xmm5
vsubps %xmm9, %xmm6, %xmm2
vmulps %xmm3, %xmm2, %xmm3
vpminsd %xmm4, %xmm0, %xmm2
vpminsd %xmm5, %xmm12, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpminsd %xmm3, %xmm13, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpmaxsd %xmm4, %xmm0, %xmm0
vpmaxsd %xmm5, %xmm12, %xmm4
vpminsd %xmm4, %xmm0, %xmm4
vpmaxsd %xmm3, %xmm13, %xmm3
vpmaxsd 0x150(%rsp), %xmm2, %xmm0
vpminsd %xmm3, %xmm4, %xmm3
vpminsd 0x160(%rsp), %xmm3, %xmm3
vcmpleps %xmm3, %xmm0, %xmm0
cmpl $0x6, %ecx
jne 0x22ccf3
vbroadcastss 0xe0(%rax,%rdx,4), %xmm3
vcmpleps %xmm1, %xmm3, %xmm3
vbroadcastss 0xf0(%rax,%rdx,4), %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vandps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
jmp 0x22ccf3
vbroadcastss 0x50(%rax,%rdx,4), %xmm8
vmovaps %xmm8, 0x180(%rsp)
vbroadcastss 0x60(%rax,%rdx,4), %xmm4
vbroadcastss 0x70(%rax,%rdx,4), %xmm5
vbroadcastss 0x80(%rax,%rdx,4), %xmm11
vbroadcastss 0x90(%rax,%rdx,4), %xmm10
vbroadcastss 0xe0(%rax,%rdx,4), %xmm0
vbroadcastss 0xf0(%rax,%rdx,4), %xmm1
vbroadcastss 0x100(%rax,%rdx,4), %xmm2
vbroadcastss 0x110(%rax,%rdx,4), %xmm6
vbroadcastss 0x120(%rax,%rdx,4), %xmm7
vbroadcastss 0x130(%rax,%rdx,4), %xmm9
vmovaps 0x70(%r14), %xmm12
vbroadcastss 0x1cbfcc6(%rip), %xmm3 # 0x1eec714
vsubps %xmm12, %xmm3, %xmm13
vmulps %xmm0, %xmm12, %xmm0
vmulps %xmm1, %xmm12, %xmm1
vmulps %xmm2, %xmm12, %xmm2
vmulps 0x1cbefa9(%rip), %xmm13, %xmm14 # 0x1eeba10
vaddps %xmm0, %xmm14, %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vaddps %xmm1, %xmm14, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vaddps %xmm2, %xmm14, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmulps %xmm6, %xmm12, %xmm0
vmulps %xmm7, %xmm12, %xmm1
vmulps %xmm9, %xmm12, %xmm2
vaddps %xmm0, %xmm13, %xmm0
vmovaps %xmm0, 0x190(%rsp)
vaddps %xmm1, %xmm13, %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
vaddps %xmm2, %xmm13, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps 0xe0(%rsp), %xmm1
vmulps %xmm1, %xmm11, %xmm2
vmulps %xmm1, %xmm10, %xmm12
vmulps %xmm0, %xmm8, %xmm13
vaddps %xmm2, %xmm13, %xmm2
vmulps %xmm0, %xmm4, %xmm13
vmovaps %xmm4, %xmm9
vaddps %xmm12, %xmm13, %xmm14
vmovaps %xmm15, %xmm4
vbroadcastss 0xa0(%rax,%rdx,4), %xmm15
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x20(%rax,%rdx,4), %xmm12
vmovaps 0xc0(%rsp), %xmm1
vmulps %xmm1, %xmm12, %xmm13
vaddps %xmm2, %xmm13, %xmm2
vbroadcastss 0x30(%rax,%rdx,4), %xmm13
vmulps %xmm1, %xmm13, %xmm6
vaddps %xmm6, %xmm14, %xmm6
vbroadcastss 0x40(%rax,%rdx,4), %xmm14
vmulps %xmm1, %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cf4383(%rip), %xmm7 # 0x1f20ec4
vandps %xmm7, %xmm2, %xmm1
vbroadcastss 0x1cc449a(%rip), %xmm8 # 0x1ef0fe8
vcmpltps %xmm8, %xmm1, %xmm1
vblendvps %xmm1, %xmm8, %xmm2, %xmm1
vandps %xmm7, %xmm6, %xmm2
vcmpltps %xmm8, %xmm2, %xmm2
vblendvps %xmm2, %xmm8, %xmm6, %xmm6
vandps %xmm7, %xmm0, %xmm2
vcmpltps %xmm8, %xmm2, %xmm2
vblendvps %xmm2, %xmm8, %xmm0, %xmm0
vrcpps %xmm1, %xmm2
vmulps %xmm1, %xmm2, %xmm1
vsubps %xmm1, %xmm3, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm2, %xmm2
vrcpps %xmm6, %xmm1
vmulps %xmm6, %xmm1, %xmm6
vsubps %xmm6, %xmm3, %xmm6
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm0, %xmm6
vmulps %xmm0, %xmm6, %xmm0
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm6, %xmm0
vmovaps 0xb0(%rsp), %xmm6
vmulps %xmm6, %xmm11, %xmm11
vbroadcastss 0xb0(%rax,%rdx,4), %xmm7
vaddps %xmm7, %xmm11, %xmm7
vmulps %xmm6, %xmm10, %xmm10
vbroadcastss 0xc0(%rax,%rdx,4), %xmm11
vaddps %xmm10, %xmm11, %xmm10
vmulps %xmm6, %xmm15, %xmm6
vmovaps %xmm4, %xmm15
vbroadcastss 0xd0(%rax,%rdx,4), %xmm11
vaddps %xmm6, %xmm11, %xmm6
vmovaps 0xa0(%rsp), %xmm11
vmulps 0x180(%rsp), %xmm11, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm11, %xmm9, %xmm3
vaddps %xmm3, %xmm10, %xmm3
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vmovaps 0x90(%rsp), %xmm6
vmulps %xmm6, %xmm12, %xmm7
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm6, %xmm13, %xmm7
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss 0x1cbedd9(%rip), %xmm14 # 0x1eeba20
vaddps %xmm5, %xmm6, %xmm5
vmovaps 0x1a0(%rsp), %xmm6
vsubps %xmm4, %xmm6, %xmm6
vmovaps 0x190(%rsp), %xmm7
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm6, %xmm2, %xmm6
vmulps %xmm4, %xmm2, %xmm4
vmovaps 0x10(%rsp), %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmovaps 0x1b0(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm2, %xmm1, %xmm7
vmulps %xmm3, %xmm1, %xmm1
vmovaps 0x20(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm3
vmovaps 0x30(%rsp), %xmm2
vsubps %xmm5, %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vpminsd %xmm4, %xmm6, %xmm2
vpminsd %xmm1, %xmm7, %xmm5
vpmaxsd %xmm5, %xmm2, %xmm2
vpminsd %xmm0, %xmm3, %xmm5
vpmaxsd %xmm5, %xmm2, %xmm2
vpmaxsd %xmm4, %xmm6, %xmm4
vpmaxsd %xmm1, %xmm7, %xmm1
vpminsd %xmm1, %xmm4, %xmm1
vpmaxsd %xmm0, %xmm3, %xmm0
vpmaxsd 0x150(%rsp), %xmm2, %xmm3
vpminsd %xmm0, %xmm1, %xmm0
vpminsd 0x160(%rsp), %xmm0, %xmm0
vcmpleps %xmm0, %xmm3, %xmm0
vandps 0x40(%rsp), %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22cd28
vblendvps %xmm0, %xmm2, %xmm14, %xmm0
cmpq $0x8, %rbp
je 0x22cd21
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm15, (%rbx)
addq $0x10, %rbx
vmovaps %xmm0, %xmm15
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x22cd3f
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x22c830
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x22cd82
vmovaps 0x160(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r11
jae 0x22cd72
testb %cl, %cl
jne 0x22c7fb
jmp 0x22cea4
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm15, (%rbx)
addq $0x10, %rbx
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x22cd65
cmpq $-0x8, %rbp
je 0x22ceb2
movq %r11, 0x10(%rsp)
movq %r9, 0x20(%rsp)
movq %r8, (%rsp)
vmovaps 0x160(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22cf82
andq $-0x10, %rbp
vmovaps 0x50(%rsp), %xmm1
movzbl (%rbp), %eax
movq %rsi, 0x8(%rsp)
movq 0x8(%rsi), %rcx
vmovaps 0x170(%rsp), %xmm0
vmovaps %xmm0, 0x80(%rsp)
vmovaps %xmm1, 0x30(%rsp)
vmovmskps %xmm1, %edx
xorl $0xf, %edx
je 0x22ce40
movzbl %dl, %r15d
shll $0x6, %eax
addq %rcx, %rax
addq $0x18, %rax
movq %rax, 0x40(%rsp)
bsfq %r15, %rdx
leaq 0x1c0(%rsp), %rdi
movq %r14, %rsi
movq %rdx, %r13
movq %r10, %rcx
movq %rbp, %r8
movq 0x40(%rsp), %rax
callq *(%rax)
testb %al, %al
je 0x22ce32
orl $-0x1, 0x80(%rsp,%r13,4)
leaq -0x1(%r15), %rax
andq %rax, %r15
movq 0x68(%rsp), %r10
jne 0x22ce06
vmovaps 0x30(%rsp), %xmm0
vorps 0x80(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x50(%rsp)
vtestps 0x1cbefc2(%rip), %xmm0 # 0x1eebe20
jb 0x22ce85
vmovaps 0x160(%rsp), %xmm1
vbroadcastss 0x1cbfd12(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x160(%rsp)
xorl %eax, %eax
jmp 0x22ce88
pushq $0x3
popq %rax
movq 0x8(%rsp), %rsi
movq (%rsp), %r8
movq 0x20(%rsp), %r9
vbroadcastss 0x1cbeb81(%rip), %xmm14 # 0x1eeba20
movq 0x10(%rsp), %r11
cmpl $0x3, %eax
jne 0x22c7a9
jmp 0x22cf8a
pushq $0x3
jmp 0x22ceb8
pushq $0x2
popq %rax
jmp 0x22cea4
vmovaps %xmm15, 0x30(%rsp)
movq %r11, 0x10(%rsp)
movq %r9, 0x20(%rsp)
movq %rsi, 0x8(%rsp)
movq %r8, (%rsp)
bsfq %r15, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x40(%rsp)
leaq 0x1c0(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x98(%rsp), %rax
pushq %rax
callq 0x270de2
popq %rcx
popq %rdx
testb %al, %al
je 0x22cf11
movq 0x40(%rsp), %rax
orl $-0x1, 0x50(%rsp,%rax,4)
leaq -0x1(%r15), %rax
andq %rax, %r15
movq 0x68(%rsp), %r10
movq 0x8(%rsp), %rsi
movq (%rsp), %r8
jne 0x22ced4
vmovaps 0x50(%rsp), %xmm0
vtestps 0x1cbeee9(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x20(%rsp), %r9
vbroadcastss 0x1cbead8(%rip), %xmm14 # 0x1eeba20
movq 0x10(%rsp), %r11
vmovaps 0x30(%rsp), %xmm15
jb 0x22c7ee
vmovaps 0x160(%rsp), %xmm1
vbroadcastss 0x1cbfc19(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x160(%rsp)
pushq $0x2
popq %rax
jmp 0x22c7ee
pushq $0x2
popq %rax
jmp 0x22ce8d
vmovaps 0x70(%rsp), %xmm0
vandps 0x50(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cbfbe5(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r9)
addq $0x1938, %rsp # imm = 0x1938
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 257, true, embree::avx::VirtualCurveIntersectorK<4>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/// Intersects a packet of K rays with the BVH using hybrid packet/single-ray
/// traversal ("chunk" traversal with a switch-over to scalar traversal when
/// ray utilization in the packet drops below a threshold).
///
/// @param valid_i  per-lane activity mask for the ray packet (-1 = active lane)
/// @param This     intersector interface; This->ptr holds the BVH root structure
/// @param ray      ray packet; hit fields are updated in place on intersection
/// @param context  ray query context (coherency flags, user context)
///
/// Side effects: mutates `ray` (hit data / tfar) via the leaf intersectors and
/// updates traversal statistics when STAT3 is enabled. No return value.
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets take a dedicated, faster traversal path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
/* `single ? N : 0` selects precomputation needed only by the single-ray fallback */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* inactive lanes get an empty interval [+inf, -inf] so they never pass a box test */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* pure single-ray mode: trace each active lane independently from the root */
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* octant = 3-bit code of the ray direction signs; used to group rays with similar direction */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant (all three direction signs flipped) */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: traverse one octant-compatible subset of the packet per iteration */
do
{
const size_t valid_index = bsf(valid_bits);
/* count how many direction-sign bits differ from the reference ray */
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
/* accept rays whose octant differs in at most one sign bit and that are still pending */
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark the selected rays as consumed (0xffffffff) and drop them from valid_bits */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
/* stack_node[0] is an invalidNode sentinel that terminates the pop loop below */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
/* hitting the sentinel means the stack is empty — traversal of this subset is done */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
/* with few active lanes left, scalar traversal per ray beats packet traversal */
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* scalar traversal may have shortened ray.tfar; propagate to the packet bound */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
/* inner loop: descend through inner nodes until a leaf is reached */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are packed front-to-back; first empty slot ends the list */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
/* lanes that missed the child get distance inf so they never win a min */
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
/* no child was hit: pop the next stack entry */
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
/* partial bubble sort of the top stack entries so nearer nodes are popped first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
/* utilization dropped mid-descent: push cur and re-enter the pop loop, which
will take the single-ray switch path above */
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* a leaf hit shortens ray.tfar; copy it back so later nodes get culled */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* lazily built nodes (e.g. from instancing) are pushed with distance -inf
so they are processed immediately on the next pop */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq (%rsi), %rbx
cmpq $0x8, 0x70(%rbx)
je 0x22d333
vmovdqa (%rdi), %xmm3
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd %xmm0, %xmm3, %xmm0
vmovmskps %xmm0, %ebp
testl %ebp, %ebp
je 0x22d333
movq %rcx, %r13
movq %rdx, %r14
movq %rsi, %r12
leaq 0x10(%rsp), %rax
vmovaps 0x40(%rdx), %xmm2
vmovaps 0x50(%rdx), %xmm1
vmovaps 0x60(%rdx), %xmm0
vmulps %xmm0, %xmm0, %xmm4
vmulps %xmm1, %xmm1, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm2, %xmm2, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vrsqrtps %xmm4, %xmm5
vbroadcastss 0x1cbf6ed(%rip), %xmm6 # 0x1eec718
vmulps %xmm6, %xmm5, %xmm6
vbroadcastss 0x1cbf6e4(%rip), %xmm7 # 0x1eec71c
vmulps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vmulps %xmm5, %xmm5, %xmm5
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovaps %xmm4, -0x10(%rax)
vbroadcastss 0x1cf3e66(%rip), %xmm4 # 0x1f20ec0
vxorps %xmm5, %xmm5, %xmm5
vmovss 0x1cbf6b2(%rip), %xmm6 # 0x1eec718
vmovss 0x1cbfb12(%rip), %xmm7 # 0x1eecb80
movq %rbp, %rcx
bsfq %rcx, %rdx
vmovss 0x40(%r14,%rdx,4), %xmm8
vinsertps $0x1c, 0x50(%r14,%rdx,4), %xmm8, %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r14,%rdx,4), %xmm8, %xmm9 # xmm9 = xmm8[0,1],mem[0],zero
vbroadcastss (%rsp,%rdx,4), %xmm8
vmulps %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmovshdup %xmm9, %xmm11 # xmm11 = xmm9[1,1,3,3]
vxorps %xmm4, %xmm11, %xmm11
vunpckhps %xmm5, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
vmovss %xmm11, %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm4, %xmm10, %xmm10
vinsertps $0x2a, %xmm9, %xmm10, %xmm10 # xmm10 = xmm10[0],zero,xmm9[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm10, %xmm13
vcmpltps %xmm12, %xmm13, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vblendvps %xmm12, %xmm11, %xmm10, %xmm10
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm6, %xmm12, %xmm13
vmulss %xmm7, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,2,0,3]
vmulps %xmm10, %xmm12, %xmm12
vmulps %xmm11, %xmm9, %xmm11
vsubps %xmm12, %xmm11, %xmm11
vshufps $0xc9, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3]
vdpps $0x7f, %xmm11, %xmm11, %xmm12
leaq -0x1(%rcx), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss %xmm6, %xmm13, %xmm14
vmulss %xmm7, %xmm12, %xmm12
vmulss %xmm13, %xmm12, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm13, %xmm12, %xmm12
vsubss %xmm12, %xmm14, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm11
vmulps %xmm9, %xmm8, %xmm8
vunpcklps %xmm8, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
vunpckhps %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
vunpcklps %xmm5, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
vunpckhps %xmm5, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
vunpcklps %xmm11, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
vunpcklps %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
vunpckhps %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vmovaps %xmm11, (%rax,%rdx)
vmovaps %xmm9, 0x10(%rax,%rdx)
vmovaps %xmm8, 0x20(%rax,%rdx)
andq %rdi, %rcx
jne 0x22d071
vpcmpeqd %xmm4, %xmm4, %xmm4
vpcmpeqd %xmm4, %xmm3, %xmm3
vmovaps (%r14), %xmm4
leaq 0xd0(%rsp), %rax
vmovaps %xmm4, (%rax)
vmovaps 0x10(%r14), %xmm4
vmovaps %xmm4, 0x10(%rax)
vmovaps 0x20(%r14), %xmm4
vmovaps %xmm4, 0x20(%rax)
vmovaps %xmm2, 0x30(%rax)
vmovaps %xmm1, 0x40(%rax)
vmovaps %xmm0, 0x50(%rax)
vbroadcastss 0x1cf3cd3(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm5
vbroadcastss 0x1cc3dea(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cbf508(%rip), %xmm7 # 0x1eec714
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm1, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm0, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vbroadcastss 0x1cf3d31(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vblendvps %xmm8, %xmm6, %xmm1, %xmm1
vdivps %xmm0, %xmm7, %xmm0
vblendvps %xmm4, %xmm6, %xmm0, %xmm0
vmovaps %xmm2, 0x60(%rax)
vmovaps %xmm1, 0x70(%rax)
vmovaps %xmm0, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d2d6fb(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm2, %xmm2
vmovaps %xmm2, 0x90(%rax)
vcmpnltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d2771d(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d2d6dc(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm0, %xmm0
vbroadcastss 0x1d2d6c4(%rip), %xmm1 # 0x1f5a96c
vbroadcastss 0x1d2d6bf(%rip), %xmm2 # 0x1f5a970
vblendvps %xmm0, %xmm1, %xmm2, %xmm0
vmovaps %xmm0, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm0
vmovaps 0x80(%r14), %xmm1
vmaxps %xmm4, %xmm0, %xmm0
vmaxps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cbe741(%rip), %xmm2 # 0x1eeba20
vblendvps %xmm3, %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0xc0(%rax)
vbroadcastss 0x1cbf88e(%rip), %xmm0 # 0x1eecb84
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r15
movq 0x70(%rbx), %rdx
movq %r12, %rdi
movq %rbx, %rsi
movq %rsp, %r8
movq %r14, %r9
pushq %r13
leaq 0xd8(%rsp), %rax
pushq %rax
callq 0x271488
popq %rax
popq %rcx
andq %r15, %rbp
jne 0x22d304
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 257, true, embree::avx::VirtualCurveIntersectorK<4>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Occlusion (shadow-ray) query for a packet of K rays against an N-wide BVH.
   Traverses in chunk (packet) mode and may switch to single-ray traversal
   when lane utilization drops below switchThreshold. On return, ray.tfar is
   set to neg_inf for every lane whose ray was found occluded; all other
   lanes are left untouched. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent packets over a BVH_AN1 layout take a dedicated fast path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays (tfar < 0 marks a ray as occluded) */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get the empty interval [+inf,-inf] so they never pass a node test */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* lanes that start out inactive count as already terminated */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node; stack_node[0] is a sentinel whose pop
   terminates the traversal loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:   /* 'pop' is the goto target used by the down-traversal loop below */
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active lanes left: trace each remaining ray individually */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* mask out terminated rays for all further node tests */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* defer the current node and re-enter the pop loop, which will take
   the single-ray path for it */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* a leaf intersector may request deferred traversal of a lazily built subtree */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* publish result: lanes found occluded get tfar = -inf */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x18e8, %rsp # imm = 0x18E8
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x22ddf8
movq %rdx, %r9
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm4, %xmm4, %xmm4
vcmpnltps %xmm4, %xmm0, %xmm2
vandps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x60(%rsp)
vmovmskps %xmm1, %eax
testl %eax, %eax
je 0x22ddf8
movq %rcx, %r10
leaq 0x80(%r9), %r11
movzbl %al, %eax
leaq 0x180(%rsp), %rcx
vmovaps 0x40(%r9), %xmm3
vmovaps 0x50(%r9), %xmm2
vmovaps 0x60(%r9), %xmm1
vmulps %xmm1, %xmm1, %xmm5
vmulps %xmm2, %xmm2, %xmm6
vaddps %xmm6, %xmm5, %xmm5
vmulps %xmm3, %xmm3, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vrsqrtps %xmm5, %xmm6
vbroadcastss 0x1cbf339(%rip), %xmm7 # 0x1eec718
vmulps %xmm7, %xmm6, %xmm7
vbroadcastss 0x1cbf330(%rip), %xmm8 # 0x1eec71c
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vmulps %xmm6, %xmm6, %xmm6
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vmovaps %xmm5, -0x10(%rcx)
vbroadcastss 0x1cf3ab2(%rip), %xmm15 # 0x1f20ec0
vmovss 0x1cbf302(%rip), %xmm5 # 0x1eec718
vmovss 0x1cbf762(%rip), %xmm6 # 0x1eecb80
bsfq %rax, %rdx
vmovss 0x40(%r9,%rdx,4), %xmm7
vinsertps $0x1c, 0x50(%r9,%rdx,4), %xmm7, %xmm7 # xmm7 = xmm7[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r9,%rdx,4), %xmm7, %xmm8 # xmm8 = xmm7[0,1],mem[0],zero
vbroadcastss 0x170(%rsp,%rdx,4), %xmm7
vmulps %xmm7, %xmm8, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,0]
vmovshdup %xmm8, %xmm10 # xmm10 = xmm8[1,1,3,3]
vxorps %xmm15, %xmm10, %xmm10
vunpckhps %xmm4, %xmm8, %xmm11 # xmm11 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
vmovss %xmm10, %xmm4, %xmm10 # xmm10 = xmm10[0],xmm4[1,2,3]
vshufps $0x41, %xmm10, %xmm11, %xmm10 # xmm10 = xmm11[1,0],xmm10[0,1]
vxorpd %xmm15, %xmm9, %xmm9
vinsertps $0x2a, %xmm8, %xmm9, %xmm9 # xmm9 = xmm9[0],zero,xmm8[0],zero
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vdpps $0x7f, %xmm9, %xmm9, %xmm12
vcmpltps %xmm11, %xmm12, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vblendvps %xmm11, %xmm10, %xmm9, %xmm9
vdpps $0x7f, %xmm9, %xmm9, %xmm10
vrsqrtss %xmm10, %xmm10, %xmm11
vmulss %xmm5, %xmm11, %xmm12
vmulss %xmm6, %xmm10, %xmm10
vmulss %xmm11, %xmm10, %xmm10
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm10, %xmm10
vsubss %xmm10, %xmm12, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm10, %xmm9, %xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,2,0,3]
vmulps %xmm9, %xmm11, %xmm11
vmulps %xmm10, %xmm8, %xmm10
vsubps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm10, %xmm10, %xmm11
leaq -0x1(%rax), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm5, %xmm12, %xmm13
vmulss %xmm6, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm10, %xmm11, %xmm10
vmulps %xmm7, %xmm8, %xmm7
vunpcklps %xmm7, %xmm9, %xmm8 # xmm8 = xmm9[0],xmm7[0],xmm9[1],xmm7[1]
vunpckhps %xmm7, %xmm9, %xmm7 # xmm7 = xmm9[2],xmm7[2],xmm9[3],xmm7[3]
vunpcklps %xmm4, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm4[0],xmm10[1],xmm4[1]
vunpckhps %xmm4, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm4[2],xmm10[3],xmm4[3]
vunpcklps %xmm10, %xmm7, %xmm7 # xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1]
vunpcklps %xmm9, %xmm8, %xmm10 # xmm10 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
vunpckhps %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
vmovaps %xmm10, (%rcx,%rdx)
vmovaps %xmm8, 0x10(%rcx,%rdx)
vmovaps %xmm7, 0x20(%rcx,%rdx)
andq %rdi, %rax
jne 0x22d41e
vmovaps (%r9), %xmm4
vmovaps %xmm4, 0x80(%rsp)
vmovaps 0x10(%r9), %xmm4
vmovaps %xmm4, 0x90(%rsp)
vmovaps 0x20(%r9), %xmm4
vmovaps %xmm4, 0xa0(%rsp)
vmovaps %xmm3, 0xb0(%rsp)
vmovaps %xmm2, 0xc0(%rsp)
vmovaps %xmm1, 0xd0(%rsp)
vbroadcastss 0x1cf391b(%rip), %xmm6 # 0x1f20ec4
vandps %xmm6, %xmm3, %xmm4
vbroadcastss 0x1cc3a32(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm4, %xmm4
vbroadcastss 0x1cbf150(%rip), %xmm8 # 0x1eec714
vdivps %xmm3, %xmm8, %xmm3
vandps %xmm6, %xmm2, %xmm5
vcmpltps %xmm7, %xmm5, %xmm5
vdivps %xmm2, %xmm8, %xmm2
vandps %xmm6, %xmm1, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vdivps %xmm1, %xmm8, %xmm1
vbroadcastss 0x1cf3975(%rip), %xmm7 # 0x1f20f60
vblendvps %xmm4, %xmm7, %xmm3, %xmm3
vblendvps %xmm5, %xmm7, %xmm2, %xmm2
vblendvps %xmm6, %xmm7, %xmm1, %xmm1
vmovaps %xmm3, 0xe0(%rsp)
vmovaps %xmm2, 0xf0(%rsp)
vmovaps %xmm1, 0x100(%rsp)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1d2d33a(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm3, %xmm3
vcmpnltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d27364(%rip), %xmm5 # 0x1f549a0
vbroadcastss 0x1d2d323(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm2, %xmm5, %xmm6, %xmm2
vmovaps %xmm3, 0x110(%rsp)
vmovaps %xmm2, 0x120(%rsp)
vcmpnltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d2d301(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d2d2fc(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0x130(%rsp)
vmovaps 0x30(%r9), %xmm1
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm0, %xmm2
vbroadcastss 0x1cbe386(%rip), %xmm10 # 0x1eeba20
vmovaps 0x60(%rsp), %xmm3
vblendvps %xmm3, %xmm1, %xmm10, %xmm0
vmovaps %xmm0, 0x140(%rsp)
vbroadcastss 0x1cbf4cc(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x150(%rsp)
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0x30(%rsp)
cmpq $0x0, 0x8(%r10)
je 0x22d6f0
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r15d, %r15d
cmpb $0x1, %al
adcq $0x2, %r15
jmp 0x22d6f4
pushq $0x3
popq %r15
leaq 0x250(%rsp), %r12
movq $-0x8, -0x10(%r12)
leaq 0x9f0(%rsp), %rbx
vmovaps %xmm10, -0x20(%rbx)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm0, -0x10(%rbx)
leaq 0x1f22859(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x160(%rsp)
vbroadcastss 0x1cf27d3(%rip), %xmm11 # 0x1f1ff10
vbroadcastss 0x1cf27ce(%rip), %xmm12 # 0x1f1ff14
movq %r10, 0x48(%rsp)
movq %r9, 0x40(%rsp)
addq $-0x10, %rbx
movq -0x8(%r12), %rbp
addq $-0x8, %r12
cmpq $-0x8, %rbp
je 0x22dcf3
vmovaps (%rbx), %xmm13
vcmpltps 0x150(%rsp), %xmm13, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22dcf7
movzbl %al, %r14d
popcntl %r14d, %r13d
xorl %eax, %eax
cmpq %r15, %r13
jbe 0x22dcfc
cmpq %r15, %r13
jbe 0x22dce5
testb $0x8, %bpl
pushq $0x8
popq %r14
jne 0x22dbbd
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %r14, %rbp
vmovaps %xmm10, %xmm13
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x22d8e4
testb $0x7, %al
jne 0x22d904
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x80(%rsp), %xmm1
vmovaps 0x90(%rsp), %xmm2
vmovaps 0xa0(%rsp), %xmm3
vmovaps 0xe0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0xf0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x100(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vminps %xmm1, %xmm5, %xmm0
vminps %xmm2, %xmm7, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vminps %xmm3, %xmm9, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmulps %xmm0, %xmm11, %xmm0
vmaxps %xmm1, %xmm5, %xmm1
vmaxps %xmm2, %xmm7, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm9, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmaxps 0x140(%rsp), %xmm0, %xmm2
vmulps %xmm1, %xmm12, %xmm1
vminps 0x150(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vmovddup 0x1cf3633(%rip), %xmm2 # xmm2 = mem[0,0]
vptest %xmm2, %xmm1
je 0x22d8e4
vpslld $0x1f, %xmm1, %xmm1
vblendvps %xmm1, %xmm0, %xmm10, %xmm0
cmpq $0x8, %rbp
je 0x22d8dd
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm13, (%rbx)
addq $0x10, %rbx
vmovaps %xmm0, %xmm13
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x22db6c
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x22d7c0
jmp 0x22db6c
vbroadcastss 0x20(%rcx,%rdx,4), %xmm2
vbroadcastss 0x30(%rcx,%rdx,4), %xmm1
vbroadcastss 0x40(%rcx,%rdx,4), %xmm0
vmovaps %xmm0, 0x10(%rsp)
vbroadcastss 0x50(%rcx,%rdx,4), %xmm5
vbroadcastss 0x60(%rcx,%rdx,4), %xmm4
vbroadcastss 0x70(%rcx,%rdx,4), %xmm3
vbroadcastss 0x80(%rcx,%rdx,4), %xmm8
vbroadcastss 0x90(%rcx,%rdx,4), %xmm7
vbroadcastss 0xa0(%rcx,%rdx,4), %xmm6
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xd0(%rsp), %xmm10
vmulps %xmm10, %xmm8, %xmm11
vmulps %xmm7, %xmm10, %xmm12
vmovaps %xmm13, 0x20(%rsp)
vmulps %xmm5, %xmm9, %xmm13
vaddps %xmm11, %xmm13, %xmm11
vmulps %xmm4, %xmm9, %xmm13
vaddps %xmm12, %xmm13, %xmm12
vmovaps 0xb0(%rsp), %xmm13
vmulps %xmm6, %xmm10, %xmm10
vmulps %xmm3, %xmm9, %xmm9
vaddps %xmm10, %xmm9, %xmm9
vmulps %xmm2, %xmm13, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm1, %xmm13, %xmm11
vaddps %xmm12, %xmm11, %xmm11
vbroadcastss 0xb0(%rcx,%rdx,4), %xmm12
vmulps %xmm0, %xmm13, %xmm13
vaddps %xmm9, %xmm13, %xmm9
vbroadcastss 0x1cf34fb(%rip), %xmm14 # 0x1f20ec4
vandps %xmm14, %xmm10, %xmm13
vbroadcastss 0x1cc3611(%rip), %xmm0 # 0x1ef0fe8
vcmpltps %xmm0, %xmm13, %xmm13
vblendvps %xmm13, %xmm0, %xmm10, %xmm10
vandps %xmm14, %xmm11, %xmm13
vcmpltps %xmm0, %xmm13, %xmm13
vblendvps %xmm13, %xmm0, %xmm11, %xmm11
vandps %xmm14, %xmm9, %xmm13
vcmpltps %xmm0, %xmm13, %xmm13
vblendvps %xmm13, %xmm0, %xmm9, %xmm13
vrcpps %xmm10, %xmm9
vmulps %xmm10, %xmm9, %xmm10
vbroadcastss 0x1cbecff(%rip), %xmm0 # 0x1eec714
vsubps %xmm10, %xmm0, %xmm10
vmulps %xmm10, %xmm9, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vrcpps %xmm11, %xmm10
vmulps %xmm11, %xmm10, %xmm11
vsubps %xmm11, %xmm0, %xmm11
vmulps %xmm11, %xmm10, %xmm11
vaddps %xmm11, %xmm10, %xmm10
vrcpps %xmm13, %xmm11
vmulps %xmm13, %xmm11, %xmm13
vsubps %xmm13, %xmm0, %xmm13
vmulps %xmm13, %xmm11, %xmm13
vaddps %xmm13, %xmm11, %xmm11
vmovaps 0xa0(%rsp), %xmm13
vmulps %xmm13, %xmm8, %xmm8
vaddps %xmm8, %xmm12, %xmm8
vbroadcastss 0xc0(%rcx,%rdx,4), %xmm12
vmulps %xmm7, %xmm13, %xmm7
vaddps %xmm7, %xmm12, %xmm7
vmulps %xmm6, %xmm13, %xmm6
vmovaps 0x20(%rsp), %xmm13
vbroadcastss 0xd0(%rcx,%rdx,4), %xmm12
vaddps %xmm6, %xmm12, %xmm6
vmovaps 0x90(%rsp), %xmm12
vmulps %xmm5, %xmm12, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vmulps %xmm4, %xmm12, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm3, %xmm12, %xmm3
vbroadcastss 0x1cf245b(%rip), %xmm12 # 0x1f1ff14
vaddps %xmm6, %xmm3, %xmm3
vmovaps 0x80(%rsp), %xmm6
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm5, %xmm2, %xmm2
vxorps %xmm15, %xmm9, %xmm5
vmulps %xmm5, %xmm2, %xmm2
vmulps %xmm6, %xmm1, %xmm1
vaddps %xmm4, %xmm1, %xmm1
vxorps %xmm15, %xmm10, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vmulps 0x10(%rsp), %xmm6, %xmm0
vxorps %xmm15, %xmm11, %xmm4
vaddps %xmm3, %xmm0, %xmm0
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm2, %xmm9, %xmm3
vaddps %xmm1, %xmm10, %xmm4
vbroadcastss 0x1cbdf14(%rip), %xmm10 # 0x1eeba20
vaddps %xmm0, %xmm11, %xmm5
vbroadcastss 0x1cf23f7(%rip), %xmm11 # 0x1f1ff10
vpminsd %xmm3, %xmm2, %xmm6
vpminsd %xmm4, %xmm1, %xmm7
vpmaxsd %xmm7, %xmm6, %xmm6
vpminsd %xmm5, %xmm0, %xmm7
vpmaxsd %xmm7, %xmm6, %xmm6
vpmaxsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm4, %xmm1, %xmm1
vpminsd %xmm1, %xmm2, %xmm1
vpmaxsd %xmm5, %xmm0, %xmm0
vpminsd %xmm0, %xmm1, %xmm1
vmulps %xmm6, %xmm11, %xmm0
vpmaxsd 0x140(%rsp), %xmm0, %xmm2
vmulps %xmm1, %xmm12, %xmm1
vpminsd 0x150(%rsp), %xmm1, %xmm1
jmp 0x22d8a8
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x22dbb6
vmovaps 0x150(%rsp), %xmm0
vcmpnleps %xmm13, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r15
jae 0x22dba6
testb %cl, %cl
je 0x22dce5
testb $0x8, %bpl
je 0x22d7ac
jmp 0x22dbbd
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm13, (%rbx)
addq $0x10, %rbx
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x22db92
cmpq $-0x8, %rbp
je 0x22dcf3
movq %r11, 0x10(%rsp)
movq %r8, (%rsp)
vmovaps 0x150(%rsp), %xmm0
vcmpnleps %xmm13, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22ddd6
andq $-0x10, %rbp
vmovaps 0x30(%rsp), %xmm1
movzbl (%rbp), %eax
movq %rsi, 0x8(%rsp)
movq 0x8(%rsi), %rcx
vmovaps 0x160(%rsp), %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps %xmm1, 0x50(%rsp)
vmovmskps %xmm1, %edx
xorl $0xf, %edx
je 0x22dc6e
movzbl %dl, %r14d
shll $0x6, %eax
addq %rcx, %rax
addq $0x18, %rax
movq %rax, 0x20(%rsp)
bsfq %r14, %r13
leaq 0x170(%rsp), %rdi
movq %r9, %rsi
movq %r13, %rdx
movq %r10, %rcx
movq %rbp, %r8
movq 0x20(%rsp), %rax
callq *(%rax)
testb %al, %al
je 0x22dc5b
orl $-0x1, 0x70(%rsp,%r13,4)
leaq -0x1(%r14), %rax
andq %rax, %r14
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r9
jne 0x22dc32
vmovaps 0x50(%rsp), %xmm0
vorps 0x70(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vtestps 0x1cbe197(%rip), %xmm0 # 0x1eebe20
jb 0x22dcb0
vmovaps 0x150(%rsp), %xmm1
vbroadcastss 0x1cbeee7(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x150(%rsp)
xorl %eax, %eax
jmp 0x22dcb3
pushq $0x3
popq %rax
movq 0x8(%rsp), %rsi
movq (%rsp), %r8
movq 0x10(%rsp), %r11
vbroadcastss 0x1cf31f6(%rip), %xmm15 # 0x1f20ec0
vbroadcastss 0x1cbdd4d(%rip), %xmm10 # 0x1eeba20
vbroadcastss 0x1cf2234(%rip), %xmm11 # 0x1f1ff10
vbroadcastss 0x1cf222f(%rip), %xmm12 # 0x1f1ff14
cmpl $0x3, %eax
jne 0x22d750
jmp 0x22ddde
pushq $0x3
jmp 0x22dcf9
pushq $0x2
popq %rax
jmp 0x22dce5
vmovaps %xmm13, 0x20(%rsp)
movq %r11, 0x10(%rsp)
movq %rsi, 0x8(%rsp)
movq %r8, (%rsp)
bsfq %r14, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x50(%rsp)
leaq 0x170(%rsp), %r8
pushq %r10
leaq 0x88(%rsp), %rax
pushq %rax
callq 0x271c42
popq %rcx
popq %rdx
testb %al, %al
je 0x22dd4a
movq 0x50(%rsp), %rax
orl $-0x1, 0x30(%rsp,%rax,4)
leaq -0x1(%r14), %rax
andq %rax, %r14
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r9
movq 0x8(%rsp), %rsi
movq (%rsp), %r8
jne 0x22dd10
vmovaps 0x30(%rsp), %xmm0
vtestps 0x1cbe0ab(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x10(%rsp), %r11
vbroadcastss 0x1cf313a(%rip), %xmm15 # 0x1f20ec0
vbroadcastss 0x1cbdc91(%rip), %xmm10 # 0x1eeba20
vbroadcastss 0x1cf2178(%rip), %xmm11 # 0x1f1ff10
vbroadcastss 0x1cf2173(%rip), %xmm12 # 0x1f1ff14
vmovaps 0x20(%rsp), %xmm13
jb 0x22d795
vmovaps 0x150(%rsp), %xmm1
vbroadcastss 0x1cbedc5(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x150(%rsp)
pushq $0x2
popq %rax
jmp 0x22d795
pushq $0x2
popq %rax
jmp 0x22dcb8
vmovaps 0x60(%rsp), %xmm0
vandps 0x30(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cbed91(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r11)
addq $0x18e8, %rsp # imm = 0x18E8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16781328, true, embree::avx::VirtualCurveIntersectorK<4>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Closest-hit query for a packet of K rays against an N-wide BVH.
   In 'single' mode every active ray is traced individually via intersect1
   and the function returns early. Otherwise rays are grouped by direction
   octant (when the packet mixes opposing direction signs) and each group is
   traversed in chunk mode with a shared node/distance stack. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent packets over a BVH_AN1 layout take a dedicated fast path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* inactive lanes get the empty interval [+inf,-inf] so they never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* trace every active ray individually from the root, then return */
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* per-lane direction octant (3 sign bits); -1 marks inactive lanes */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant of the reference lane */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: one chunk traversal per octant group until all lanes are consumed */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
/* popcount of the 3-bit octant difference: lanes differing in at most
   one sign bit are grouped with the reference lane */
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
/* lanes outside the current group get the empty interval [+inf,-inf] */
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node; stack_node[0] is a sentinel whose
   pop terminates the traversal loop */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:   /* 'pop' is the goto target used by the down-traversal loop below */
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active lanes left: trace each remaining ray individually */
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* pick up hit distances found by the single-ray traversals */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits: bubble the nearest
   pushed entries toward the stack top */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* defer the current node and re-enter the pop loop, which will take
   the single-ray path for it */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink the traversal interval to the new closest hits */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* a leaf intersector may request deferred traversal of a lazily built subtree */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b8, %rsp # imm = 0x1B8
movq (%rsi), %rbx
cmpq $0x8, 0x70(%rbx)
je 0x22e187
vmovdqa (%rdi), %xmm3
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd %xmm0, %xmm3, %xmm0
vmovmskps %xmm0, %ebp
testl %ebp, %ebp
je 0x22e187
movq %rcx, %r13
movq %rdx, %r14
movq %rsi, %r12
leaq 0x10(%rsp), %rax
vmovaps 0x40(%rdx), %xmm2
vmovaps 0x50(%rdx), %xmm1
vmovaps 0x60(%rdx), %xmm0
vmulps %xmm0, %xmm0, %xmm4
vmulps %xmm1, %xmm1, %xmm5
vaddps %xmm5, %xmm4, %xmm4
vmulps %xmm2, %xmm2, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vrsqrtps %xmm4, %xmm5
vbroadcastss 0x1cbe899(%rip), %xmm6 # 0x1eec718
vmulps %xmm6, %xmm5, %xmm6
vbroadcastss 0x1cbe890(%rip), %xmm7 # 0x1eec71c
vmulps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vmulps %xmm5, %xmm5, %xmm5
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovaps %xmm4, -0x10(%rax)
vbroadcastss 0x1cf3012(%rip), %xmm4 # 0x1f20ec0
vxorps %xmm5, %xmm5, %xmm5
vmovss 0x1cbe85e(%rip), %xmm6 # 0x1eec718
vmovss 0x1cbecbe(%rip), %xmm7 # 0x1eecb80
movq %rbp, %rcx
bsfq %rcx, %rdx
vmovss 0x40(%r14,%rdx,4), %xmm8
vinsertps $0x1c, 0x50(%r14,%rdx,4), %xmm8, %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r14,%rdx,4), %xmm8, %xmm9 # xmm9 = xmm8[0,1],mem[0],zero
vbroadcastss (%rsp,%rdx,4), %xmm8
vmulps %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmovshdup %xmm9, %xmm11 # xmm11 = xmm9[1,1,3,3]
vxorps %xmm4, %xmm11, %xmm11
vunpckhps %xmm5, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
vmovss %xmm11, %xmm5, %xmm11 # xmm11 = xmm11[0],xmm5[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm4, %xmm10, %xmm10
vinsertps $0x2a, %xmm9, %xmm10, %xmm10 # xmm10 = xmm10[0],zero,xmm9[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm10, %xmm13
vcmpltps %xmm12, %xmm13, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vblendvps %xmm12, %xmm11, %xmm10, %xmm10
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm6, %xmm12, %xmm13
vmulss %xmm7, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,2,0,3]
vmulps %xmm10, %xmm12, %xmm12
vmulps %xmm11, %xmm9, %xmm11
vsubps %xmm12, %xmm11, %xmm11
vshufps $0xc9, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3]
vdpps $0x7f, %xmm11, %xmm11, %xmm12
leaq -0x1(%rcx), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss %xmm6, %xmm13, %xmm14
vmulss %xmm7, %xmm12, %xmm12
vmulss %xmm13, %xmm12, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm13, %xmm12, %xmm12
vsubss %xmm12, %xmm14, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm11
vmulps %xmm9, %xmm8, %xmm8
vunpcklps %xmm8, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
vunpckhps %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
vunpcklps %xmm5, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm5[0],xmm11[1],xmm5[1]
vunpckhps %xmm5, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm5[2],xmm11[3],xmm5[3]
vunpcklps %xmm11, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
vunpcklps %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
vunpckhps %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vmovaps %xmm11, (%rax,%rdx)
vmovaps %xmm9, 0x10(%rax,%rdx)
vmovaps %xmm8, 0x20(%rax,%rdx)
andq %rdi, %rcx
jne 0x22dec5
vpcmpeqd %xmm4, %xmm4, %xmm4
vpcmpeqd %xmm4, %xmm3, %xmm3
vmovaps (%r14), %xmm4
leaq 0xd0(%rsp), %rax
vmovaps %xmm4, (%rax)
vmovaps 0x10(%r14), %xmm4
vmovaps %xmm4, 0x10(%rax)
vmovaps 0x20(%r14), %xmm4
vmovaps %xmm4, 0x20(%rax)
vmovaps %xmm2, 0x30(%rax)
vmovaps %xmm1, 0x40(%rax)
vmovaps %xmm0, 0x50(%rax)
vbroadcastss 0x1cf2e7f(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm5
vbroadcastss 0x1cc2f96(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cbe6b4(%rip), %xmm7 # 0x1eec714
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm1, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm0, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vbroadcastss 0x1cf2edd(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vblendvps %xmm8, %xmm6, %xmm1, %xmm1
vdivps %xmm0, %xmm7, %xmm0
vblendvps %xmm4, %xmm6, %xmm0, %xmm0
vmovaps %xmm2, 0x60(%rax)
vmovaps %xmm1, 0x70(%rax)
vmovaps %xmm0, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d2c8a7(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm2, %xmm2
vmovaps %xmm2, 0x90(%rax)
vcmpnltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d268c9(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d2c888(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm0, %xmm0
vbroadcastss 0x1d2c870(%rip), %xmm1 # 0x1f5a96c
vbroadcastss 0x1d2c86b(%rip), %xmm2 # 0x1f5a970
vblendvps %xmm0, %xmm1, %xmm2, %xmm0
vmovaps %xmm0, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm0
vmovaps 0x80(%r14), %xmm1
vmaxps %xmm4, %xmm0, %xmm0
vmaxps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cbd8ed(%rip), %xmm2 # 0x1eeba20
vblendvps %xmm3, %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0xc0(%rax)
vbroadcastss 0x1cbea3a(%rip), %xmm0 # 0x1eecb84
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %r15
movq 0x70(%rbx), %rdx
movq %r12, %rdi
movq %rbx, %rsi
movq %rsp, %r8
movq %r14, %r9
pushq %r13
leaq 0xd8(%rsp), %rax
pushq %rax
callq 0x272244
popq %rax
popq %rcx
andq %r15, %rbp
jne 0x22e158
addq $0x1b8, %rsp # imm = 0x1B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16781328, true, embree::avx::VirtualCurveIntersectorK<4>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/// Occlusion (shadow) query for a packet of K rays against an N-wide BVH.
/// Traverses in chunk (packet) mode, switching to single-ray traversal when
/// too few lanes remain active (see switchThreshold below).
/// @param valid_i  per-lane activity mask; a lane is active iff its entry is -1
/// @param This     intersector collection whose ptr field holds the BVH
/// @param ray      ray packet; on exit, tfar of occluded lanes is set to -inf
/// @param context  ray query context (queried for coherency flags)
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
/* a negative tfar marks a ray that is already occluded/disabled */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
/* per-packet precomputations consumed by the primitive intersector */
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get an empty [+inf,-inf] interval so they never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* lanes that were never active count as already terminated */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* slot 0 is a sentinel: popping invalidNode ends the traversal loop */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* trace each remaining active lane individually from this subtree */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* deactivate terminated lanes by collapsing their interval to empty */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are stored front-packed, so the first empty slot ends the loop */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
/* primitive intersector produced a deferred subtree; push it with -inf
near distance so it is never culled against tfar */
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* report occlusion: set tfar=-inf only on lanes that were valid on entry */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1948, %rsp # imm = 0x1948
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x22ede5
movq %rdx, %r14
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm4, %xmm4, %xmm4
vcmpnltps %xmm4, %xmm0, %xmm2
vandps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x70(%rsp)
vmovmskps %xmm1, %eax
testl %eax, %eax
je 0x22ede5
movq %rcx, %r10
leaq 0x80(%r14), %r9
movzbl %al, %eax
leaq 0x1e0(%rsp), %rcx
vmovaps 0x40(%r14), %xmm3
vmovaps 0x50(%r14), %xmm2
vmovaps 0x60(%r14), %xmm1
vmulps %xmm1, %xmm1, %xmm5
vmulps %xmm2, %xmm2, %xmm6
vaddps %xmm6, %xmm5, %xmm5
vmulps %xmm3, %xmm3, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vrsqrtps %xmm5, %xmm6
vbroadcastss 0x1cbe4e5(%rip), %xmm7 # 0x1eec718
vmulps %xmm7, %xmm6, %xmm7
vbroadcastss 0x1cbe4dc(%rip), %xmm8 # 0x1eec71c
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vmulps %xmm6, %xmm6, %xmm6
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vmovaps %xmm5, -0x10(%rcx)
vbroadcastss 0x1cf2c5e(%rip), %xmm5 # 0x1f20ec0
vmovss 0x1cbe4ae(%rip), %xmm6 # 0x1eec718
vmovss 0x1cbe90e(%rip), %xmm7 # 0x1eecb80
bsfq %rax, %rdx
vmovss 0x40(%r14,%rdx,4), %xmm8
vinsertps $0x1c, 0x50(%r14,%rdx,4), %xmm8, %xmm8 # xmm8 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x60(%r14,%rdx,4), %xmm8, %xmm9 # xmm9 = xmm8[0,1],mem[0],zero
vbroadcastss 0x1d0(%rsp,%rdx,4), %xmm8
vmulps %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmovshdup %xmm9, %xmm11 # xmm11 = xmm9[1,1,3,3]
vxorps %xmm5, %xmm11, %xmm11
vunpckhps %xmm4, %xmm9, %xmm12 # xmm12 = xmm9[2],xmm4[2],xmm9[3],xmm4[3]
vmovss %xmm11, %xmm4, %xmm11 # xmm11 = xmm11[0],xmm4[1,2,3]
vshufps $0x41, %xmm11, %xmm12, %xmm11 # xmm11 = xmm12[1,0],xmm11[0,1]
vxorpd %xmm5, %xmm10, %xmm10
vinsertps $0x2a, %xmm9, %xmm10, %xmm10 # xmm10 = xmm10[0],zero,xmm9[0],zero
vdpps $0x7f, %xmm11, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm10, %xmm13
vcmpltps %xmm12, %xmm13, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vblendvps %xmm12, %xmm11, %xmm10, %xmm10
vdpps $0x7f, %xmm10, %xmm10, %xmm11
vrsqrtss %xmm11, %xmm11, %xmm12
vmulss %xmm6, %xmm12, %xmm13
vmulss %xmm7, %xmm11, %xmm11
vmulss %xmm12, %xmm11, %xmm11
vmulss %xmm12, %xmm12, %xmm12
vmulss %xmm12, %xmm11, %xmm11
vsubss %xmm11, %xmm13, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm11, %xmm10, %xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,2,0,3]
vmulps %xmm10, %xmm12, %xmm12
vmulps %xmm11, %xmm9, %xmm11
vsubps %xmm12, %xmm11, %xmm11
vshufps $0xc9, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3]
vdpps $0x7f, %xmm11, %xmm11, %xmm12
leaq -0x1(%rax), %rdi
imulq $0x30, %rdx, %rdx
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss %xmm6, %xmm13, %xmm14
vmulss %xmm7, %xmm12, %xmm12
vmulss %xmm13, %xmm12, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm13, %xmm12, %xmm12
vsubss %xmm12, %xmm14, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm11
vmulps %xmm9, %xmm8, %xmm8
vunpcklps %xmm8, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm8[0],xmm10[1],xmm8[1]
vunpckhps %xmm8, %xmm10, %xmm8 # xmm8 = xmm10[2],xmm8[2],xmm10[3],xmm8[3]
vunpcklps %xmm4, %xmm11, %xmm10 # xmm10 = xmm11[0],xmm4[0],xmm11[1],xmm4[1]
vunpckhps %xmm4, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm4[2],xmm11[3],xmm4[3]
vunpcklps %xmm11, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm11[0],xmm8[1],xmm11[1]
vunpcklps %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
vunpckhps %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vmovaps %xmm11, (%rcx,%rdx)
vmovaps %xmm9, 0x10(%rcx,%rdx)
vmovaps %xmm8, 0x20(%rcx,%rdx)
andq %rdi, %rax
jne 0x22e272
vmovaps (%r14), %xmm4
vmovaps %xmm4, 0x90(%rsp)
vmovaps 0x10(%r14), %xmm4
vmovaps %xmm4, 0xa0(%rsp)
vmovaps 0x20(%r14), %xmm4
vmovaps %xmm4, 0xb0(%rsp)
vmovaps %xmm3, 0xc0(%rsp)
vmovaps %xmm2, 0xd0(%rsp)
vmovaps %xmm1, 0xe0(%rsp)
vbroadcastss 0x1cf2ac5(%rip), %xmm6 # 0x1f20ec4
vandps %xmm6, %xmm3, %xmm4
vbroadcastss 0x1cc2bdc(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm4, %xmm4
vbroadcastss 0x1cbe2fa(%rip), %xmm8 # 0x1eec714
vdivps %xmm3, %xmm8, %xmm3
vandps %xmm6, %xmm2, %xmm5
vcmpltps %xmm7, %xmm5, %xmm5
vdivps %xmm2, %xmm8, %xmm2
vandps %xmm6, %xmm1, %xmm6
vcmpltps %xmm7, %xmm6, %xmm6
vdivps %xmm1, %xmm8, %xmm1
vbroadcastss 0x1cf2b1f(%rip), %xmm7 # 0x1f20f60
vblendvps %xmm4, %xmm7, %xmm3, %xmm3
vblendvps %xmm5, %xmm7, %xmm2, %xmm2
vblendvps %xmm6, %xmm7, %xmm1, %xmm1
vmovaps %xmm3, 0xf0(%rsp)
vmovaps %xmm2, 0x100(%rsp)
vmovaps %xmm1, 0x110(%rsp)
vxorps %xmm6, %xmm6, %xmm6
vcmpltps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1d2c4e4(%rip), %xmm4 # 0x1f5a964
vandps %xmm4, %xmm3, %xmm3
vcmpnltps %xmm6, %xmm2, %xmm2
vbroadcastss 0x1d2650e(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d2c4cd(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm2, %xmm4, %xmm5, %xmm2
vmovaps %xmm3, 0x120(%rsp)
vmovaps %xmm2, 0x130(%rsp)
vcmpnltps %xmm6, %xmm1, %xmm1
vbroadcastss 0x1d2c4ab(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d2c4a6(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0x140(%rsp)
vmovaps 0x30(%r14), %xmm1
vmaxps %xmm6, %xmm1, %xmm1
vmaxps %xmm6, %xmm0, %xmm2
vbroadcastss 0x1cbd530(%rip), %xmm14 # 0x1eeba20
vmovaps 0x70(%rsp), %xmm3
vblendvps %xmm3, %xmm1, %xmm14, %xmm0
vmovaps %xmm0, 0x150(%rsp)
vbroadcastss 0x1cbe676(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, 0x160(%rsp)
vpcmpeqd %xmm1, %xmm1, %xmm1
vxorps %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0x50(%rsp)
cmpq $0x0, 0x8(%r10)
je 0x22e546
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r11d, %r11d
cmpb $0x1, %al
adcq $0x2, %r11
jmp 0x22e54a
pushq $0x3
popq %r11
leaq 0x2b0(%rsp), %r12
movq $-0x8, -0x10(%r12)
leaq 0xa50(%rsp), %rbx
vmovaps %xmm14, -0x20(%rbx)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm0, -0x10(%rbx)
leaq 0x1f21a03(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x170(%rsp)
vbroadcastss 0x1cf197d(%rip), %xmm15 # 0x1f1ff10
movq %r10, 0x68(%rsp)
addq $-0x10, %rbx
movq -0x8(%r12), %rbp
addq $-0x8, %r12
cmpq $-0x8, %rbp
je 0x22ece4
vmovaps (%rbx), %xmm1
vcmpltps 0x160(%rsp), %xmm1, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22ece8
movzbl %al, %r15d
popcntl %r15d, %r13d
xorl %eax, %eax
cmpq %r11, %r13
jbe 0x22eced
cmpq %r11, %r13
jbe 0x22ecd6
pushq $0x8
popq %r15
testb $0x8, %bpl
jne 0x22ebb3
vmovaps 0x160(%rsp), %xmm0
movq %rbp, %rax
andq $-0x10, %rax
movl %ebp, %ecx
andl $0x7, %ecx
vcmpnleps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
xorl %edx, %edx
movq %r15, %rbp
vmovaps %xmm14, %xmm1
movq (%rax,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x22eb49
vmovaps %xmm1, 0x30(%rsp)
cmpl $0x6, %ecx
je 0x22e63f
cmpl $0x1, %ecx
jne 0x22e7d5
vbroadcastss 0x80(%rax,%rdx,4), %xmm0
vbroadcastss 0x20(%rax,%rdx,4), %xmm2
vmovaps 0x70(%r14), %xmm1
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa0(%rax,%rdx,4), %xmm2
vbroadcastss 0x40(%rax,%rdx,4), %xmm3
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0xc0(%rax,%rdx,4), %xmm3
vbroadcastss 0x60(%rax,%rdx,4), %xmm4
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0x90(%rax,%rdx,4), %xmm4
vbroadcastss 0x30(%rax,%rdx,4), %xmm5
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0xb0(%rax,%rdx,4), %xmm5
vbroadcastss 0x50(%rax,%rdx,4), %xmm6
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0xd0(%rax,%rdx,4), %xmm6
vbroadcastss 0x70(%rax,%rdx,4), %xmm7
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vmovaps 0x90(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm10
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vsubps %xmm8, %xmm2, %xmm2
vmovaps 0x100(%rsp), %xmm11
vmulps %xmm2, %xmm11, %xmm12
vsubps %xmm9, %xmm3, %xmm2
vmovaps 0x110(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm13
vsubps %xmm7, %xmm4, %xmm2
vmulps %xmm2, %xmm10, %xmm4
vsubps %xmm8, %xmm5, %xmm2
vmulps %xmm2, %xmm11, %xmm5
vsubps %xmm9, %xmm6, %xmm2
vmulps %xmm3, %xmm2, %xmm3
vpminsd %xmm4, %xmm0, %xmm2
vpminsd %xmm5, %xmm12, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpminsd %xmm3, %xmm13, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vmulps %xmm2, %xmm15, %xmm2
vpmaxsd %xmm4, %xmm0, %xmm0
vpmaxsd %xmm5, %xmm12, %xmm4
vpminsd %xmm4, %xmm0, %xmm0
vpmaxsd %xmm3, %xmm13, %xmm3
vpminsd %xmm3, %xmm0, %xmm3
vpmaxsd 0x150(%rsp), %xmm2, %xmm0
vbroadcastss 0x1cf1786(%rip), %xmm4 # 0x1f1ff14
vmulps %xmm4, %xmm3, %xmm3
vpminsd 0x160(%rsp), %xmm3, %xmm3
vcmpleps %xmm3, %xmm0, %xmm0
cmpl $0x6, %ecx
jne 0x22eb0e
vbroadcastss 0xe0(%rax,%rdx,4), %xmm3
vcmpleps %xmm1, %xmm3, %xmm3
vbroadcastss 0xf0(%rax,%rdx,4), %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vandps %xmm1, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm0
jmp 0x22eb0e
vbroadcastss 0x50(%rax,%rdx,4), %xmm8
vmovaps %xmm8, 0x180(%rsp)
vbroadcastss 0x60(%rax,%rdx,4), %xmm4
vbroadcastss 0x70(%rax,%rdx,4), %xmm5
vbroadcastss 0x80(%rax,%rdx,4), %xmm11
vbroadcastss 0x90(%rax,%rdx,4), %xmm10
vbroadcastss 0xe0(%rax,%rdx,4), %xmm0
vbroadcastss 0xf0(%rax,%rdx,4), %xmm1
vbroadcastss 0x100(%rax,%rdx,4), %xmm2
vbroadcastss 0x110(%rax,%rdx,4), %xmm6
vbroadcastss 0x120(%rax,%rdx,4), %xmm7
vbroadcastss 0x130(%rax,%rdx,4), %xmm9
vmovaps 0x70(%r14), %xmm12
vbroadcastss 0x1cbdec2(%rip), %xmm3 # 0x1eec714
vsubps %xmm12, %xmm3, %xmm13
vmulps %xmm0, %xmm12, %xmm0
vmulps %xmm1, %xmm12, %xmm1
vmulps %xmm2, %xmm12, %xmm2
vmulps 0x1cbd1a5(%rip), %xmm13, %xmm14 # 0x1eeba10
vaddps %xmm0, %xmm14, %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vaddps %xmm1, %xmm14, %xmm0
vmovaps %xmm0, 0x40(%rsp)
vaddps %xmm2, %xmm14, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmulps %xmm6, %xmm12, %xmm0
vmulps %xmm7, %xmm12, %xmm1
vmulps %xmm9, %xmm12, %xmm2
vaddps %xmm0, %xmm13, %xmm0
vmovaps %xmm0, 0x190(%rsp)
vaddps %xmm1, %xmm13, %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
vaddps %xmm2, %xmm13, %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps 0xe0(%rsp), %xmm1
vmulps %xmm1, %xmm11, %xmm2
vmulps %xmm1, %xmm10, %xmm12
vmulps %xmm0, %xmm8, %xmm13
vaddps %xmm2, %xmm13, %xmm2
vmulps %xmm0, %xmm4, %xmm13
vmovaps %xmm4, %xmm9
vaddps %xmm12, %xmm13, %xmm14
vmovaps %xmm15, %xmm4
vbroadcastss 0xa0(%rax,%rdx,4), %xmm15
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x20(%rax,%rdx,4), %xmm12
vmovaps 0xc0(%rsp), %xmm1
vmulps %xmm1, %xmm12, %xmm13
vaddps %xmm2, %xmm13, %xmm2
vbroadcastss 0x30(%rax,%rdx,4), %xmm13
vmulps %xmm1, %xmm13, %xmm6
vaddps %xmm6, %xmm14, %xmm6
vbroadcastss 0x40(%rax,%rdx,4), %xmm14
vmulps %xmm1, %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cf257c(%rip), %xmm7 # 0x1f20ec4
vandps %xmm7, %xmm2, %xmm1
vbroadcastss 0x1cc2693(%rip), %xmm8 # 0x1ef0fe8
vcmpltps %xmm8, %xmm1, %xmm1
vblendvps %xmm1, %xmm8, %xmm2, %xmm1
vandps %xmm7, %xmm6, %xmm2
vcmpltps %xmm8, %xmm2, %xmm2
vblendvps %xmm2, %xmm8, %xmm6, %xmm6
vandps %xmm7, %xmm0, %xmm2
vcmpltps %xmm8, %xmm2, %xmm2
vblendvps %xmm2, %xmm8, %xmm0, %xmm0
vrcpps %xmm1, %xmm2
vmulps %xmm1, %xmm2, %xmm1
vsubps %xmm1, %xmm3, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm2, %xmm2
vrcpps %xmm6, %xmm1
vmulps %xmm6, %xmm1, %xmm6
vsubps %xmm6, %xmm3, %xmm6
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm6, %xmm1, %xmm1
vrcpps %xmm0, %xmm6
vmulps %xmm0, %xmm6, %xmm0
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm6, %xmm0
vmovaps 0xb0(%rsp), %xmm6
vmulps %xmm6, %xmm11, %xmm11
vbroadcastss 0xb0(%rax,%rdx,4), %xmm7
vaddps %xmm7, %xmm11, %xmm7
vmulps %xmm6, %xmm10, %xmm10
vbroadcastss 0xc0(%rax,%rdx,4), %xmm11
vaddps %xmm10, %xmm11, %xmm10
vmulps %xmm6, %xmm15, %xmm6
vmovaps %xmm4, %xmm15
vbroadcastss 0xd0(%rax,%rdx,4), %xmm11
vaddps %xmm6, %xmm11, %xmm6
vmovaps 0xa0(%rsp), %xmm11
vmulps 0x180(%rsp), %xmm11, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vmulps %xmm11, %xmm9, %xmm3
vaddps %xmm3, %xmm10, %xmm3
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vmovaps 0x90(%rsp), %xmm6
vmulps %xmm6, %xmm12, %xmm7
vaddps %xmm4, %xmm7, %xmm4
vmulps %xmm6, %xmm13, %xmm7
vaddps %xmm3, %xmm7, %xmm3
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss 0x1cbcfd2(%rip), %xmm14 # 0x1eeba20
vaddps %xmm5, %xmm6, %xmm5
vmovaps 0x1a0(%rsp), %xmm6
vsubps %xmm4, %xmm6, %xmm6
vmovaps 0x190(%rsp), %xmm7
vsubps %xmm4, %xmm7, %xmm4
vmulps %xmm6, %xmm2, %xmm6
vmulps %xmm4, %xmm2, %xmm2
vmovaps 0x40(%rsp), %xmm4
vsubps %xmm3, %xmm4, %xmm4
vmovaps 0x1b0(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm3
vmulps %xmm4, %xmm1, %xmm4
vmulps %xmm3, %xmm1, %xmm1
vmovaps 0x10(%rsp), %xmm3
vsubps %xmm5, %xmm3, %xmm3
vmulps %xmm3, %xmm0, %xmm3
vmovaps 0x1c0(%rsp), %xmm7
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm5, %xmm0, %xmm0
vpminsd %xmm2, %xmm6, %xmm5
vpminsd %xmm1, %xmm4, %xmm7
vpmaxsd %xmm7, %xmm5, %xmm5
vpminsd %xmm0, %xmm3, %xmm7
vpmaxsd %xmm7, %xmm5, %xmm5
vpmaxsd %xmm2, %xmm6, %xmm2
vpmaxsd %xmm1, %xmm4, %xmm1
vpminsd %xmm1, %xmm2, %xmm1
vpmaxsd %xmm0, %xmm3, %xmm0
vpminsd %xmm0, %xmm1, %xmm0
vmulps %xmm5, %xmm15, %xmm2
vpmaxsd 0x150(%rsp), %xmm2, %xmm1
vbroadcastss 0x1cf1419(%rip), %xmm3 # 0x1f1ff14
vmulps %xmm3, %xmm0, %xmm0
vpminsd 0x160(%rsp), %xmm0, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
vandps 0x20(%rsp), %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22eb62
vblendvps %xmm0, %xmm2, %xmm14, %xmm0
cmpq $0x8, %rbp
je 0x22eb42
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps 0x30(%rsp), %xmm1
vmovaps %xmm1, (%rbx)
addq $0x10, %rbx
vmovaps %xmm0, %xmm1
movq %rdi, %rbp
cmpq $0x8, %rdi
je 0x22eb6a
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x22e61d
jmp 0x22eb6a
vmovaps 0x30(%rsp), %xmm1
jmp 0x22eb49
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x22ebac
vmovaps 0x160(%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r11
jae 0x22eb9c
testb %cl, %cl
jne 0x22e5ea
jmp 0x22ecd6
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm1, (%rbx)
addq $0x10, %rbx
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x22eb8f
cmpq $-0x8, %rbp
je 0x22ece4
movq %r11, 0x10(%rsp)
movq %r9, 0x20(%rsp)
movq %r8, (%rsp)
vmovaps 0x160(%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x22edc3
andq $-0x10, %rbp
vmovaps 0x50(%rsp), %xmm1
movzbl (%rbp), %eax
movq %rsi, 0x8(%rsp)
movq 0x8(%rsi), %rcx
vmovaps 0x170(%rsp), %xmm0
vmovaps %xmm0, 0x80(%rsp)
vmovaps %xmm1, 0x40(%rsp)
vmovmskps %xmm1, %edx
xorl $0xf, %edx
je 0x22ec69
movzbl %dl, %r15d
shll $0x6, %eax
addq %rcx, %rax
addq $0x18, %rax
movq %rax, 0x30(%rsp)
bsfq %r15, %rdx
leaq 0x1d0(%rsp), %rdi
movq %r14, %rsi
movq %rdx, %r13
movq %r10, %rcx
movq %rbp, %r8
movq 0x30(%rsp), %rax
callq *(%rax)
testb %al, %al
je 0x22ec5b
orl $-0x1, 0x80(%rsp,%r13,4)
leaq -0x1(%r15), %rax
andq %rax, %r15
movq 0x68(%rsp), %r10
jne 0x22ec2f
vmovaps 0x40(%rsp), %xmm0
vorps 0x80(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x50(%rsp)
vtestps 0x1cbd199(%rip), %xmm0 # 0x1eebe20
jb 0x22ecae
vmovaps 0x160(%rsp), %xmm1
vbroadcastss 0x1cbdee9(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x160(%rsp)
xorl %eax, %eax
jmp 0x22ecb1
pushq $0x3
popq %rax
movq 0x8(%rsp), %rsi
movq (%rsp), %r8
movq 0x20(%rsp), %r9
vbroadcastss 0x1cbcd58(%rip), %xmm14 # 0x1eeba20
movq 0x10(%rsp), %r11
vbroadcastss 0x1cf123a(%rip), %xmm15 # 0x1f1ff10
cmpl $0x3, %eax
jne 0x22e598
jmp 0x22edcb
pushq $0x3
jmp 0x22ecea
pushq $0x2
popq %rax
jmp 0x22ecd6
vmovaps %xmm1, 0x30(%rsp)
movq %r11, 0x10(%rsp)
movq %r9, 0x20(%rsp)
movq %rsi, 0x8(%rsp)
movq %r8, (%rsp)
bsfq %r15, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x40(%rsp)
leaq 0x1d0(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x98(%rsp), %rax
pushq %rax
callq 0x272b3a
popq %rcx
popq %rdx
testb %al, %al
je 0x22ed43
movq 0x40(%rsp), %rax
orl $-0x1, 0x50(%rsp,%rax,4)
leaq -0x1(%r15), %rax
andq %rax, %r15
movq 0x68(%rsp), %r10
movq 0x8(%rsp), %rsi
movq (%rsp), %r8
jne 0x22ed06
vmovaps 0x50(%rsp), %xmm0
vtestps 0x1cbd0b7(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x20(%rsp), %r9
vbroadcastss 0x1cbcca6(%rip), %xmm14 # 0x1eeba20
movq 0x10(%rsp), %r11
vbroadcastss 0x1cf1188(%rip), %xmm15 # 0x1f1ff10
vmovaps 0x30(%rsp), %xmm1
jb 0x22e5dd
vmovaps 0x160(%rsp), %xmm1
vbroadcastss 0x1cbddde(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps 0x30(%rsp), %xmm1
vmovaps %xmm0, 0x160(%rsp)
pushq $0x2
popq %rax
jmp 0x22e5dd
pushq $0x2
popq %rax
jmp 0x22ecb6
vmovaps 0x70(%rsp), %xmm0
vandps 0x50(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cbdda4(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r9)
addq $0x1948, %rsp # imm = 0x1948
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::SubdivPatch1IntersectorK<4>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/// Closest-hit query for a packet of K rays against an N-wide BVH.
/// In single-ray builds each active lane is traced individually; otherwise the
/// packet is (optionally) partitioned by ray direction octant and each group
/// is traversed in chunk mode with a shared traversal stack.
/// @param valid_i  per-lane activity mask; a lane is active iff its entry is -1
/// @param This     intersector collection whose ptr field holds the BVH
/// @param ray      ray/hit packet, updated in place by the primitive intersector
/// @param context  ray query context (queried for coherency flags)
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
/* per-packet precomputations consumed by the primitive intersector */
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* inactive lanes get an empty [+inf,-inf] interval so they never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* single-ray mode: trace each active lane on its own from the root */
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* octant = 3-bit code of the ray direction sign per axis; inactive lanes get -1 */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant (all three signs flipped) */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* traverse one octant-compatible group of lanes per iteration */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
/* number of sign bits in which each lane's octant differs from the reference lane */
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark grouped lanes as consumed */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* slot 0 is a sentinel: popping invalidNode ends the traversal loop */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* single-ray calls may have shortened ray.tfar; propagate to the packet */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are stored front-packed, so the first empty slot ends the loop */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
/* bubble the farthest of the last pushed entries toward the stack bottom
so nearer subtrees are popped (and traversed) first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink traversal interval to the new closest hit on lanes that intersected */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
/* primitive intersector produced a deferred subtree; push it with -inf
near distance so it is never culled against tfar */
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x22efe0
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x22ee35
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x22eff2
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x22efe0
movzbl %al, %ebp
movq %rsp, %rax
andq $0x0, (%rax)
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cf2023(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1cc213a(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cbd858(%rip), %xmm7 # 0x1eec714
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm2, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vdivps %xmm3, %xmm7, %xmm3
vbroadcastss 0x1cf207d(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vblendvps %xmm8, %xmm6, %xmm2, %xmm2
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d2ba4b(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vcmpnltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d25a75(%rip), %xmm5 # 0x1f549a0
vbroadcastss 0x1d2ba34(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm2, %xmm5, %xmm6, %xmm2
vmovaps %xmm1, 0x90(%rax)
vmovaps %xmm2, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d2ba14(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d2ba0f(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cbca91(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cbdbde(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
movq %rsp, %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x2751de
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x22efb4
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2732a0
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::SubdivPatch1IntersectorK<4>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Occlusion (shadow-ray) query for a packet of K rays against an N-wide BVH.
 * Traverses the hierarchy breadth-style with a software stack of (node, near-distance)
 * pairs; lanes whose occlusion is proven are accumulated in 'terminated'.
 * On return, ray.tfar is set to -inf for every input-valid ray found occluded
 * (see the final masked store), which is how callers read the occlusion result.
 * NOTE(review): template parameters N/K/types/robust/single come from the class
 * template declaration outside this view. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent packets over a single-time-step BVH take a dedicated fast path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
/* a lane is live only if the caller flagged it (-1) AND tfar is non-negative */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
/* clamp the ray interval to [0, inf); negative tnear/tfar are treated as 0 */
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get an empty interval [+inf, -inf] so they fail every box test */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid lanes start out "terminated" so they never delay the all(terminated) exit */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
/* coherent packets switch to single-ray traversal sooner (at <= 2 active lanes) */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* slot 0 is a sentinel: popping invalidNode means the stack is exhausted */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* 'pop:' is the re-entry label used by goto when inner traversal dead-ends */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* too few active lanes to amortize packet traversal: trace each lane scalar */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* tfar = -inf deactivates terminated lanes in all later box tests */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
/* inner descent: walk down until a leaf (or a dead end) is reached */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are packed front-to-back; the first empty slot ends the list */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
/* no child was hit: return to the outer pop loop */
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* utilization dropped mid-descent: push cur and let the pop loop
 * re-dispatch it through the single-ray path above */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
/* accumulate newly occluded lanes; only !terminated lanes are tested */
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* a leaf may defer work by handing back a lazily-built subtree to traverse */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* publish results: occluded rays (valid on entry, now terminated) get tfar = -inf */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1b68, %rsp # imm = 0x1B68
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x230972
movq %rcx, %r10
movq %rdx, %r14
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x22f04e
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x230987
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r14), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x230972
vandps %xmm3, %xmm4, %xmm10
andq $0x0, 0x100(%rsp)
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x280(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x290(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x2b0(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x2c0(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x2d0(%rsp)
vbroadcastss 0x1cf1de2(%rip), %xmm11 # 0x1f20ec4
vandps %xmm3, %xmm11, %xmm6
vbroadcastss 0x1cc1ef9(%rip), %xmm9 # 0x1ef0fe8
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cbd616(%rip), %xmm12 # 0x1eec714
vdivps %xmm3, %xmm12, %xmm3
vandps %xmm4, %xmm11, %xmm7
vcmpltps %xmm9, %xmm7, %xmm7
vdivps %xmm4, %xmm12, %xmm4
vandps %xmm5, %xmm11, %xmm8
vcmpltps %xmm9, %xmm8, %xmm8
vbroadcastss 0x1cf1e3d(%rip), %xmm9 # 0x1f20f60
vblendvps %xmm6, %xmm9, %xmm3, %xmm3
vblendvps %xmm7, %xmm9, %xmm4, %xmm4
vdivps %xmm5, %xmm12, %xmm5
vblendvps %xmm8, %xmm9, %xmm5, %xmm5
vmovaps %xmm3, 0x2e0(%rsp)
vmovaps %xmm4, 0x2f0(%rsp)
vmovaps %xmm5, 0x300(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d2b802(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x310(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d25823(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d2b7e2(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d2b7d2(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d2b7cd(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x320(%rsp)
vmovaps %xmm4, 0x330(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cbc84e(%rip), %xmm12 # 0x1eeba20
vblendvps %xmm10, %xmm3, %xmm12, %xmm1
vmovaps %xmm1, 0x340(%rsp)
vbroadcastss 0x1cbd99a(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm10, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x350(%rsp)
vmovaps %xmm10, 0x370(%rsp)
vxorps %xmm0, %xmm10, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
testq %rax, %rax
je 0x22f228
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r9d, %r9d
cmpb $0x1, %al
adcq $0x2, %r9
jmp 0x22f22c
pushq $0x3
popq %r9
leaq 0x80(%r14), %r11
leaq 0x4d0(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0xc70(%rsp), %r12
vmovaps %xmm12, -0x20(%r12)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r15)
vmovaps %xmm1, -0x10(%r12)
leaq 0x1f20d18(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vbroadcastss 0x1cf0c92(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cf0c8d(%rip), %xmm13 # 0x1f1ff14
vpcmpeqd %xmm14, %xmm14, %xmm14
addq $-0x10, %r12
movq -0x8(%r15), %rbp
addq $-0x8, %r15
cmpq $-0x8, %rbp
je 0x23081c
vmovaps (%r12), %xmm15
vcmpltps 0x350(%rsp), %xmm15, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x23082d
movzbl %al, %r13d
popcntl %r13d, %ebx
xorl %eax, %eax
cmpq %r9, %rbx
jbe 0x230831
cmpq %r9, %rbx
jbe 0x23081f
testb $0x8, %bpl
pushq $0x8
popq %rbx
jne 0x22f475
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
vmovaps %xmm12, %xmm15
movq %rbx, %rbp
movq (%rcx,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x22f40c
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x280(%rsp), %xmm1
vmovaps 0x290(%rsp), %xmm2
vmovaps 0x2a0(%rsp), %xmm3
vmovaps 0x2e0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x2f0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x300(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vminps %xmm1, %xmm5, %xmm0
vminps %xmm2, %xmm7, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vminps %xmm3, %xmm9, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vmaxps %xmm1, %xmm5, %xmm1
vmaxps %xmm2, %xmm7, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm9, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmulps %xmm1, %xmm13, %xmm1
vmaxps 0x340(%rsp), %xmm0, %xmm2
vminps 0x350(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x22f40c
vblendvps %xmm1, %xmm0, %xmm12, %xmm0
cmpq $0x8, %rbp
je 0x22f405
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm15, (%r12)
addq $0x10, %r12
movq %rdi, %rbp
vmovaps %xmm0, %xmm15
cmpq $0x8, %rdi
je 0x22f423
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x22f2fc
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x22f46e
vmovaps 0x350(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r9
jae 0x22f45d
testb %cl, %cl
je 0x23081f
testb $0x8, %bpl
je 0x22f2e8
jmp 0x22f475
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm15, (%r12)
addq $0x10, %r12
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x22f449
cmpq $-0x8, %rbp
je 0x23081c
vmovaps 0x350(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x23082d
movq %rsi, 0xb8(%rsp)
movq %r11, 0x8(%rsp)
movq %r9, 0x10(%rsp)
movq %r8, 0x18(%rsp)
movl %ebp, %eax
andl $0xf, %eax
cmpl $0x8, %eax
jne 0x230921
vpxor 0xe0(%rsp), %xmm14, %xmm9
movq 0x100(%rsp), %rsi
movl 0x14(%rsi), %eax
movl 0xc(%rsi), %ecx
movl 0x10(%rsi), %ebx
xorl %edx, %edx
cmpq $0x2, %rcx
setne %dl
movl 0x24(%rsi), %r11d
imulq $0xc, %rax, %rdi
leaq 0x4(,%rdx,4), %rdx
movq %rdx, 0x1e0(%rsp)
shrq $0x4, %rbp
leaq (,%rcx,4), %rdx
movq %rdx, 0x1d0(%rsp)
leaq (%r11,%rbp,4), %rdx
addq %rdx, %rdi
leaq (%rdi,%rcx,4), %r8
addq %rsi, %r8
addq $0x2c, %r8
movq %r8, 0xc0(%rsp)
leaq (%rdx,%rax,8), %r8
leaq (%r8,%rcx,4), %r9
leaq 0x30(%rsi,%r9), %r9
movq %r9, 0xd8(%rsp)
leaq (%rdx,%rax,4), %rax
leaq (%rax,%rcx,4), %r9
leaq 0x30(%rsi,%r9), %r9
movq %r9, 0xd0(%rsp)
leaq (%rdx,%rcx,4), %rcx
leaq 0x30(%rsi,%rcx), %rcx
movq %rcx, 0xc8(%rsp)
leaq 0x30(%rsi,%rdi), %rcx
movq %rcx, 0x98(%rsp)
movl %ebx, %edi
leaq 0x30(%rsi,%r8), %rcx
movq %rcx, 0xf8(%rsp)
leaq 0x30(%rsi,%rax), %r13
leaq 0x2c(%r11,%rbp,4), %rdx
movq $0x0, 0xf0(%rsp)
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %r9
movq 0x8(%rsp), %r11
movq %rdx, 0x1d8(%rsp)
leaq (%rsi,%rdx), %rax
movq %rax, 0x1e8(%rsp)
xorl %ebp, %ebp
cmpq %rbp, 0x1e0(%rsp)
je 0x230748
movq 0x1e8(%rsp), %rbx
vbroadcastss (%rbx,%rbp), %xmm12
vbroadcastss -0x4(%r13,%rbp), %xmm0
movq 0xf8(%rsp), %rcx
vbroadcastss -0x4(%rcx,%rbp), %xmm1
movq 0xc8(%rsp), %rax
vbroadcastss -0x4(%rax,%rbp), %xmm14
movq 0xd0(%rsp), %rax
vbroadcastss -0x4(%rax,%rbp), %xmm2
movq 0xd8(%rsp), %rax
vbroadcastss -0x4(%rax,%rbp), %xmm6
vmovaps 0x10(%r14), %xmm3
vmovaps 0x20(%r14), %xmm8
vmovaps %xmm8, 0x60(%rsp)
vsubps %xmm3, %xmm0, %xmm7
vsubps %xmm8, %xmm1, %xmm5
vmovaps %xmm2, 0x4a0(%rsp)
vsubps %xmm3, %xmm2, %xmm4
vmovaps %xmm4, 0x80(%rsp)
vmovaps %xmm6, 0x490(%rsp)
vsubps %xmm8, %xmm6, %xmm2
vmovaps %xmm2, 0x70(%rsp)
vsubps %xmm7, %xmm4, %xmm0
vsubps %xmm5, %xmm2, %xmm1
vaddps %xmm7, %xmm4, %xmm6
vaddps %xmm5, %xmm2, %xmm8
vmovdqa %xmm9, 0x20(%rsp)
vmulps %xmm1, %xmm6, %xmm9
vmulps %xmm0, %xmm8, %xmm10
vsubps %xmm9, %xmm10, %xmm13
vmovaps (%r14), %xmm15
vsubps %xmm15, %xmm12, %xmm2
vmovaps %xmm14, 0x4b0(%rsp)
vsubps %xmm15, %xmm14, %xmm12
vsubps %xmm2, %xmm12, %xmm4
vmulps %xmm4, %xmm8, %xmm8
vaddps %xmm2, %xmm12, %xmm10
vmovaps %xmm1, 0x1a0(%rsp)
vmulps %xmm1, %xmm10, %xmm14
vsubps %xmm8, %xmm14, %xmm14
vmovaps %xmm0, 0x1c0(%rsp)
vmulps %xmm0, %xmm10, %xmm8
vmovaps %xmm4, 0x190(%rsp)
vmulps %xmm6, %xmm4, %xmm6
vsubps %xmm8, %xmm6, %xmm8
vmovaps 0x60(%r14), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmulps %xmm0, %xmm8, %xmm10
vmovaps 0x50(%r14), %xmm11
vmulps %xmm14, %xmm11, %xmm14
vaddps %xmm14, %xmm10, %xmm14
vmovaps 0x40(%r14), %xmm4
vmulps %xmm4, %xmm13, %xmm13
vaddps %xmm14, %xmm13, %xmm10
vbroadcastss (%r13,%rbp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vsubps %xmm3, %xmm0, %xmm0
vbroadcastss (%rcx,%rbp), %xmm1
vmovaps %xmm1, 0x470(%rsp)
vsubps 0x60(%rsp), %xmm1, %xmm1
vsubps %xmm0, %xmm7, %xmm3
vsubps %xmm1, %xmm5, %xmm14
vmovaps %xmm7, 0x60(%rsp)
vaddps %xmm0, %xmm7, %xmm7
vmovaps %xmm5, 0x180(%rsp)
vaddps %xmm1, %xmm5, %xmm5
vmulps %xmm7, %xmm14, %xmm13
vmulps %xmm5, %xmm3, %xmm6
vsubps %xmm13, %xmm6, %xmm6
vbroadcastss 0x4(%rbx,%rbp), %xmm8
vmovaps %xmm8, 0x460(%rsp)
vsubps %xmm15, %xmm8, %xmm9
vsubps %xmm9, %xmm2, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vmovaps %xmm2, 0x1b0(%rsp)
vaddps %xmm2, %xmm9, %xmm15
vmulps %xmm14, %xmm15, %xmm8
vsubps %xmm5, %xmm8, %xmm5
vmovaps %xmm3, 0x170(%rsp)
vmulps %xmm3, %xmm15, %xmm8
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm8, %xmm7, %xmm7
vmovaps 0x40(%rsp), %xmm3
vmulps %xmm3, %xmm7, %xmm7
vmulps %xmm5, %xmm11, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vmulps %xmm6, %xmm4, %xmm6
vaddps %xmm5, %xmm6, %xmm7
vsubps %xmm12, %xmm9, %xmm15
vaddps %xmm12, %xmm9, %xmm5
vmovaps 0x20(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm2
vsubps %xmm2, %xmm0, %xmm12
vaddps %xmm2, %xmm0, %xmm0
vmovaps 0x70(%rsp), %xmm2
vsubps %xmm2, %xmm1, %xmm9
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm0, %xmm9, %xmm2
vmulps %xmm1, %xmm12, %xmm6
vsubps %xmm2, %xmm6, %xmm2
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm5, %xmm9, %xmm6
vsubps %xmm1, %xmm6, %xmm1
vbroadcastss 0x1cf16b7(%rip), %xmm6 # 0x1f20ec4
vmulps %xmm5, %xmm12, %xmm5
vmulps %xmm0, %xmm15, %xmm0
vsubps %xmm5, %xmm0, %xmm0
vmulps %xmm3, %xmm0, %xmm0
vmovaps %xmm11, 0x70(%rsp)
vmulps %xmm1, %xmm11, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps %xmm4, 0x80(%rsp)
vmulps %xmm2, %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm7, %xmm10, %xmm1
vaddps %xmm1, %xmm0, %xmm4
vandps %xmm6, %xmm4, %xmm1
vbroadcastss 0x1cf167b(%rip), %xmm2 # 0x1f20ecc
vmulps %xmm2, %xmm1, %xmm1
vminps %xmm7, %xmm10, %xmm2
vminps %xmm0, %xmm2, %xmm2
vbroadcastss 0x1cf165a(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm1, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm10, 0x160(%rsp)
vmaxps %xmm7, %xmm10, %xmm5
vmaxps %xmm0, %xmm5, %xmm0
vcmpleps %xmm1, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vtestps %xmm8, %xmm0
vandps %xmm0, %xmm8, %xmm0
je 0x230693
vmovaps %xmm4, 0x140(%rsp)
vmovaps 0x1a0(%rsp), %xmm10
vmovaps 0x170(%rsp), %xmm3
vmovaps %xmm0, 0x150(%rsp)
vmulps %xmm3, %xmm10, %xmm0
vmovaps 0x1c0(%rsp), %xmm8
vmulps %xmm14, %xmm8, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm14, %xmm12, %xmm2
vmulps %xmm3, %xmm9, %xmm5
vsubps %xmm2, %xmm5, %xmm5
vandps %xmm6, %xmm0, %xmm0
vandps %xmm6, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm5, %xmm2
vmulps %xmm9, %xmm13, %xmm0
vmulps %xmm10, %xmm13, %xmm1
vmovaps 0x190(%rsp), %xmm9
vmulps %xmm14, %xmm9, %xmm4
vsubps %xmm4, %xmm1, %xmm1
vmulps %xmm14, %xmm15, %xmm5
vsubps %xmm0, %xmm5, %xmm5
vandps %xmm6, %xmm4, %xmm4
vandps %xmm6, %xmm0, %xmm0
vcmpltps %xmm0, %xmm4, %xmm0
vblendvps %xmm0, %xmm1, %xmm5, %xmm4
vmulps %xmm3, %xmm15, %xmm0
vmulps %xmm3, %xmm9, %xmm1
vmulps %xmm8, %xmm13, %xmm5
vmovaps %xmm6, %xmm11
vmulps %xmm12, %xmm13, %xmm6
vsubps %xmm5, %xmm1, %xmm1
vsubps %xmm0, %xmm6, %xmm6
vandps %xmm5, %xmm11, %xmm5
vandps %xmm0, %xmm11, %xmm0
vcmpltps %xmm0, %xmm5, %xmm0
vblendvps %xmm0, %xmm1, %xmm6, %xmm5
vmulps 0x40(%rsp), %xmm5, %xmm0
vmulps 0x70(%rsp), %xmm4, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x80(%rsp), %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vmulps 0x180(%rsp), %xmm5, %xmm1
vmulps 0x60(%rsp), %xmm4, %xmm6
vaddps %xmm6, %xmm1, %xmm1
vmulps 0x1b0(%rsp), %xmm2, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm1, %xmm1
vrcpps %xmm0, %xmm3
vmulps %xmm3, %xmm0, %xmm6
vbroadcastss 0x1cbcd63(%rip), %xmm8 # 0x1eec714
vsubps %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm3, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vmovaps 0x30(%r14), %xmm3
vcmpleps 0x80(%r14), %xmm1, %xmm6
vcmpleps %xmm1, %xmm3, %xmm3
vandps %xmm6, %xmm3, %xmm3
vxorps %xmm8, %xmm8, %xmm8
vcmpneqps %xmm0, %xmm8, %xmm0
vandps %xmm3, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm0
vmovaps 0x150(%rsp), %xmm3
vtestps %xmm3, %xmm0
vandps %xmm3, %xmm0, %xmm0
je 0x22fa57
vmovaps 0x160(%rsp), %xmm3
vmovaps %xmm3, 0x450(%rsp)
vmovaps %xmm7, 0x440(%rsp)
vmovaps 0x140(%rsp), %xmm3
vmovaps %xmm3, 0x430(%rsp)
vmovaps %xmm1, 0x420(%rsp)
vmovaps %xmm2, 0x410(%rsp)
vmovaps %xmm4, 0x400(%rsp)
vmovaps %xmm5, 0x3f0(%rsp)
vbroadcastss 0x1cbbfc0(%rip), %xmm12 # 0x1eeba20
vbroadcastss 0x1cf04a7(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cf04a2(%rip), %xmm13 # 0x1f1ff14
vmovdqa 0x20(%rsp), %xmm9
movq 0xc8(%rsp), %rax
vbroadcastss (%rax,%rbp), %xmm1
vmovaps %xmm1, 0x40(%rsp)
movq 0xd0(%rsp), %rax
vbroadcastss (%rax,%rbp), %xmm2
movq 0xd8(%rsp), %rax
vbroadcastss (%rax,%rbp), %xmm14
movq 0x100(%rsp), %rcx
movl 0x18(%rcx), %eax
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rbx
vbroadcastss 0x34(%rbx), %xmm1
vandps 0x90(%r14), %xmm1, %xmm1
vpcmpeqd %xmm1, %xmm8, %xmm1
vtestps %xmm0, %xmm1
jb 0x22fb01
movl 0x1c(%rcx), %ecx
vandnps %xmm0, %xmm1, %xmm0
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x22ffe2
cmpq $0x0, 0x48(%rbx)
jne 0x22ffe2
vpandn %xmm9, %xmm0, %xmm9
vtestps %xmm9, %xmm9
je 0x230743
vmovaps (%r14), %xmm0
vmovaps 0x10(%r14), %xmm5
vmovaps 0x20(%r14), %xmm6
vmovaps 0x4b0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm7
vmovaps 0x4a0(%rsp), %xmm1
vsubps %xmm5, %xmm1, %xmm13
vmovaps 0x490(%rsp), %xmm1
vsubps %xmm6, %xmm1, %xmm15
vmovaps 0x460(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmovaps %xmm1, 0x70(%rsp)
vmovaps 0x480(%rsp), %xmm1
vsubps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x1c0(%rsp)
vmovaps 0x470(%rsp), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmovaps %xmm1, 0x60(%rsp)
vmovaps 0x40(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vsubps %xmm5, %xmm2, %xmm5
vsubps %xmm6, %xmm14, %xmm1
vmovaps %xmm1, 0x40(%rsp)
vsubps %xmm7, %xmm0, %xmm2
vmovaps %xmm0, %xmm3
vmovaps %xmm0, 0x80(%rsp)
vsubps %xmm13, %xmm5, %xmm14
vsubps %xmm15, %xmm1, %xmm0
vaddps %xmm5, %xmm13, %xmm6
vaddps %xmm1, %xmm15, %xmm8
vmovaps %xmm9, 0x20(%rsp)
vmulps %xmm0, %xmm6, %xmm9
vmulps %xmm8, %xmm14, %xmm10
vsubps %xmm9, %xmm10, %xmm12
vaddps %xmm7, %xmm3, %xmm9
vmulps %xmm2, %xmm8, %xmm8
vmovaps %xmm0, 0x160(%rsp)
vmulps %xmm0, %xmm9, %xmm10
vsubps %xmm8, %xmm10, %xmm10
vmovaps %xmm14, 0x170(%rsp)
vmulps %xmm14, %xmm9, %xmm8
vmovaps %xmm2, 0x180(%rsp)
vmulps %xmm6, %xmm2, %xmm6
vsubps %xmm8, %xmm6, %xmm6
vmovaps 0x60(%r14), %xmm4
vmulps %xmm4, %xmm6, %xmm6
vmovaps 0x50(%r14), %xmm14
vmulps %xmm10, %xmm14, %xmm10
vaddps %xmm6, %xmm10, %xmm6
vmovaps 0x40(%r14), %xmm11
vmulps %xmm12, %xmm11, %xmm12
vaddps %xmm6, %xmm12, %xmm9
vmovaps 0x1c0(%rsp), %xmm3
vsubps %xmm3, %xmm13, %xmm1
vmovaps 0x60(%rsp), %xmm12
vsubps %xmm12, %xmm15, %xmm10
vmovaps %xmm13, 0x1a0(%rsp)
vaddps %xmm3, %xmm13, %xmm6
vmovaps %xmm15, 0x190(%rsp)
vaddps %xmm12, %xmm15, %xmm15
vmulps %xmm6, %xmm10, %xmm13
vmulps %xmm1, %xmm15, %xmm2
vsubps %xmm13, %xmm2, %xmm2
vmovaps 0x70(%rsp), %xmm0
vsubps %xmm0, %xmm7, %xmm13
vmulps %xmm15, %xmm13, %xmm15
vmovaps %xmm7, 0x1b0(%rsp)
vaddps %xmm0, %xmm7, %xmm7
vmulps %xmm7, %xmm10, %xmm8
vsubps %xmm15, %xmm8, %xmm8
vmovaps %xmm1, 0x150(%rsp)
vmulps %xmm1, %xmm7, %xmm7
vmulps %xmm6, %xmm13, %xmm6
vsubps %xmm7, %xmm6, %xmm6
vmulps %xmm4, %xmm6, %xmm6
vmulps %xmm8, %xmm14, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vmulps %xmm2, %xmm11, %xmm2
vaddps %xmm6, %xmm2, %xmm7
vmovaps %xmm7, 0x140(%rsp)
vmovaps 0x80(%rsp), %xmm1
vsubps %xmm1, %xmm0, %xmm15
vaddps %xmm1, %xmm0, %xmm1
vsubps %xmm5, %xmm3, %xmm8
vaddps %xmm5, %xmm3, %xmm0
vmovaps 0x40(%rsp), %xmm2
vsubps %xmm2, %xmm12, %xmm6
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm6, %xmm0, %xmm3
vmulps %xmm2, %xmm8, %xmm5
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm2, %xmm15, %xmm2
vmulps %xmm6, %xmm1, %xmm5
vsubps %xmm2, %xmm5, %xmm2
vbroadcastss 0x1cf11b3(%rip), %xmm5 # 0x1f20ec4
vmulps %xmm1, %xmm8, %xmm1
vmulps %xmm0, %xmm15, %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm4, 0x40(%rsp)
vmulps %xmm4, %xmm0, %xmm0
vmovaps %xmm14, 0x70(%rsp)
vmulps %xmm2, %xmm14, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps %xmm11, 0x80(%rsp)
vmulps %xmm3, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm7, %xmm9, %xmm1
vaddps %xmm1, %xmm0, %xmm11
vandps %xmm5, %xmm11, %xmm1
vbroadcastss 0x1cf1171(%rip), %xmm2 # 0x1f20ecc
vmulps %xmm2, %xmm1, %xmm1
vminps %xmm7, %xmm9, %xmm2
vminps %xmm0, %xmm2, %xmm2
vbroadcastss 0x1cf1150(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm1, %xmm3
vcmpnltps %xmm3, %xmm2, %xmm2
vmovaps %xmm9, 0x60(%rsp)
vmaxps %xmm7, %xmm9, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm1, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vmovaps 0x20(%rsp), %xmm1
vtestps %xmm1, %xmm0
vandps %xmm1, %xmm0, %xmm9
je 0x2306bc
vmovaps 0x160(%rsp), %xmm7
vmovaps 0x150(%rsp), %xmm12
vmulps %xmm7, %xmm12, %xmm0
vmovaps %xmm8, %xmm14
vmovaps 0x170(%rsp), %xmm8
vmulps %xmm10, %xmm8, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm10, %xmm14, %xmm2
vmulps %xmm6, %xmm12, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vandps %xmm5, %xmm0, %xmm0
vandps %xmm5, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm3
vmulps %xmm6, %xmm13, %xmm0
vmulps %xmm7, %xmm13, %xmm1
vmovaps 0x180(%rsp), %xmm7
vmulps %xmm7, %xmm10, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmulps %xmm10, %xmm15, %xmm4
vsubps %xmm0, %xmm4, %xmm4
vandps %xmm5, %xmm2, %xmm2
vandps %xmm5, %xmm0, %xmm0
vcmpltps %xmm0, %xmm2, %xmm0
vblendvps %xmm0, %xmm1, %xmm4, %xmm1
vmulps %xmm12, %xmm15, %xmm0
vmulps %xmm7, %xmm12, %xmm2
vmulps %xmm8, %xmm13, %xmm4
vmovaps %xmm5, %xmm12
vmulps %xmm14, %xmm13, %xmm5
vsubps %xmm4, %xmm2, %xmm2
vsubps %xmm0, %xmm5, %xmm5
vandps %xmm4, %xmm12, %xmm4
vandps %xmm0, %xmm12, %xmm0
vcmpltps %xmm0, %xmm4, %xmm0
vblendvps %xmm0, %xmm2, %xmm5, %xmm4
vmulps 0x40(%rsp), %xmm4, %xmm0
vmulps 0x70(%rsp), %xmm1, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps 0x80(%rsp), %xmm3, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vmulps 0x190(%rsp), %xmm4, %xmm2
vmulps 0x1a0(%rsp), %xmm1, %xmm5
vaddps %xmm5, %xmm2, %xmm2
vmulps 0x1b0(%rsp), %xmm3, %xmm5
vaddps %xmm2, %xmm5, %xmm2
vaddps %xmm2, %xmm2, %xmm2
vrcpps %xmm0, %xmm5
vmulps %xmm5, %xmm0, %xmm7
vbroadcastss 0x1cbc862(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm5, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm5, %xmm2, %xmm2
vmovaps 0x30(%r14), %xmm5
vcmpleps 0x80(%r14), %xmm2, %xmm7
vcmpleps %xmm2, %xmm5, %xmm5
vandps %xmm7, %xmm5, %xmm5
vxorps %xmm7, %xmm7, %xmm7
vcmpneqps %xmm7, %xmm0, %xmm0
vandps %xmm5, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm0
vtestps %xmm9, %xmm0
vandps %xmm0, %xmm9, %xmm0
vmovaps %xmm12, %xmm6
je 0x22ff4f
vmovaps 0x60(%rsp), %xmm5
vmovaps %xmm5, 0x3e0(%rsp)
vmovaps 0x140(%rsp), %xmm5
vmovaps %xmm5, 0x3d0(%rsp)
vmovaps %xmm11, 0x3c0(%rsp)
vmovaps %xmm2, 0x3b0(%rsp)
vmovaps %xmm3, 0x3a0(%rsp)
vmovaps %xmm1, 0x390(%rsp)
vmovaps %xmm4, 0x380(%rsp)
vbroadcastss 0x1cbbac8(%rip), %xmm12 # 0x1eeba20
vbroadcastss 0x1ceffaf(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1ceffaa(%rip), %xmm13 # 0x1f1ff14
vpcmpeqd %xmm14, %xmm14, %xmm14
vmovdqa 0x20(%rsp), %xmm9
movq 0x100(%rsp), %rcx
movl 0x18(%rcx), %eax
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rbx
vbroadcastss 0x34(%rbx), %xmm1
vandps 0x90(%r14), %xmm1, %xmm1
vpcmpeqd %xmm7, %xmm1, %xmm1
vtestps %xmm0, %xmm1
jb 0x22ffce
movl 0x1c(%rcx), %ecx
vandnps %xmm0, %xmm1, %xmm0
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x23034f
cmpq $0x0, 0x48(%rbx)
jne 0x23034f
vpandn %xmm9, %xmm0, %xmm9
addq $0x4, %rbp
vtestps %xmm9, %xmm9
jne 0x22f5b9
jmp 0x230748
vmovaps %xmm14, 0x70(%rsp)
vmovaps %xmm2, 0x80(%rsp)
vmovaps 0x430(%rsp), %xmm3
vandps %xmm3, %xmm11, %xmm1
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cbc705(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cc0fc4(%rip), %xmm3 # 0x1ef0fe8
vcmpnltps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm2
vmulps 0x450(%rsp), %xmm2, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps 0x440(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
movq 0x98(%rsp), %rdx
vbroadcastss -0x4(%rdx,%rbp), %xmm3
movq %r11, %rdx
movq 0xc0(%rsp), %r11
vbroadcastss (%r11,%rbp), %xmm4
movq %rdx, %r11
vpsrld $0x10, %xmm3, %xmm5
vpblendw $0xaa, %xmm8, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm8[1],xmm3[2],xmm8[3],xmm3[4],xmm8[5],xmm3[6],xmm8[7]
vcvtdq2ps %xmm3, %xmm3
vbroadcastss 0x1cee45e(%rip), %xmm7 # 0x1f1e4e0
vmulps %xmm7, %xmm3, %xmm3
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm7, %xmm5, %xmm5
vsubps %xmm1, %xmm6, %xmm6
vsubps %xmm2, %xmm6, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vmulps %xmm5, %xmm6, %xmm5
vpblendw $0xaa, %xmm8, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm2, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm2, %xmm2
movq 0x98(%rsp), %rdx
vbroadcastss (%rdx,%rbp), %xmm4
vaddps %xmm2, %xmm5, %xmm2
vpblendw $0xaa, %xmm8, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm8[1],xmm4[2],xmm8[3],xmm4[4],xmm8[5],xmm4[6],xmm8[7]
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm7, %xmm5, %xmm5
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm7, %xmm4, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm1
movq 0x8(%r10), %rdx
vmovaps 0x410(%rsp), %xmm2
vmovaps %xmm2, 0x1f0(%rsp)
vmovaps 0x400(%rsp), %xmm2
vmovaps %xmm2, 0x200(%rsp)
vmovaps 0x3f0(%rsp), %xmm2
vmovaps %xmm2, 0x210(%rsp)
vmovaps %xmm3, 0x220(%rsp)
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps %xmm1, 0x230(%rsp)
vmovd %ecx, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovdqa %xmm1, 0x240(%rsp)
vmovdqa %xmm2, 0x250(%rsp)
leaq 0x260(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x260(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x270(%rsp)
vmovaps 0x80(%r14), %xmm3
vblendvps %xmm0, 0x420(%rsp), %xmm3, %xmm1
vmovaps %xmm1, 0x80(%r14)
vmovaps %xmm0, 0xa0(%rsp)
leaq 0xa0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x120(%rsp)
movq %r14, 0x128(%rsp)
leaq 0x1f0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x4, 0x138(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %r10, 0x38(%rsp)
movq %rsi, 0x58(%rsp)
movl %edi, 0x4(%rsp)
vmovaps %xmm3, 0x60(%rsp)
je 0x230284
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm3
movl 0x4(%rsp), %edi
vmovdqa 0x20(%rsp), %xmm9
vxorps %xmm8, %xmm8, %xmm8
vbroadcastss 0x1cefcbb(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cefcae(%rip), %xmm10 # 0x1f1ff10
movq 0x8(%rsp), %r11
movq 0x10(%rsp), %r9
vbroadcastss 0x1cbb7ab(%rip), %xmm12 # 0x1eeba20
movq 0x18(%rsp), %r8
movq 0x58(%rsp), %rsi
movq 0x38(%rsp), %r10
vmovdqa 0xa0(%rsp), %xmm0
vptest %xmm0, %xmm0
vmovaps 0x70(%rsp), %xmm14
je 0x2306ed
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vpcmpeqd %xmm0, %xmm0, %xmm0
je 0x23031a
testb $0x2, (%rcx)
jne 0x2302ba
testb $0x40, 0x3e(%rbx)
je 0x23031a
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm3
vmovaps 0x70(%rsp), %xmm14
movl 0x4(%rsp), %edi
vmovdqa 0x20(%rsp), %xmm9
vxorps %xmm8, %xmm8, %xmm8
vpcmpeqd %xmm0, %xmm0, %xmm0
vbroadcastss 0x1cefc25(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cefc18(%rip), %xmm10 # 0x1f1ff10
movq 0x8(%rsp), %r11
movq 0x10(%rsp), %r9
vbroadcastss 0x1cbb715(%rip), %xmm12 # 0x1eeba20
movq 0x18(%rsp), %r8
movq 0x58(%rsp), %rsi
movq 0x38(%rsp), %r10
vpcmpeqd 0xa0(%rsp), %xmm8, %xmm1
vpxor %xmm0, %xmm1, %xmm0
movq 0x128(%rsp), %rax
vbroadcastss 0x1cbc84c(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x2306f9
vmovaps 0x3c0(%rsp), %xmm3
vandps %xmm6, %xmm3, %xmm1
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cbc3a7(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cc0c66(%rip), %xmm3 # 0x1ef0fe8
vcmpnltps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm2
vmulps 0x3e0(%rsp), %xmm2, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps 0x3d0(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
movq 0xc0(%rsp), %rdx
vbroadcastss (%rdx,%rbp), %xmm3
vbroadcastss 0x4(%rdx,%rbp), %xmm4
vpsrld $0x10, %xmm3, %xmm5
vpblendw $0xaa, %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm7[1],xmm3[2],xmm7[3],xmm3[4],xmm7[5],xmm3[6],xmm7[7]
vcvtdq2ps %xmm3, %xmm3
vbroadcastss 0x1cee10e(%rip), %xmm8 # 0x1f1e4e0
vmulps %xmm3, %xmm8, %xmm3
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vsubps %xmm1, %xmm6, %xmm6
vsubps %xmm2, %xmm6, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vmulps %xmm5, %xmm6, %xmm5
vpblendw $0xaa, %xmm7, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4],xmm7[5],xmm4[6],xmm7[7]
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm2, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm2, %xmm2
movq 0x98(%rsp), %rdx
vbroadcastss (%rdx,%rbp), %xmm4
vaddps %xmm2, %xmm5, %xmm2
vpblendw $0xaa, %xmm7, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4],xmm7[5],xmm4[6],xmm7[7]
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm1
movq 0x8(%r10), %rdx
vmovaps 0x3a0(%rsp), %xmm2
vmovaps %xmm2, 0x1f0(%rsp)
vmovaps 0x390(%rsp), %xmm2
vmovaps %xmm2, 0x200(%rsp)
vmovaps 0x380(%rsp), %xmm2
vmovaps %xmm2, 0x210(%rsp)
vmovaps %xmm3, 0x220(%rsp)
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps %xmm1, 0x230(%rsp)
vmovd %ecx, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovdqa %xmm1, 0x240(%rsp)
vmovdqa %xmm2, 0x250(%rsp)
leaq 0x260(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x260(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x270(%rsp)
vmovaps 0x80(%r14), %xmm3
vblendvps %xmm0, 0x3b0(%rsp), %xmm3, %xmm1
vmovaps %xmm1, 0x80(%r14)
vmovaps %xmm0, 0xa0(%rsp)
leaq 0xa0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x120(%rsp)
movq %r14, 0x128(%rsp)
leaq 0x1f0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x4, 0x138(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %r10, 0x38(%rsp)
movq %rsi, 0x58(%rsp)
movl %edi, 0x4(%rsp)
vmovaps %xmm3, 0x40(%rsp)
je 0x2305d3
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x40(%rsp), %xmm3
movl 0x4(%rsp), %edi
vmovdqa 0x20(%rsp), %xmm9
vxorps %xmm7, %xmm7, %xmm7
vbroadcastss 0x1cef96c(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cef95f(%rip), %xmm10 # 0x1f1ff10
movq 0x8(%rsp), %r11
movq 0x10(%rsp), %r9
vbroadcastss 0x1cbb45c(%rip), %xmm12 # 0x1eeba20
movq 0x18(%rsp), %r8
movq 0x58(%rsp), %rsi
movq 0x38(%rsp), %r10
vmovdqa 0xa0(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x23071c
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vpcmpeqd %xmm14, %xmm14, %xmm14
je 0x23065e
testb $0x2, (%rcx)
jne 0x230604
testb $0x40, 0x3e(%rbx)
je 0x23065e
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x40(%rsp), %xmm3
movl 0x4(%rsp), %edi
vmovdqa 0x20(%rsp), %xmm9
vxorps %xmm7, %xmm7, %xmm7
vpcmpeqd %xmm14, %xmm14, %xmm14
vbroadcastss 0x1cef8e1(%rip), %xmm13 # 0x1f1ff14
vbroadcastss 0x1cef8d4(%rip), %xmm10 # 0x1f1ff10
movq 0x8(%rsp), %r11
movq 0x10(%rsp), %r9
vbroadcastss 0x1cbb3d1(%rip), %xmm12 # 0x1eeba20
movq 0x18(%rsp), %r8
movq 0x58(%rsp), %rsi
movq 0x38(%rsp), %r10
vpcmpeqd 0xa0(%rsp), %xmm7, %xmm1
vpxor %xmm1, %xmm14, %xmm0
movq 0x128(%rsp), %rax
vbroadcastss 0x1cbc508(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x230729
vmovaps %xmm6, %xmm11
vbroadcastss 0x1cbb380(%rip), %xmm12 # 0x1eeba20
vbroadcastss 0x1cef867(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cef862(%rip), %xmm13 # 0x1f1ff14
vxorps %xmm8, %xmm8, %xmm8
jmp 0x22fa72
vmovaps %xmm9, %xmm0
vmovaps %xmm5, %xmm6
vbroadcastss 0x1cbb353(%rip), %xmm12 # 0x1eeba20
vbroadcastss 0x1cef83a(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cef835(%rip), %xmm13 # 0x1f1ff14
vpcmpeqd %xmm14, %xmm14, %xmm14
vxorps %xmm7, %xmm7, %xmm7
jmp 0x22ff6f
vpcmpeqd %xmm0, %xmm8, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm1
vpsrad $0x1f, %xmm1, %xmm0
vblendvps %xmm1, (%r11), %xmm3, %xmm1
vmovaps %xmm1, (%r11)
vmovaps 0x80(%rsp), %xmm2
jmp 0x22fafc
vpcmpeqd %xmm7, %xmm0, %xmm0
vpcmpeqd %xmm14, %xmm14, %xmm14
vpxor %xmm0, %xmm14, %xmm0
vpslld $0x1f, %xmm0, %xmm1
vpsrad $0x1f, %xmm1, %xmm0
vblendvps %xmm1, (%r11), %xmm3, %xmm1
vmovaps %xmm1, (%r11)
jmp 0x22ffc9
vpcmpeqd %xmm14, %xmm14, %xmm14
cmpl $0x2, %edi
movq 0x1d8(%rsp), %rdx
je 0x2307b0
movq 0xf0(%rsp), %rbx
leaq 0x1(%rbx), %rax
movq 0x1d0(%rsp), %rcx
addq %rcx, 0xc0(%rsp)
addq %rcx, 0xd8(%rsp)
addq %rcx, 0xd0(%rsp)
addq %rcx, 0xc8(%rsp)
addq %rcx, 0x98(%rsp)
addq %rcx, 0xf8(%rsp)
addq %rcx, %r13
addq %rcx, %rdx
testq %rbx, %rbx
movq %rax, 0xf0(%rsp)
je 0x22f5a3
vpxor %xmm14, %xmm9, %xmm0
xorl %ecx, %ecx
vpor 0xe0(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0xe0(%rsp)
vtestps %xmm14, %xmm0
movq 0xb8(%rsp), %rsi
jb 0x23081c
vmovaps 0x350(%rsp), %xmm1
vbroadcastss 0x1cbc39a(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x350(%rsp)
xorl %eax, %eax
testq %rcx, %rcx
je 0x23081f
movq %rcx, (%r15)
addq $0x8, %r15
vbroadcastss 0x1cbc374(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, (%r12)
addq $0x10, %r12
jmp 0x23081f
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x22f28c
jmp 0x230952
pushq $0x2
jmp 0x23081e
vmovaps %xmm15, 0x40(%rsp)
movq %r11, 0x8(%rsp)
movq %r9, 0x10(%rsp)
movq %r10, 0x38(%rsp)
movq %rsi, 0xb8(%rsp)
movq %r8, 0x18(%rsp)
bsfq %r13, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, 0x20(%rsp)
leaq 0x100(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x288(%rsp), %rax
pushq %rax
vzeroupper
callq 0x2780d4
popq %rcx
popq %rdx
testb %al, %al
je 0x230896
movq 0x20(%rsp), %rax
orl $-0x1, 0xe0(%rsp,%rax,4)
leaq -0x1(%r13), %rax
andq %rax, %r13
movq 0x38(%rsp), %r10
movq 0xb8(%rsp), %rsi
movq 0x18(%rsp), %r8
jne 0x230853
vmovaps 0xe0(%rsp), %xmm0
vpcmpeqd %xmm14, %xmm14, %xmm14
vtestps %xmm14, %xmm0
pushq $0x3
popq %rax
vbroadcastss 0x1cbb150(%rip), %xmm12 # 0x1eeba20
movq 0x10(%rsp), %r9
movq 0x8(%rsp), %r11
vbroadcastss 0x1cef62d(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1cef628(%rip), %xmm13 # 0x1f1ff14
vmovaps 0x40(%rsp), %xmm15
jb 0x22f2d2
vmovaps 0x350(%rsp), %xmm1
vbroadcastss 0x1cbc27a(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x350(%rsp)
pushq $0x2
popq %rax
jmp 0x22f2d2
andq $-0x10, %rbp
movl 0x2c(%rbp), %eax
movq 0x30(%rbp,%rax), %rcx
movq %rbp, 0x100(%rsp)
vmovdqa 0x360(%rsp), %xmm0
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %r9
movq 0x8(%rsp), %r11
jmp 0x2307b7
vmovaps 0x370(%rsp), %xmm0
vandps 0xe0(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cbc217(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r11)
addq $0x1b68, %rsp # imm = 0x1B68
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %r14, %rdx
movq %r10, %rcx
addq $0x1b68, %rsp # imm = 0x1B68
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x276342
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::avx::SubdivPatch1MBIntersectorK<4>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x230b96
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x230b96
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
movq %rsp, %rcx
andq $0x0, (%rcx)
movzbl %al, %ebp
vmovaps (%rdx), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%rdx), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%rdx), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%rdx), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%rdx), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%rdx), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1cf0492(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1cc05a9(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vandps %xmm4, %xmm2, %xmm5
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vrcpps %xmm1, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1cbbc9b(%rip), %xmm5 # 0x1eec714
vsubps %xmm1, %xmm5, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vrcpps %xmm2, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d29e93(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vcmpnltps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1d23ebd(%rip), %xmm5 # 0x1f549a0
vbroadcastss 0x1d29e7c(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm2, %xmm5, %xmm6, %xmm2
vmovaps %xmm1, 0x90(%rax)
vmovaps %xmm2, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d29e5c(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d29e57(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%rdx), %xmm1
vmovaps 0x80(%rdx), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cbaedb(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cbc028(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
movq %rsp, %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x279122
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x230b6a
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::avx::SubdivPatch1MBIntersectorK<4>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1c28, %rsp # imm = 0x1C28
movq (%rsi), %r8
cmpq $0x8, 0x70(%r8)
je 0x232888
movq %rdx, %r14
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%rdx), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x232888
movq %rcx, %r10
vandps %xmm3, %xmm4, %xmm7
andq $0x0, 0x168(%rsp)
vmovaps (%r14), %xmm3
vmovaps %xmm3, 0x2e0(%rsp)
vmovaps 0x10(%r14), %xmm3
vmovaps %xmm3, 0x2f0(%rsp)
vmovaps 0x20(%r14), %xmm3
vmovaps %xmm3, 0x300(%rsp)
vmovaps 0x40(%r14), %xmm3
vmovaps %xmm3, 0x310(%rsp)
vmovaps 0x50(%r14), %xmm4
vmovaps %xmm4, 0x320(%rsp)
vmovaps 0x60(%r14), %xmm5
vmovaps %xmm5, 0x330(%rsp)
vbroadcastss 0x1cf0264(%rip), %xmm9 # 0x1f20ec4
vandps %xmm3, %xmm9, %xmm6
vbroadcastss 0x1cc037b(%rip), %xmm8 # 0x1ef0fe8
vcmpltps %xmm8, %xmm6, %xmm6
vblendvps %xmm6, %xmm8, %xmm3, %xmm3
vandps %xmm4, %xmm9, %xmm6
vcmpltps %xmm8, %xmm6, %xmm6
vblendvps %xmm6, %xmm8, %xmm4, %xmm4
vandps %xmm5, %xmm9, %xmm6
vcmpltps %xmm8, %xmm6, %xmm6
vblendvps %xmm6, %xmm8, %xmm5, %xmm5
vrcpps %xmm3, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1cbba6a(%rip), %xmm8 # 0x1eec714
vsubps %xmm3, %xmm8, %xmm3
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm6, %xmm3
vrcpps %xmm4, %xmm6
vmulps %xmm6, %xmm4, %xmm4
vsubps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vsubps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm6, %xmm5
vmovaps %xmm3, 0x340(%rsp)
vmovaps %xmm4, 0x350(%rsp)
vmovaps %xmm5, 0x360(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d29c5d(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x370(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d23c7e(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d29c3d(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vmovaps %xmm3, 0x380(%rsp)
vcmpnltps %xmm2, %xmm5, %xmm3
vbroadcastss 0x1d29c24(%rip), %xmm4 # 0x1f5a96c
vbroadcastss 0x1d29c1f(%rip), %xmm5 # 0x1f5a970
vblendvps %xmm3, %xmm4, %xmm5, %xmm3
vmovaps %xmm3, 0x390(%rsp)
vmovaps 0x30(%r14), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cbaca9(%rip), %xmm1 # 0x1eeba20
vblendvps %xmm7, %xmm3, %xmm1, %xmm1
vmovaps %xmm1, 0x3a0(%rsp)
vbroadcastss 0x1cbbdf5(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm7, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x3b0(%rsp)
vmovaps %xmm7, 0x3d0(%rsp)
vxorps %xmm0, %xmm7, %xmm0
vmovaps %xmm0, 0x150(%rsp)
cmpq $0x0, 0x8(%rcx)
je 0x230dcf
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r9d, %r9d
cmpb $0x1, %al
adcq $0x2, %r9
jmp 0x230dd3
pushq $0x3
popq %r9
leaq 0x80(%r14), %rax
movq %rax, 0xa8(%rsp)
leaq 0x590(%rsp), %r12
movq $-0x8, -0x10(%r12)
leaq 0xd30(%rsp), %r15
vbroadcastss 0x1cbac1c(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, -0x20(%r15)
movq 0x70(%r8), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm1, -0x10(%r15)
leaq 0x1f1f160(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
movq %r10, 0x40(%rsp)
movq %r9, 0x178(%rsp)
addq $-0x10, %r15
movq -0x8(%r12), %rbp
addq $-0x8, %r12
cmpq $-0x8, %rbp
je 0x232763
vmovaps (%r15), %xmm15
vcmpltps 0x3b0(%rsp), %xmm15, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x232774
movzbl %al, %r13d
popcntl %r13d, %ebx
xorl %eax, %eax
cmpq %r9, %rbx
jbe 0x232778
cmpq %r9, %rbx
pushq $0x8
popq %rbx
jbe 0x232766
testb $0x8, %bpl
jne 0x2310f6
vmovaps 0x3b0(%rsp), %xmm0
movq %rbp, %rax
andq $-0x10, %rax
andl $0x7, %ebp
movl %ebp, %ecx
vcmpnleps %xmm15, %xmm0, %xmm0
xorl %edx, %edx
vbroadcastss 0x1cbab64(%rip), %xmm15 # 0x1eeba20
movq %rbx, %rbp
movq (%rax,%rdx,8), %rdi
cmpq $0x8, %rdi
je 0x231062
vbroadcastss 0x80(%rax,%rdx,4), %xmm2
vbroadcastss 0x20(%rax,%rdx,4), %xmm3
vmovaps 0x70(%r14), %xmm1
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa0(%rax,%rdx,4), %xmm3
vbroadcastss 0x40(%rax,%rdx,4), %xmm4
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0xc0(%rax,%rdx,4), %xmm4
vbroadcastss 0x60(%rax,%rdx,4), %xmm5
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0x90(%rax,%rdx,4), %xmm5
vbroadcastss 0x30(%rax,%rdx,4), %xmm6
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0xb0(%rax,%rdx,4), %xmm6
vbroadcastss 0x50(%rax,%rdx,4), %xmm7
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0xd0(%rax,%rdx,4), %xmm7
vbroadcastss 0x70(%rax,%rdx,4), %xmm8
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmovaps 0x2e0(%rsp), %xmm8
vmovaps 0x2f0(%rsp), %xmm9
vmovaps 0x300(%rsp), %xmm10
vmovaps 0x340(%rsp), %xmm11
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm12
vsubps %xmm9, %xmm3, %xmm2
vmovaps 0x350(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm13
vsubps %xmm10, %xmm4, %xmm2
vmovaps 0x360(%rsp), %xmm4
vmulps %xmm4, %xmm2, %xmm14
vsubps %xmm8, %xmm5, %xmm2
vmulps %xmm2, %xmm11, %xmm5
vsubps %xmm9, %xmm6, %xmm2
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm7, %xmm2
vmulps %xmm4, %xmm2, %xmm4
vpminsd %xmm5, %xmm12, %xmm2
vpminsd %xmm3, %xmm13, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpminsd %xmm4, %xmm14, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpmaxsd %xmm5, %xmm12, %xmm5
vpmaxsd %xmm3, %xmm13, %xmm3
vpminsd %xmm3, %xmm5, %xmm5
vpmaxsd %xmm4, %xmm14, %xmm4
vpmaxsd 0x3a0(%rsp), %xmm2, %xmm3
vpminsd %xmm4, %xmm5, %xmm4
vpminsd 0x3b0(%rsp), %xmm4, %xmm4
cmpl $0x6, %ecx
je 0x23107b
vcmpleps %xmm4, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm1
vpslld $0x1f, %xmm1, %xmm1
vtestps %xmm1, %xmm1
je 0x231062
vbroadcastss 0x1cba9e2(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
cmpq $0x8, %rbp
je 0x23105b
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm15, (%r15)
addq $0x10, %r15
movq %rdi, %rbp
vmovaps %xmm1, %xmm15
cmpq $0x8, %rdi
je 0x2310ab
leaq 0x1(%rdx), %rdi
cmpq $0x3, %rdx
movq %rdi, %rdx
jb 0x230ebf
jmp 0x2310ab
vcmpleps %xmm4, %xmm3, %xmm3
vbroadcastss 0xe0(%rax,%rdx,4), %xmm4
vcmpleps %xmm1, %xmm4, %xmm4
vbroadcastss 0xf0(%rax,%rdx,4), %xmm5
vcmpltps %xmm5, %xmm1, %xmm1
vandps %xmm1, %xmm4, %xmm1
vandps %xmm3, %xmm1, %xmm1
jmp 0x231025
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x2310ef
vmovaps 0x3b0(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r9
jae 0x2310de
testb %cl, %cl
jne 0x230e8c
jmp 0x232766
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm15, (%r15)
addq $0x10, %r15
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x2310d1
cmpq $-0x8, %rbp
je 0x232763
vmovaps 0x3b0(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x232774
movq %r8, 0x88(%rsp)
movl %ebp, %eax
andl $0xf, %eax
cmpl $0x8, %eax
jne 0x23283f
vmovaps 0x150(%rsp), %xmm0
movq 0x168(%rsp), %rax
movl 0x8(%rax), %eax
decl %eax
vcvtsi2ss %rax, %xmm7, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vmulps 0x70(%r14), %xmm1, %xmm1
vroundps $0x1, %xmm1, %xmm3
vaddss 0x1cbf868(%rip), %xmm2, %xmm2 # 0x1ef09cc
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vminps %xmm2, %xmm3, %xmm2
vmaxps 0x1cba89b(%rip), %xmm2, %xmm2 # 0x1eeba10
vcvtps2dq %xmm2, %xmm3
vmovapd %xmm3, 0x240(%rsp)
vmovmskps %xmm0, %eax
xorl $0xf, %eax
je 0x2326f8
movq %rsi, 0xa0(%rsp)
vsubps %xmm2, %xmm1, %xmm1
vxorps 0x1cbac7d(%rip), %xmm0, %xmm2 # 0x1eebe20
vbroadcastss 0x1cbb568(%rip), %xmm0 # 0x1eec714
vmovaps %xmm1, 0x230(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm2, %xmm0
vmovaps 0x240(%rsp), %xmm1
vmovaps %xmm1, 0x3e0(%rsp)
shrq $0x2, %rbp
andq $-0x4, %rbp
vmovaps %xmm0, 0x410(%rsp)
movzbl %al, %eax
andl $0xf, %eax
bsfq %rax, %rax
cltq
movslq 0x240(%rsp,%rax,4), %rdi
vmovd %edi, %xmm0
movq 0x168(%rsp), %rax
movl 0x28(%rax), %ebx
movl 0x14(%rax), %r8d
movl 0xc(%rax), %r13d
xorl %ecx, %ecx
cmpq $0x2, %r13
setne %cl
movq %rcx, 0x30(%rsp)
movl 0x24(%rax), %ecx
movq %rax, (%rsp)
movq %rbx, %r11
imulq %rdi, %r11
imulq $0xc, %r8, %rsi
leaq (%rax,%rbp), %rdi
andl $-0x4, %ebx
movq %rcx, %rax
movq %rcx, 0x28(%rsp)
leaq (%rbx,%rcx), %r9
addq %r11, %r9
addq %r9, %rsi
leaq (%rsi,%r13,4), %rcx
addq %rdi, %rcx
movq %rcx, 0x10(%rsp)
leaq (%r11,%r8,8), %rdx
movq %r8, %rcx
movq %r8, 0x140(%rsp)
addq %rax, %rdx
addq %rdx, %rbx
leaq (%rbx,%r13,4), %r8
addq %rdi, %r8
movq %r8, 0xe0(%rsp)
leaq (%r9,%rcx,4), %rax
movq %rax, 0xb0(%rsp)
leaq (%rax,%r13,4), %r8
addq %rdi, %r8
movq %r8, 0xd8(%rsp)
leaq (%r9,%r13,4), %r8
addq %rdi, %r8
movq %r8, 0xd0(%rsp)
movq (%rsp), %rax
leaq (%rax,%rbp), %rdi
addq $0x30, %rdi
leaq (%r11,%r13,4), %rax
movq 0x28(%rsp), %rcx
addq %rdi, %rax
addq %rcx, %rax
movq %rax, 0xc8(%rsp)
movq %rsi, %r8
leaq (%rdx,%r13,4), %rax
addq %rdi, %rax
movq %rax, 0xc0(%rsp)
movq 0x140(%rsp), %rax
leaq (%r11,%rax,4), %rax
movq %rbx, %rsi
addq %rcx, %rax
leaq (%rax,%r13,4), %rbx
addq %rdi, %rbx
movq %rbx, 0xb8(%rsp)
addq %rdi, %r8
addq %rdi, %rsi
addq %rdi, 0xb0(%rsp)
addq %rdi, %r9
movq %r9, 0x198(%rsp)
movq 0x10(%rsp), %r9
addq %rdi, %rdx
movq %rdx, 0x190(%rsp)
addq %rdi, %rax
movq %rax, 0x188(%rsp)
movq %rcx, %rdi
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpcmpeqd 0x3e0(%rsp), %xmm0, %xmm0
vpand %xmm2, %xmm0, %xmm0
vpxor 0x1cbaad2(%rip), %xmm0, %xmm1 # 0x1eebe20
vmovdqa %xmm1, 0x3f0(%rsp)
movq (%rsp), %rax
addq %rax, %r11
movl 0x10(%rax), %eax
movl %eax, 0xec(%rsp)
movq 0x30(%rsp), %rax
leaq 0x4(,%rax,4), %rax
movq %rax, 0x200(%rsp)
leaq (,%r13,4), %rax
movq %rax, 0x1f8(%rsp)
addq $0x2c, %rdi
vmovdqa %xmm0, 0x400(%rsp)
vmovdqa %xmm0, %xmm9
movq $0x0, 0x180(%rsp)
vmovaps %xmm2, %xmm0
vmovaps %xmm2, 0x210(%rsp)
leaq (%r11,%rbp), %rax
addq %rdi, %rax
movq %rax, 0x208(%rsp)
xorl %r13d, %r13d
cmpq %r13, 0x200(%rsp)
je 0x232626
movq 0x208(%rsp), %rdx
vbroadcastss (%rdx,%r13), %xmm3
movq 0x188(%rsp), %rcx
vbroadcastss -0x4(%rcx,%r13), %xmm5
movq 0x190(%rsp), %rax
vbroadcastss -0x4(%rax,%r13), %xmm6
vbroadcastss 0x4(%rdx,%r13), %xmm7
vbroadcastss (%rcx,%r13), %xmm1
vbroadcastss (%rax,%r13), %xmm4
movq 0xc8(%rsp), %rax
vbroadcastss -0x4(%rax,%r13), %xmm2
movq 0xb8(%rsp), %rax
vbroadcastss -0x4(%rax,%r13), %xmm0
vmovaps %xmm0, 0x60(%rsp)
movq 0xc0(%rsp), %rax
vbroadcastss -0x4(%rax,%r13), %xmm0
vmovaps %xmm0, 0x70(%rsp)
movq 0x198(%rsp), %rax
vmovdqa %xmm9, (%rsp)
vbroadcastss -0x4(%rax,%r13), %xmm9
movq 0xb0(%rsp), %rcx
vbroadcastss -0x4(%rcx,%r13), %xmm10
vbroadcastss -0x4(%rsi,%r13), %xmm11
vbroadcastss (%rax,%r13), %xmm12
vbroadcastss (%rcx,%r13), %xmm13
vbroadcastss (%rsi,%r13), %xmm14
movq 0xd0(%rsp), %rax
vbroadcastss 0x2c(%rax,%r13), %xmm15
vmovaps 0x230(%rsp), %xmm0
vmulps %xmm0, %xmm9, %xmm9
vmovaps 0x220(%rsp), %xmm8
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm3, %xmm9, %xmm3
vmovaps %xmm3, 0x10(%rsp)
movq 0xd8(%rsp), %rax
vbroadcastss 0x2c(%rax,%r13), %xmm9
vmulps %xmm0, %xmm10, %xmm10
vmulps %xmm5, %xmm8, %xmm5
vaddps %xmm5, %xmm10, %xmm5
movq 0xe0(%rsp), %rax
vbroadcastss 0x2c(%rax,%r13), %xmm10
vmulps %xmm0, %xmm11, %xmm11
vmulps %xmm6, %xmm8, %xmm6
vaddps %xmm6, %xmm11, %xmm3
vmulps %xmm0, %xmm12, %xmm6
vmulps %xmm0, %xmm13, %xmm11
vmulps %xmm0, %xmm14, %xmm12
vmulps %xmm7, %xmm8, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmovaps %xmm6, 0x30(%rsp)
vmulps %xmm1, %xmm8, %xmm6
vaddps %xmm6, %xmm11, %xmm1
vmovaps %xmm1, 0x140(%rsp)
vmulps %xmm4, %xmm8, %xmm4
vaddps %xmm4, %xmm12, %xmm1
vmovaps %xmm1, 0x1d0(%rsp)
vmulps %xmm0, %xmm15, %xmm4
vmulps %xmm0, %xmm9, %xmm6
vmulps %xmm0, %xmm10, %xmm7
vmulps %xmm2, %xmm8, %xmm2
vaddps %xmm4, %xmm2, %xmm10
vmulps 0x60(%rsp), %xmm8, %xmm1
vaddps %xmm6, %xmm1, %xmm0
vmulps 0x70(%rsp), %xmm8, %xmm1
vaddps %xmm7, %xmm1, %xmm1
vmovaps 0x10(%r14), %xmm6
vmovaps 0x20(%r14), %xmm11
vsubps %xmm6, %xmm5, %xmm15
vsubps %xmm11, %xmm3, %xmm3
vmovaps %xmm0, 0x510(%rsp)
vsubps %xmm6, %xmm0, %xmm2
vmovaps %xmm2, 0x60(%rsp)
vmovaps %xmm1, 0x500(%rsp)
vsubps %xmm11, %xmm1, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vsubps %xmm15, %xmm2, %xmm1
vsubps %xmm3, %xmm0, %xmm5
vaddps %xmm2, %xmm15, %xmm7
vaddps %xmm3, %xmm0, %xmm8
vmulps %xmm5, %xmm7, %xmm0
vmulps %xmm1, %xmm8, %xmm9
vsubps %xmm0, %xmm9, %xmm12
vmovaps (%r14), %xmm9
vmovaps 0x10(%rsp), %xmm0
vsubps %xmm9, %xmm0, %xmm13
vmovaps %xmm10, 0x520(%rsp)
vsubps %xmm9, %xmm10, %xmm0
vsubps %xmm13, %xmm0, %xmm2
vmulps %xmm2, %xmm8, %xmm8
vaddps %xmm0, %xmm13, %xmm10
vmovaps %xmm5, 0xf0(%rsp)
vmulps %xmm5, %xmm10, %xmm14
vsubps %xmm8, %xmm14, %xmm14
vmovaps %xmm1, 0x100(%rsp)
vmulps %xmm1, %xmm10, %xmm8
vmovaps %xmm2, 0x1e0(%rsp)
vmulps %xmm7, %xmm2, %xmm7
vsubps %xmm8, %xmm7, %xmm8
vmovaps 0x60(%r14), %xmm2
vmulps %xmm2, %xmm8, %xmm10
vmovaps 0x50(%r14), %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmulps %xmm1, %xmm14, %xmm14
vaddps %xmm14, %xmm10, %xmm14
vmovaps 0x40(%r14), %xmm4
vmulps %xmm4, %xmm12, %xmm12
vaddps %xmm14, %xmm12, %xmm10
vmovaps 0x140(%rsp), %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmovaps 0x1d0(%rsp), %xmm5
vsubps %xmm11, %xmm5, %xmm5
vsubps %xmm1, %xmm15, %xmm12
vsubps %xmm5, %xmm3, %xmm8
vmovaps %xmm15, 0x130(%rsp)
vaddps %xmm1, %xmm15, %xmm6
vmovaps %xmm3, 0x120(%rsp)
vaddps %xmm5, %xmm3, %xmm11
vmulps %xmm6, %xmm8, %xmm15
vmulps %xmm11, %xmm12, %xmm7
vsubps %xmm15, %xmm7, %xmm7
vmovaps 0x30(%rsp), %xmm3
vsubps %xmm9, %xmm3, %xmm9
vsubps %xmm9, %xmm13, %xmm3
vmulps %xmm3, %xmm11, %xmm11
vmovaps %xmm13, 0x110(%rsp)
vaddps %xmm9, %xmm13, %xmm15
vmovaps %xmm2, %xmm14
vmovaps %xmm8, 0x560(%rsp)
vmulps %xmm8, %xmm15, %xmm8
vsubps %xmm11, %xmm8, %xmm8
vmovaps %xmm12, 0x570(%rsp)
vmulps %xmm12, %xmm15, %xmm11
vmovaps %xmm3, %xmm12
vmulps %xmm6, %xmm3, %xmm6
vsubps %xmm11, %xmm6, %xmm6
vmulps %xmm2, %xmm6, %xmm6
vmovaps 0x10(%rsp), %xmm13
vmulps %xmm8, %xmm13, %xmm8
vaddps %xmm6, %xmm8, %xmm6
vbroadcastss 0x1cef7ce(%rip), %xmm8 # 0x1f20ec4
vmulps %xmm7, %xmm4, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vsubps %xmm0, %xmm9, %xmm15
vaddps %xmm0, %xmm9, %xmm0
vmovaps (%rsp), %xmm3
vmovaps 0x60(%rsp), %xmm2
vsubps %xmm2, %xmm1, %xmm11
vaddps %xmm2, %xmm1, %xmm1
vmovaps 0x70(%rsp), %xmm2
vsubps %xmm2, %xmm5, %xmm9
vaddps %xmm2, %xmm5, %xmm2
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm2, %xmm11, %xmm7
vsubps %xmm5, %xmm7, %xmm5
vmulps %xmm2, %xmm15, %xmm2
vmulps %xmm0, %xmm9, %xmm7
vsubps %xmm2, %xmm7, %xmm2
vmulps %xmm0, %xmm11, %xmm0
vmulps %xmm1, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm14, 0x60(%rsp)
vmulps %xmm0, %xmm14, %xmm0
vmulps %xmm2, %xmm13, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps %xmm4, 0x70(%rsp)
vmulps %xmm5, %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm6, %xmm10, %xmm1
vaddps %xmm1, %xmm0, %xmm4
vandps %xmm4, %xmm8, %xmm1
vbroadcastss 0x1cef74c(%rip), %xmm2 # 0x1f20ecc
vmulps %xmm2, %xmm1, %xmm1
vminps %xmm6, %xmm10, %xmm2
vminps %xmm0, %xmm2, %xmm2
vbroadcastss 0x1cef72b(%rip), %xmm5 # 0x1f20ec0
vxorps %xmm5, %xmm1, %xmm5
vcmpnltps %xmm5, %xmm2, %xmm2
vmovaps %xmm10, 0x550(%rsp)
vmaxps %xmm6, %xmm10, %xmm5
vmaxps %xmm0, %xmm5, %xmm0
vcmpleps %xmm1, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vtestps %xmm3, %xmm0
vandps %xmm3, %xmm0, %xmm0
je 0x231988
vmovaps %xmm4, 0x530(%rsp)
vmovaps 0xf0(%rsp), %xmm7
vmovaps 0x570(%rsp), %xmm10
vmovaps %xmm0, 0x540(%rsp)
vmulps %xmm7, %xmm10, %xmm0
vmovaps 0x100(%rsp), %xmm3
vmovaps 0x560(%rsp), %xmm13
vmulps %xmm3, %xmm13, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm13, %xmm11, %xmm2
vmulps %xmm9, %xmm10, %xmm5
vsubps %xmm2, %xmm5, %xmm5
vandps %xmm0, %xmm8, %xmm0
vandps %xmm2, %xmm8, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm5, %xmm2
vmulps %xmm9, %xmm12, %xmm0
vmulps %xmm7, %xmm12, %xmm1
vmovaps 0x1e0(%rsp), %xmm7
vmulps %xmm7, %xmm13, %xmm4
vsubps %xmm4, %xmm1, %xmm1
vmulps %xmm13, %xmm15, %xmm5
vsubps %xmm0, %xmm5, %xmm5
vandps %xmm4, %xmm8, %xmm4
vandps %xmm0, %xmm8, %xmm0
vcmpltps %xmm0, %xmm4, %xmm0
vblendvps %xmm0, %xmm1, %xmm5, %xmm4
vmulps %xmm10, %xmm15, %xmm0
vmulps %xmm7, %xmm10, %xmm1
vmulps %xmm3, %xmm12, %xmm5
vmulps %xmm11, %xmm12, %xmm7
vsubps %xmm5, %xmm1, %xmm1
vsubps %xmm0, %xmm7, %xmm7
vandps %xmm5, %xmm8, %xmm5
vandps %xmm0, %xmm8, %xmm0
vcmpltps %xmm0, %xmm5, %xmm0
vblendvps %xmm0, %xmm1, %xmm7, %xmm5
vmulps 0x60(%rsp), %xmm5, %xmm0
vmulps 0x10(%rsp), %xmm4, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x70(%rsp), %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vmulps 0x120(%rsp), %xmm5, %xmm1
vmulps 0x130(%rsp), %xmm4, %xmm7
vaddps %xmm7, %xmm1, %xmm1
vmulps 0x110(%rsp), %xmm2, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm1, %xmm1
vrcpps %xmm0, %xmm3
vmulps %xmm3, %xmm0, %xmm7
vbroadcastss 0x1cbae31(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm3, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vmovaps 0x30(%r14), %xmm3
vcmpleps 0x80(%r14), %xmm1, %xmm7
vcmpleps %xmm1, %xmm3, %xmm3
vandps %xmm7, %xmm3, %xmm3
vcmpneqps 0x1cba0fb(%rip), %xmm0, %xmm0 # 0x1eeba10
vandps %xmm3, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm0
vmovaps 0x540(%rsp), %xmm3
vtestps %xmm3, %xmm0
vandps %xmm3, %xmm0, %xmm0
je 0x231988
vmovaps 0x550(%rsp), %xmm3
vmovaps %xmm3, 0x4f0(%rsp)
vmovaps %xmm6, 0x4e0(%rsp)
vmovaps 0x530(%rsp), %xmm3
vmovaps %xmm3, 0x4d0(%rsp)
vmovaps %xmm1, 0x4c0(%rsp)
vmovaps %xmm2, 0x4b0(%rsp)
vmovaps %xmm4, 0x4a0(%rsp)
vmovaps %xmm5, 0x490(%rsp)
vmovdqa (%rsp), %xmm9
movq 0xc8(%rsp), %rax
vbroadcastss (%rax,%r13), %xmm12
movq 0xb8(%rsp), %rax
vbroadcastss (%rax,%r13), %xmm11
movq 0xc0(%rsp), %rax
vbroadcastss (%rax,%r13), %xmm10
movq 0xd0(%rsp), %rax
vbroadcastss 0x30(%rax,%r13), %xmm13
movq 0xd8(%rsp), %rax
vbroadcastss 0x30(%rax,%r13), %xmm14
movq 0xe0(%rsp), %rax
vbroadcastss 0x30(%rax,%r13), %xmm15
movq 0x168(%rsp), %rcx
movl 0x18(%rcx), %eax
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rbx
vbroadcastss 0x34(%rbx), %xmm1
vandps 0x90(%r14), %xmm1, %xmm1
vpcmpeqd 0x1cb9ffc(%rip), %xmm1, %xmm1 # 0x1eeba10
vtestps %xmm0, %xmm1
jb 0x231a41
movl 0x1c(%rcx), %ecx
vandnps %xmm0, %xmm1, %xmm0
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x231f12
cmpq $0x0, 0x48(%rbx)
jne 0x231f12
vpandn %xmm9, %xmm0, %xmm9
vtestps %xmm9, %xmm9
je 0x231ef6
vmovaps 0x230(%rsp), %xmm2
vmulps %xmm2, %xmm13, %xmm0
vmulps %xmm2, %xmm14, %xmm1
vmulps %xmm2, %xmm15, %xmm2
vmovaps 0x220(%rsp), %xmm4
vmulps %xmm4, %xmm12, %xmm3
vaddps %xmm0, %xmm3, %xmm12
vmulps %xmm4, %xmm11, %xmm3
vaddps %xmm1, %xmm3, %xmm5
vmulps %xmm4, %xmm10, %xmm1
vaddps %xmm2, %xmm1, %xmm6
vmovaps (%r14), %xmm1
vmovaps 0x10(%r14), %xmm7
vmovaps 0x20(%r14), %xmm8
vmovaps 0x520(%rsp), %xmm2
vsubps %xmm1, %xmm2, %xmm0
vmovaps 0x510(%rsp), %xmm2
vsubps %xmm7, %xmm2, %xmm13
vmovaps 0x500(%rsp), %xmm2
vsubps %xmm8, %xmm2, %xmm15
vmovaps 0x30(%rsp), %xmm2
vsubps %xmm1, %xmm2, %xmm2
vmovaps %xmm2, 0x10(%rsp)
vmovaps 0x140(%rsp), %xmm2
vsubps %xmm7, %xmm2, %xmm3
vmovaps 0x1d0(%rsp), %xmm2
vsubps %xmm8, %xmm2, %xmm11
vsubps %xmm1, %xmm12, %xmm4
vsubps %xmm7, %xmm5, %xmm5
vsubps %xmm8, %xmm6, %xmm1
vmovaps %xmm1, 0x30(%rsp)
vsubps %xmm0, %xmm4, %xmm2
vsubps %xmm13, %xmm5, %xmm7
vsubps %xmm15, %xmm1, %xmm14
vaddps %xmm5, %xmm13, %xmm6
vaddps %xmm1, %xmm15, %xmm8
vmovaps %xmm9, (%rsp)
vmulps %xmm6, %xmm14, %xmm9
vmulps %xmm7, %xmm8, %xmm10
vsubps %xmm9, %xmm10, %xmm12
vaddps %xmm0, %xmm4, %xmm9
vmulps %xmm2, %xmm8, %xmm8
vmovaps %xmm14, 0x110(%rsp)
vmulps %xmm14, %xmm9, %xmm10
vsubps %xmm8, %xmm10, %xmm10
vmovaps %xmm7, 0x120(%rsp)
vmulps %xmm7, %xmm9, %xmm8
vmovaps %xmm2, 0x130(%rsp)
vmulps %xmm6, %xmm2, %xmm6
vsubps %xmm8, %xmm6, %xmm6
vmovaps 0x60(%r14), %xmm1
vmulps %xmm1, %xmm6, %xmm6
vmovaps 0x50(%r14), %xmm14
vmulps %xmm10, %xmm14, %xmm10
vaddps %xmm6, %xmm10, %xmm6
vmovaps 0x40(%r14), %xmm2
vmovaps %xmm2, 0x140(%rsp)
vmulps %xmm2, %xmm12, %xmm12
vaddps %xmm6, %xmm12, %xmm10
vsubps %xmm3, %xmm13, %xmm12
vmovaps %xmm11, %xmm9
vsubps %xmm11, %xmm15, %xmm11
vmovaps %xmm13, 0x70(%rsp)
vaddps %xmm3, %xmm13, %xmm6
vmovaps %xmm15, 0x60(%rsp)
vaddps %xmm9, %xmm15, %xmm15
vmulps %xmm6, %xmm11, %xmm13
vmulps %xmm15, %xmm12, %xmm2
vsubps %xmm13, %xmm2, %xmm2
vmovaps 0x10(%rsp), %xmm8
vsubps %xmm8, %xmm0, %xmm13
vmulps %xmm15, %xmm13, %xmm15
vmovaps %xmm0, 0x1d0(%rsp)
vaddps %xmm0, %xmm8, %xmm7
vmovaps %xmm8, %xmm0
vmulps %xmm7, %xmm11, %xmm8
vsubps %xmm15, %xmm8, %xmm8
vmovaps %xmm12, 0x100(%rsp)
vmulps %xmm7, %xmm12, %xmm7
vmulps %xmm6, %xmm13, %xmm6
vsubps %xmm7, %xmm6, %xmm6
vmulps %xmm1, %xmm6, %xmm6
vmulps %xmm8, %xmm14, %xmm7
vmovaps %xmm1, %xmm8
vaddps %xmm7, %xmm6, %xmm6
vmovaps 0x140(%rsp), %xmm12
vmulps %xmm2, %xmm12, %xmm2
vaddps %xmm6, %xmm2, %xmm7
vmovaps %xmm7, 0x1e0(%rsp)
vsubps %xmm4, %xmm0, %xmm15
vaddps %xmm4, %xmm0, %xmm1
vsubps %xmm5, %xmm3, %xmm6
vaddps %xmm5, %xmm3, %xmm0
vmovaps 0x30(%rsp), %xmm2
vsubps %xmm2, %xmm9, %xmm4
vaddps %xmm2, %xmm9, %xmm2
vbroadcastss 0x1cef277(%rip), %xmm9 # 0x1f20ec4
vmulps %xmm4, %xmm0, %xmm3
vmulps %xmm2, %xmm6, %xmm5
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm2, %xmm15, %xmm2
vmulps %xmm4, %xmm1, %xmm5
vsubps %xmm2, %xmm5, %xmm2
vmulps %xmm6, %xmm1, %xmm1
vmulps %xmm0, %xmm15, %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm8, 0x30(%rsp)
vmulps %xmm0, %xmm8, %xmm0
vmovaps %xmm14, 0x10(%rsp)
vmulps %xmm2, %xmm14, %xmm1
vmovaps %xmm12, %xmm14
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm3, %xmm12, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm7, %xmm10, %xmm1
vaddps %xmm1, %xmm0, %xmm12
vandps %xmm9, %xmm12, %xmm1
vbroadcastss 0x1cef220(%rip), %xmm2 # 0x1f20ecc
vmulps %xmm2, %xmm1, %xmm1
vminps %xmm7, %xmm10, %xmm2
vminps %xmm0, %xmm2, %xmm2
vbroadcastss 0x1cef1ff(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm1, %xmm3
vcmpnltps %xmm3, %xmm2, %xmm2
vmovaps %xmm10, 0xf0(%rsp)
vmaxps %xmm7, %xmm10, %xmm3
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm1, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vmovaps (%rsp), %xmm1
vtestps %xmm1, %xmm0
vandps %xmm1, %xmm0, %xmm10
je 0x231e89
vmovaps 0x110(%rsp), %xmm5
vmovaps 0x100(%rsp), %xmm8
vmulps %xmm5, %xmm8, %xmm0
vmovaps 0x120(%rsp), %xmm7
vmulps %xmm7, %xmm11, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm6, %xmm11, %xmm2
vmulps %xmm4, %xmm8, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vandps %xmm0, %xmm9, %xmm0
vandps %xmm2, %xmm9, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm3
vmulps %xmm4, %xmm13, %xmm0
vmulps %xmm5, %xmm13, %xmm1
vmovaps 0x130(%rsp), %xmm5
vmulps %xmm5, %xmm11, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmulps %xmm11, %xmm15, %xmm4
vsubps %xmm0, %xmm4, %xmm4
vandps %xmm2, %xmm9, %xmm2
vandps %xmm0, %xmm9, %xmm0
vcmpltps %xmm0, %xmm2, %xmm0
vblendvps %xmm0, %xmm1, %xmm4, %xmm1
vmulps %xmm8, %xmm15, %xmm0
vmulps %xmm5, %xmm8, %xmm2
vmulps %xmm7, %xmm13, %xmm4
vmulps %xmm6, %xmm13, %xmm5
vsubps %xmm4, %xmm2, %xmm2
vsubps %xmm0, %xmm5, %xmm5
vandps %xmm4, %xmm9, %xmm4
vandps %xmm0, %xmm9, %xmm0
vcmpltps %xmm0, %xmm4, %xmm0
vblendvps %xmm0, %xmm2, %xmm5, %xmm4
vmulps 0x30(%rsp), %xmm4, %xmm0
vmulps 0x10(%rsp), %xmm1, %xmm2
vaddps %xmm2, %xmm0, %xmm0
vmulps %xmm3, %xmm14, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vmulps 0x60(%rsp), %xmm4, %xmm2
vmulps 0x70(%rsp), %xmm1, %xmm5
vaddps %xmm5, %xmm2, %xmm2
vmulps 0x1d0(%rsp), %xmm3, %xmm5
vaddps %xmm2, %xmm5, %xmm2
vaddps %xmm2, %xmm2, %xmm2
vrcpps %xmm0, %xmm5
vmulps %xmm5, %xmm0, %xmm7
vbroadcastss 0x1cba927(%rip), %xmm8 # 0x1eec714
vsubps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm5, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vmulps %xmm5, %xmm2, %xmm2
vmovaps 0x30(%r14), %xmm5
vcmpleps 0x80(%r14), %xmm2, %xmm7
vcmpleps %xmm2, %xmm5, %xmm5
vandps %xmm7, %xmm5, %xmm5
vcmpneqps 0x1cb9bf1(%rip), %xmm0, %xmm0 # 0x1eeba10
vandps %xmm5, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm0
vtestps %xmm10, %xmm0
vandps %xmm0, %xmm10, %xmm10
je 0x231e89
vmovaps 0xf0(%rsp), %xmm5
vmovaps %xmm5, 0x480(%rsp)
vmovaps 0x1e0(%rsp), %xmm5
vmovaps %xmm5, 0x470(%rsp)
vmovaps %xmm12, 0x460(%rsp)
vmovaps %xmm2, 0x450(%rsp)
vmovaps %xmm3, 0x440(%rsp)
vmovaps %xmm1, 0x430(%rsp)
vmovaps %xmm4, 0x420(%rsp)
vmovdqa (%rsp), %xmm9
movq 0x168(%rsp), %rcx
movl 0x18(%rcx), %eax
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %rbx
vbroadcastss 0x34(%rbx), %xmm1
vandps 0x90(%r14), %xmm1, %xmm1
vpcmpeqd 0x1cb9b52(%rip), %xmm1, %xmm1 # 0x1eeba10
vtestps %xmm10, %xmm1
jb 0x231eec
movl 0x1c(%rcx), %ecx
vandnps %xmm10, %xmm1, %xmm0
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x2322b9
cmpq $0x0, 0x48(%rbx)
jne 0x2322b9
vpandn %xmm9, %xmm0, %xmm9
vtestps %xmm9, %xmm9
setne %al
jmp 0x231ef8
xorl %eax, %eax
vmovaps 0x210(%rsp), %xmm0
addq $0x4, %r13
testb %al, %al
jne 0x2313c9
jmp 0x232626
vmovaps 0x4d0(%rsp), %xmm3
vbroadcastss 0x1ceefa0(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm3, %xmm1
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cba7db(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cbf09a(%rip), %xmm3 # 0x1ef0fe8
vcmpnltps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm2
vmulps 0x4f0(%rsp), %xmm2, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps 0x4e0(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vbroadcastss -0x4(%r8,%r13), %xmm3
vbroadcastss 0x2c(%r9,%r13), %xmm4
vpsrld $0x10, %xmm3, %xmm5
vxorps %xmm7, %xmm7, %xmm7
vpblendw $0xaa, %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm7[1],xmm3[2],xmm7[3],xmm3[4],xmm7[5],xmm3[6],xmm7[7]
vcvtdq2ps %xmm3, %xmm3
vbroadcastss 0x1cec545(%rip), %xmm8 # 0x1f1e4e0
vmulps %xmm3, %xmm8, %xmm3
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vsubps %xmm1, %xmm6, %xmm6
vsubps %xmm2, %xmm6, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vmulps %xmm5, %xmm6, %xmm5
vpblendw $0xaa, %xmm7, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4],xmm7[5],xmm4[6],xmm7[7]
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm2, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss (%r8,%r13), %xmm4
vaddps %xmm2, %xmm5, %xmm2
vpblendw $0xaa, %xmm7, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4],xmm7[5],xmm4[6],xmm7[7]
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm1
movq 0x8(%r10), %rdx
vmovaps 0x4b0(%rsp), %xmm2
vmovaps %xmm2, 0x250(%rsp)
vmovaps 0x4a0(%rsp), %xmm2
vmovaps %xmm2, 0x260(%rsp)
vmovaps 0x490(%rsp), %xmm2
vmovaps %xmm2, 0x270(%rsp)
vmovaps %xmm3, 0x280(%rsp)
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps %xmm1, 0x290(%rsp)
vmovd %ecx, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovdqa %xmm1, 0x2a0(%rsp)
vmovdqa %xmm2, 0x2b0(%rsp)
leaq 0x2c0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x2c0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x2d0(%rsp)
vmovaps 0x80(%r14), %xmm3
vblendvps %xmm0, 0x4c0(%rsp), %xmm3, %xmm1
vmovaps %xmm1, 0x80(%r14)
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x1a8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x1b0(%rsp)
movq %r14, 0x1b8(%rsp)
leaq 0x250(%rsp), %rax
movq %rax, 0x1c0(%rsp)
movl $0x4, 0x1c8(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %rsi, 0x58(%rsp)
movq %rdi, 0x28(%rsp)
movq %r11, 0x50(%rsp)
movq %r8, 0x48(%rsp)
vmovaps %xmm10, 0x70(%rsp)
vmovaps %xmm11, 0x60(%rsp)
vmovaps %xmm12, 0x130(%rsp)
vmovaps %xmm13, 0x120(%rsp)
vmovaps %xmm14, 0x110(%rsp)
vmovaps %xmm15, 0x100(%rsp)
vmovaps %xmm3, 0xf0(%rsp)
je 0x2321e6
leaq 0x1a0(%rsp), %rdi
movq %r9, 0x10(%rsp)
vzeroupper
callq *%rax
vmovaps 0xf0(%rsp), %xmm3
vmovaps 0x100(%rsp), %xmm15
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x120(%rsp), %xmm13
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm11
vmovaps 0x70(%rsp), %xmm10
vmovdqa (%rsp), %xmm9
movq 0x10(%rsp), %r9
movq 0x48(%rsp), %r8
movq 0x50(%rsp), %r11
movq 0x28(%rsp), %rdi
movq 0x58(%rsp), %rsi
movq 0x40(%rsp), %r10
vmovdqa 0x90(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x2325c4
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x23227c
testb $0x2, (%rcx)
jne 0x232212
testb $0x40, 0x3e(%rbx)
je 0x23227c
leaq 0x1a0(%rsp), %rdi
movq %r9, %rbx
vzeroupper
callq *%rax
vmovaps 0xf0(%rsp), %xmm3
vmovaps 0x100(%rsp), %xmm15
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x120(%rsp), %xmm13
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm11
vmovaps 0x70(%rsp), %xmm10
vmovdqa (%rsp), %xmm9
movq %rbx, %r9
movq 0x48(%rsp), %r8
movq 0x50(%rsp), %r11
movq 0x28(%rsp), %rdi
movq 0x58(%rsp), %rsi
movq 0x40(%rsp), %r10
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x90(%rsp), %xmm0, %xmm1
vpxor 0x1cb9b8f(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x1b8(%rsp), %rax
vbroadcastss 0x1cba8e2(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x2325d4
vmovaps 0x460(%rsp), %xmm3
vbroadcastss 0x1ceebf9(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm3, %xmm1
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cba434(%rip), %xmm6 # 0x1eec714
vsubps %xmm3, %xmm6, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cbecf3(%rip), %xmm3 # 0x1ef0fe8
vcmpnltps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm2
vmulps 0x480(%rsp), %xmm2, %xmm1
vminps %xmm6, %xmm1, %xmm1
vmulps 0x470(%rsp), %xmm2, %xmm2
vminps %xmm6, %xmm2, %xmm2
vbroadcastss 0x2c(%r9,%r13), %xmm3
vbroadcastss 0x30(%r9,%r13), %xmm4
vpsrld $0x10, %xmm3, %xmm5
vxorps %xmm7, %xmm7, %xmm7
vpblendw $0xaa, %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm7[1],xmm3[2],xmm7[3],xmm3[4],xmm7[5],xmm3[6],xmm7[7]
vcvtdq2ps %xmm3, %xmm3
vbroadcastss 0x1cec19e(%rip), %xmm8 # 0x1f1e4e0
vmulps %xmm3, %xmm8, %xmm3
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vsubps %xmm1, %xmm6, %xmm6
vsubps %xmm2, %xmm6, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vmulps %xmm5, %xmm6, %xmm5
vpblendw $0xaa, %xmm7, %xmm4, %xmm6 # xmm6 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4],xmm7[5],xmm4[6],xmm7[7]
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm2, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm2, %xmm2
vbroadcastss (%r8,%r13), %xmm4
vaddps %xmm2, %xmm5, %xmm2
vpblendw $0xaa, %xmm7, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm7[1],xmm4[2],xmm7[3],xmm4[4],xmm7[5],xmm4[6],xmm7[7]
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm3, %xmm5, %xmm3
vpsrld $0x10, %xmm4, %xmm4
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm1
movq 0x8(%r10), %rdx
vmovaps 0x440(%rsp), %xmm2
vmovaps %xmm2, 0x250(%rsp)
vmovaps 0x430(%rsp), %xmm2
vmovaps %xmm2, 0x260(%rsp)
vmovaps 0x420(%rsp), %xmm2
vmovaps %xmm2, 0x270(%rsp)
vmovaps %xmm3, 0x280(%rsp)
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps %xmm1, 0x290(%rsp)
vmovd %ecx, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovdqa %xmm1, 0x2a0(%rsp)
vmovdqa %xmm2, 0x2b0(%rsp)
leaq 0x2c0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x2c0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x2d0(%rsp)
vmovaps 0x80(%r14), %xmm3
vblendvps %xmm0, 0x450(%rsp), %xmm3, %xmm1
vmovaps %xmm1, 0x80(%r14)
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movq 0x18(%rbx), %rax
movq %rax, 0x1a8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x1b0(%rsp)
movq %r14, 0x1b8(%rsp)
leaq 0x250(%rsp), %rax
movq %rax, 0x1c0(%rsp)
movl $0x4, 0x1c8(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movq %rsi, 0x58(%rsp)
movq %rdi, 0x28(%rsp)
movq %r11, 0x50(%rsp)
movq %r8, 0x48(%rsp)
vmovaps %xmm3, 0x30(%rsp)
je 0x232527
leaq 0x1a0(%rsp), %rdi
movq %r9, 0x10(%rsp)
vzeroupper
callq *%rax
vmovaps 0x30(%rsp), %xmm3
vmovdqa (%rsp), %xmm9
movq 0x10(%rsp), %r9
movq 0x48(%rsp), %r8
movq 0x50(%rsp), %r11
movq 0x28(%rsp), %rdi
movq 0x58(%rsp), %rsi
movq 0x40(%rsp), %r10
vmovdqa 0x90(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x2325f5
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x23258a
testb $0x2, (%rcx)
jne 0x232553
testb $0x40, 0x3e(%rbx)
je 0x23258a
leaq 0x1a0(%rsp), %rdi
movq %r9, %rbx
vzeroupper
callq *%rax
vmovaps 0x30(%rsp), %xmm3
vmovdqa (%rsp), %xmm9
movq %rbx, %r9
movq 0x48(%rsp), %r8
movq 0x50(%rsp), %r11
movq 0x28(%rsp), %rdi
movq 0x58(%rsp), %rsi
movq 0x40(%rsp), %r10
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x90(%rsp), %xmm0, %xmm1
vpxor 0x1cb9881(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x1b8(%rsp), %rax
vbroadcastss 0x1cba5d4(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x232605
vpcmpeqd 0x1cb9444(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cb984c(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm1
vpsrad $0x1f, %xmm1, %xmm0
movq 0xa8(%rsp), %rax
vblendvps %xmm1, (%rax), %xmm3, %xmm1
vmovaps %xmm1, (%rax)
jmp 0x231a3c
vpcmpeqd 0x1cb9413(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cb981b(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm1
vpsrad $0x1f, %xmm1, %xmm0
movq 0xa8(%rsp), %rax
vblendvps %xmm1, (%rax), %xmm3, %xmm1
vmovaps %xmm1, (%rax)
jmp 0x231ee7
cmpl $0x2, 0xec(%rsp)
je 0x2326b5
movq 0x1f8(%rsp), %rax
addq %rax, %r9
addq %rax, 0xe0(%rsp)
addq %rax, 0xd8(%rsp)
addq %rax, 0xd0(%rsp)
addq %rax, 0xc0(%rsp)
addq %rax, 0xb8(%rsp)
addq %rax, 0xc8(%rsp)
addq %rax, %r8
addq %rax, %rsi
addq %rax, 0xb0(%rsp)
addq %rax, 0x198(%rsp)
addq %rax, 0x190(%rsp)
addq %rax, 0x188(%rsp)
addq %rax, %r11
movq 0x180(%rsp), %rax
testq %rax, %rax
leaq 0x1(%rax), %rax
movq %rax, 0x180(%rsp)
je 0x2313b7
vxorps 0x400(%rsp), %xmm0, %xmm2
vpor 0x3f0(%rsp), %xmm9, %xmm0
vmovdqa 0x410(%rsp), %xmm1
vpand %xmm1, %xmm0, %xmm0
vmovmskps %xmm2, %eax
testl %eax, %eax
jne 0x2311e0
vpxor 0x1cb9738(%rip), %xmm0, %xmm0 # 0x1eebe20
movq 0xa0(%rsp), %rsi
movq 0x178(%rsp), %r9
xorl %ecx, %ecx
movq 0x88(%rsp), %r8
vpor 0x150(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0x150(%rsp)
vtestps 0x1cb9703(%rip), %xmm0 # 0x1eebe20
jb 0x232763
vmovaps 0x3b0(%rsp), %xmm1
vbroadcastss 0x1cba453(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
xorl %eax, %eax
testq %rcx, %rcx
je 0x232766
movq %rcx, (%r12)
addq $0x8, %r12
vbroadcastss 0x1cba42c(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, (%r15)
addq $0x10, %r15
jmp 0x232766
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x230e3a
jmp 0x232860
pushq $0x2
jmp 0x232765
vmovaps %xmm15, 0x10(%rsp)
movq %rsi, 0xa0(%rsp)
movq %r8, 0x88(%rsp)
bsfq %r13, %rcx
movq %rsi, %rdi
movq %r8, %rsi
movq %rbp, %rdx
movq %rcx, (%rsp)
leaq 0x168(%rsp), %r8
movq %r14, %r9
pushq %r10
leaq 0x2e8(%rsp), %rax
pushq %rax
vzeroupper
callq 0x27a5b6
popq %rcx
popq %rdx
testb %al, %al
je 0x2327cf
movq (%rsp), %rax
orl $-0x1, 0x150(%rsp,%rax,4)
leaq -0x1(%r13), %rax
andq %rax, %r13
movq 0x40(%rsp), %r10
movq 0xa0(%rsp), %rsi
movq 0x88(%rsp), %r8
jne 0x23278e
vmovaps 0x150(%rsp), %xmm0
vtestps 0x1cb9621(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x178(%rsp), %r9
vmovaps 0x10(%rsp), %xmm15
jb 0x230e80
vmovaps 0x3b0(%rsp), %xmm1
vbroadcastss 0x1cba35c(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
pushq $0x2
popq %rax
jmp 0x230e80
andq $-0x10, %rbp
movq (%rbp), %rax
movq (%rax), %rcx
movq %rax, 0x168(%rsp)
vmovdqa 0x3c0(%rsp), %xmm0
jmp 0x2326fa
vmovaps 0x3d0(%rsp), %xmm0
vandps 0x150(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cba309(%rip), %xmm1 # 0x1eecb84
movq 0xa8(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
addq $0x1c28, %rsp # imm = 0x1C28
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::ObjectIntersectorK<4, false>>, false>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Packet ("chunk") BVH traversal for K rays traced simultaneously with SIMD.
   Maintains a shared traversal stack of (node, per-lane entry distance) pairs;
   inactive lanes are masked out via +inf/-inf near/far sentinels. When the
   number of active lanes drops below a threshold the kernel switches to
   single-ray traversal (intersect1) for better SIMD utilization.
   Interface: valid_i marks active lanes (-1 = active), This holds the
   acceleration structure, ray is the K-wide ray-hit packet (updated in place),
   context carries per-query flags. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
/* clamp ray interval to [0, inf); negative tnear/tfar would break interval tests */
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* inactive lanes get an empty interval (+inf, -inf) so they never pass a node test */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* pure single-ray mode: trace each active lane independently.
   bscf presumably extracts and clears the lowest set bit (lane index) — verify in math lib */
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* per-ray direction-sign octant (3 bits, one per axis); 0xffffffff marks inactive lanes */
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the exactly-opposite octant (all three sign bits flipped) */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: traverse one octant-compatible group of rays per iteration
   (or all rays at once when octant sorting is disabled below) */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
/* popcount of the differing sign bits: rays whose octant differs from the
   reference ray in at most one axis traverse together */
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* retire the selected rays so later iterations skip them */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
/* sentinel entry: popping invalidNode terminates the traversal loop */
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* main traversal loop; 'pop' is the re-entry point after a dead subtree */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* single-ray kernels may have shortened ray.tfar; shrink the packet tfar to match */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
/* inner descent: walk down until a leaf is reached */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are packed front-to-back; first empty slot ends the list */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
/* no child hit at all: pop the next stack entry */
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
/* partial insertion sort of the top stack entries so nearer nodes pop first
   ('any' lane comparison — ordering is heuristic across the packet) */
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
/* re-push the current node and let the pop path dispatch to single-ray mode */
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* primitive kernels update ray.tfar on hit; propagate into the traversal ray */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
/* lazily-built subtree (e.g. two-level BVH): push with -inf so it pops immediately */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
movq (%rsi), %rax
cmpq $0x8, 0x70(%rax)
je 0x232e8e
cmpq $0x0, 0x8(%rcx)
je 0x2328c2
movq 0x10(%rcx), %r8
testb $0x1, 0x2(%r8)
jne 0x27b910
vmovdqa (%rdi), %xmm0
vpcmpeqd %xmm2, %xmm2, %xmm2
vpcmpeqd %xmm2, %xmm0, %xmm4
vmovmskps %xmm4, %esi
testl %esi, %esi
je 0x232e8e
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x17b8, %rsp # imm = 0x17B8
vmovaps 0x40(%rdx), %xmm6
vbroadcastss 0x1cee5cb(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm6, %xmm3
vbroadcastss 0x1cbe6e2(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm3, %xmm3
vblendvps %xmm3, %xmm5, %xmm6, %xmm3
vmovaps 0x50(%rdx), %xmm7
vandps %xmm1, %xmm7, %xmm8
vcmpltps %xmm5, %xmm8, %xmm8
vblendvps %xmm8, %xmm5, %xmm7, %xmm7
vmovaps 0x60(%rdx), %xmm8
vandps %xmm1, %xmm8, %xmm1
vcmpltps %xmm5, %xmm1, %xmm1
vblendvps %xmm1, %xmm5, %xmm8, %xmm8
vrcpps %xmm3, %xmm1
vmulps %xmm1, %xmm3, %xmm3
vbroadcastss 0x1cb9dca(%rip), %xmm9 # 0x1eec714
vsubps %xmm3, %xmm9, %xmm10
vrcpps %xmm7, %xmm3
vmulps %xmm3, %xmm7, %xmm5
vsubps %xmm5, %xmm9, %xmm7
vrcpps %xmm8, %xmm5
vmulps %xmm5, %xmm8, %xmm8
vsubps %xmm8, %xmm9, %xmm8
vxorps %xmm9, %xmm9, %xmm9
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cdfd88(%rip), %xmm11 # 0x1f12704
vandps %xmm6, %xmm11, %xmm6
vmovaps 0x50(%rdx), %xmm11
vcmpltps %xmm9, %xmm11, %xmm11
vbroadcastss 0x1cee548(%rip), %xmm12 # 0x1f20edc
vandps %xmm12, %xmm11, %xmm11
vmovaps (%rdx), %xmm12
vorps %xmm6, %xmm11, %xmm11
vmovaps 0x60(%rdx), %xmm6
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1d27fbf(%rip), %xmm9 # 0x1f5a974
vandps %xmm6, %xmm9, %xmm6
vmovaps 0x10(%rdx), %xmm13
vpxor %xmm2, %xmm4, %xmm2
vmovaps 0x20(%rdx), %xmm14
vpor %xmm6, %xmm2, %xmm4
vmovaps 0x30(%rdx), %xmm2
vmulps %xmm1, %xmm10, %xmm6
vmulps %xmm7, %xmm3, %xmm7
vmulps %xmm5, %xmm8, %xmm8
vpor %xmm4, %xmm11, %xmm9
vmovaps 0x80(%rdx), %xmm4
vmovdqa %xmm9, 0x10(%rsp)
bsfq %rsi, %rdi
vbroadcastss 0x10(%rsp,%rdi,4), %xmm10
vpcmpeqd %xmm9, %xmm10, %xmm10
vmovmskps %xmm10, %edi
notq %rdi
andq %rdi, %rsi
jne 0x2329ee
vpcmpeqd %xmm9, %xmm9, %xmm9
vpcmpeqd %xmm0, %xmm9, %xmm0
vaddps %xmm6, %xmm1, %xmm9
vaddps %xmm7, %xmm3, %xmm10
vaddps %xmm5, %xmm8, %xmm11
vxorps %xmm3, %xmm3, %xmm3
vmaxps %xmm3, %xmm2, %xmm1
vmaxps %xmm3, %xmm4, %xmm2
vbroadcastss 0x1cb8feb(%rip), %xmm15 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm15, %xmm3
vbroadcastss 0x1cba140(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, (%rsp)
leaq 0x120(%rsp), %r12
leaq 0x8c0(%rsp), %r13
vorps 0x10(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $-0x8, -0x10(%r12)
vmovaps %xmm15, -0x20(%r13)
movq 0x70(%rax), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm3, 0xf0(%rsp)
vmovaps %xmm3, -0x10(%r13)
pushq $0x8
popq %r15
addq $-0x10, %r13
movq -0x8(%r12), %rbp
addq $-0x8, %r12
movb $0x1, %al
cmpq $-0x8, %rbp
je 0x232e75
vmovaps (%r13), %xmm0
vcmpltps (%rsp), %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x232e73
testb $0x8, %bpl
jne 0x232cdd
movq %rbp, %r11
movq %rbp, %rsi
andq $-0x10, %rsi
xorl %edi, %edi
xorl %r10d, %r10d
movq %r15, %rbp
vmovaps %xmm15, %xmm0
movq (%rsi,%rdi,8), %r8
cmpq $0x8, %r8
je 0x232bf1
vbroadcastss 0x20(%r11,%rdi,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm2
vbroadcastss 0x40(%r11,%rdi,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm3
vbroadcastss 0x60(%r11,%rdi,4), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm4
vbroadcastss 0x30(%r11,%rdi,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm5
vbroadcastss 0x50(%r11,%rdi,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm6
vbroadcastss 0x70(%r11,%rdi,4), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm7
vpminsd %xmm5, %xmm2, %xmm1
vpminsd %xmm6, %xmm3, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpminsd %xmm7, %xmm4, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpmaxsd %xmm5, %xmm2, %xmm2
vpmaxsd %xmm6, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm7, %xmm4, %xmm3
vpmaxsd 0xf0(%rsp), %xmm1, %xmm4
vpminsd (%rsp), %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vcmpleps %xmm2, %xmm4, %xmm2
vtestps %xmm2, %xmm2
je 0x232bf1
vblendvps %xmm2, %xmm1, %xmm15, %xmm1
vcmpltps %xmm0, %xmm1, %xmm2
vtestps %xmm2, %xmm2
je 0x232bcd
movq %rbp, %r9
vmovaps %xmm0, %xmm2
cmpq $0x8, %rbp
movq %r8, %rbp
jne 0x232bd8
vmovaps %xmm1, %xmm0
movq %r8, %rbp
jmp 0x232bf1
movq %r8, %r9
vmovaps %xmm1, %xmm2
vmovaps %xmm0, %xmm1
incq %r10
movq %r9, (%r12)
addq $0x8, %r12
vmovaps %xmm2, (%r13)
addq $0x10, %r13
vmovaps %xmm1, %xmm0
cmpq $0x8, %r8
je 0x232c08
leaq 0x1(%rdi), %r8
cmpq $0x3, %rdi
movq %r8, %rdi
jb 0x232ae6
cmpq $0x8, %rbp
setne %dil
cmpq $0x2, %r10
setae %sil
andb %dil, %sil
cmpb $0x1, %sil
je 0x232c30
cmpq $0x8, %rbp
jne 0x232ac6
jmp 0x232e73
vmovaps -0x20(%r13), %xmm1
vmovaps -0x10(%r13), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x232c63
vmovaps %xmm2, -0x20(%r13)
vmovaps %xmm1, -0x10(%r13)
vpermilps $0x4e, -0x10(%r12), %xmm1 # xmm1 = mem[2,3,0,1]
vmovups %xmm1, -0x10(%r12)
cmpq $0x2, %r10
je 0x232c21
vmovaps -0x30(%r13), %xmm1
vmovaps -0x10(%r13), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x232ca1
vmovaps %xmm2, -0x30(%r13)
vmovaps %xmm1, -0x10(%r13)
movq -0x18(%r12), %rsi
movq -0x8(%r12), %rdi
movq %rdi, -0x18(%r12)
movq %rsi, -0x8(%r12)
vmovaps -0x30(%r13), %xmm1
vmovaps -0x20(%r13), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x232c21
vmovaps %xmm2, -0x30(%r13)
vmovaps %xmm1, -0x20(%r13)
vpermilps $0x4e, -0x18(%r12), %xmm1 # xmm1 = mem[2,3,0,1]
vmovups %xmm1, -0x18(%r12)
jmp 0x232c21
cmpq $-0x8, %rbp
je 0x232e75
vmovaps (%rsp), %xmm1
vcmpnleps %xmm0, %xmm1, %xmm1
vtestps %xmm1, %xmm1
je 0x232e73
movl %ebp, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x30(%rsp)
je 0x232e5f
andq $-0x10, %rbp
xorl %r14d, %r14d
movq %rdx, 0x28(%rsp)
vmovaps %xmm12, 0xe0(%rsp)
vmovaps %xmm13, 0xd0(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovaps %xmm9, 0xb0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x90(%rsp)
vmovaps %xmm1, 0x80(%rsp)
movq (%rcx), %rax
movl (%rbp,%r14,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
vbroadcastss 0x34(%rax), %xmm0
vandps 0x90(%rdx), %xmm0, %xmm0
vpcmpeqd 0x1cb8c8c(%rip), %xmm0, %xmm0 # 0x1eeba10
vtestps %xmm1, %xmm0
jb 0x232e51
vandnps %xmm1, %xmm0, %xmm0
movl 0x4(%rbp,%r14,8), %r8d
vmovaps %xmm0, 0x100(%rsp)
leaq 0x100(%rsp), %rsi
movq %rsi, 0x38(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x40(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x50(%rsp)
movq %rdx, 0x58(%rsp)
movl $0x4, 0x60(%rsp)
movl %edi, 0x64(%rsp)
movl %r8d, 0x48(%rsp)
movq %rax, 0x68(%rsp)
andq $0x0, 0x70(%rsp)
movq %rcx, %rbx
movq 0x10(%rcx), %rcx
movq %rcx, 0x78(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x232dfa
movq 0x60(%rax), %rcx
leaq 0x38(%rsp), %rdi
callq *%rcx
movq %rbx, %rcx
movq 0x28(%rsp), %rdx
vmovaps 0xe0(%rsp), %xmm12
vmovaps 0xd0(%rsp), %xmm13
vmovaps 0xc0(%rsp), %xmm14
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x90(%rsp), %xmm11
vbroadcastss 0x1cb8bd8(%rip), %xmm15 # 0x1eeba20
vmovaps 0x80(%rsp), %xmm1
incq %r14
cmpq %r14, 0x30(%rsp)
jne 0x232d5b
vmovaps (%rsp), %xmm0
vblendvps %xmm1, 0x80(%rdx), %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
xorl %eax, %eax
testb %al, %al
je 0x232a96
addq $0x17b8, %rsp # imm = 0x17B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::ObjectIntersectorK<4, false>>, false>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Occlusion (shadow-ray) query for a packet of K rays against an N-wide BVH.
   Rays found occluded get tfar overwritten with -inf at the end. Traversal
   uses a shared per-packet stack and may switch to single-ray traversal when
   few rays remain active (utilization heuristic). */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays; tfar < 0 marks a ray
   that is already occluded or disabled */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get the empty interval [+inf,-inf] so they can never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode; /* sentinel marking the stack bottom */
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal when only few rays are still active */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* collapse tfar of terminated rays so they fail all further box tests */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* write -inf into tfar of all rays found occluded */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
movq (%rsi), %rax
movq 0x70(%rax), %rax
cmpq $0x8, %rax
je 0x2333cb
cmpq $0x0, 0x8(%rcx)
je 0x232eb7
movq 0x10(%rcx), %r8
testb $0x1, 0x2(%r8)
jne 0x27c2e6
vpcmpeqd %xmm9, %xmm9, %xmm9
vpcmpeqd (%rdi), %xmm9, %xmm1
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm0, %xmm2
vtestps %xmm1, %xmm2
je 0x2333cb
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x17d8, %rsp # imm = 0x17D8
leaq 0x80(%rdx), %r9
vandps %xmm1, %xmm2, %xmm11
vmovaps (%rdx), %xmm12
vmovaps 0x10(%rdx), %xmm13
vmovaps 0x20(%rdx), %xmm14
vmovaps 0x40(%rdx), %xmm1
vbroadcastss 0x1cedfae(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm1, %xmm3
vbroadcastss 0x1cbe0c5(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
vmovaps 0x30(%rdx), %xmm3
vmovaps 0x50(%rdx), %xmm5
vmovaps 0x60(%rdx), %xmm6
vandps %xmm2, %xmm6, %xmm7
vcmpltps %xmm4, %xmm7, %xmm7
vblendvps %xmm7, %xmm4, %xmm6, %xmm6
vrcpps %xmm1, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vbroadcastss 0x1cb97b7(%rip), %xmm8 # 0x1eec714
vsubps %xmm1, %xmm8, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vandps %xmm2, %xmm5, %xmm2
vcmpltps %xmm4, %xmm2, %xmm2
vblendvps %xmm2, %xmm4, %xmm5, %xmm2
vrcpps %xmm6, %xmm4
vmulps %xmm4, %xmm6, %xmm5
vsubps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm4, %xmm5
vmaxps %xmm10, %xmm3, %xmm3
vrcpps %xmm2, %xmm6
vmulps %xmm6, %xmm2, %xmm2
vsubps %xmm2, %xmm8, %xmm2
vbroadcastss 0x1cb8a82(%rip), %xmm15 # 0x1eeba20
vblendvps %xmm11, %xmm3, %xmm15, %xmm3
vmulps %xmm2, %xmm6, %xmm2
vaddps %xmm2, %xmm6, %xmm2
vmovaps %xmm2, 0x110(%rsp)
vxorps %xmm9, %xmm11, %xmm2
vmaxps %xmm10, %xmm0, %xmm0
vaddps %xmm1, %xmm7, %xmm1
vmovaps %xmm1, 0x100(%rsp)
vbroadcastss 0x1cb9baf(%rip), %xmm1 # 0x1eecb84
vmovaps %xmm11, 0x80(%rsp)
vblendvps %xmm11, %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps %xmm2, %xmm0
vmovaps %xmm3, %xmm11
vaddps %xmm5, %xmm4, %xmm1
vmovaps %xmm1, 0xf0(%rsp)
leaq 0x140(%rsp), %r13
movq $-0x8, -0x10(%r13)
leaq 0x8e0(%rsp), %rbp
vmovaps %xmm15, -0x20(%rbp)
movq %rax, -0x8(%r13)
vmovaps %xmm3, -0x10(%rbp)
leaq 0x1f1cf54(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm1
vmovaps %xmm1, 0xd0(%rsp)
pushq $0x8
popq %rbx
pushq $0x1
popq %r14
addq $-0x10, %rbp
movq -0x8(%r13), %r12
addq $-0x8, %r13
movb $0x1, %r10b
cmpq $-0x8, %r12
je 0x2331b3
vmovaps %xmm0, (%rsp)
vmovaps (%rbp), %xmm0
vcmpltps 0x10(%rsp), %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x2331ab
testb $0x8, %r12b
jne 0x2331c1
movq %r12, %rax
movq %r12, %rdi
andq $-0x10, %rdi
xorl %r8d, %r8d
movq %rbx, %r12
vmovaps %xmm15, %xmm0
movq (%rdi,%r8,8), %rsi
cmpq $0x8, %rsi
je 0x23318a
vbroadcastss 0x20(%rax,%r8,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmovaps 0x100(%rsp), %xmm5
vmulps %xmm1, %xmm5, %xmm2
vbroadcastss 0x40(%rax,%r8,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmovaps 0x110(%rsp), %xmm6
vmulps %xmm1, %xmm6, %xmm3
vbroadcastss 0x60(%rax,%r8,4), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmovaps 0xf0(%rsp), %xmm7
vmulps %xmm1, %xmm7, %xmm4
vbroadcastss 0x30(%rax,%r8,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm5, %xmm5
vbroadcastss 0x50(%rax,%r8,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm6
vbroadcastss 0x70(%rax,%r8,4), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmulps %xmm1, %xmm7, %xmm7
vpminsd %xmm5, %xmm2, %xmm1
vpminsd %xmm6, %xmm3, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpminsd %xmm7, %xmm4, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpmaxsd %xmm5, %xmm2, %xmm2
vpmaxsd %xmm6, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm7, %xmm4, %xmm3
vpmaxsd %xmm11, %xmm1, %xmm4
vpminsd 0x10(%rsp), %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vcmpleps %xmm2, %xmm4, %xmm2
vtestps %xmm2, %xmm2
je 0x23318a
vblendvps %xmm2, %xmm1, %xmm15, %xmm1
cmpq $0x8, %r12
je 0x233183
movq %r12, (%r13)
addq $0x8, %r13
vmovaps %xmm0, (%rbp)
addq $0x10, %rbp
vmovaps %xmm1, %xmm0
movq %rsi, %r12
cmpq $0x8, %rsi
je 0x2331a1
leaq 0x1(%r8), %rsi
cmpq $0x3, %r8
movq %rsi, %r8
jb 0x233093
cmpq $0x8, %r12
jne 0x233075
xorl %r10d, %r10d
vmovaps (%rsp), %xmm0
testb %r10b, %r10b
je 0x233040
jmp 0x2333a3
cmpq $-0x8, %r12
je 0x2331ae
vmovaps 0x10(%rsp), %xmm1
vcmpnleps %xmm0, %xmm1, %xmm0
vtestps %xmm0, %xmm0
je 0x2331ab
movl %r12d, %eax
andl $0xf, %eax
vmovaps (%rsp), %xmm1
vmovaps %xmm1, %xmm0
addq $-0x8, %rax
movq %rax, 0x30(%rsp)
je 0x23336d
movq %r9, 0x20(%rsp)
andq $-0x10, %r12
vxorps %xmm1, %xmm9, %xmm2
movq %rdx, 0x28(%rsp)
vmovaps %xmm12, 0xc0(%rsp)
vmovaps %xmm13, 0xb0(%rsp)
vmovaps %xmm14, 0xa0(%rsp)
vmovaps %xmm11, 0x90(%rsp)
movq (%rcx), %rax
movl -0x8(%r12,%r14,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
vbroadcastss 0x34(%rax), %xmm0
vandps 0x90(%rdx), %xmm0, %xmm0
vpcmpeqd %xmm0, %xmm10, %xmm0
vtestps %xmm2, %xmm0
vmovaps 0xd0(%rsp), %xmm1
jb 0x23333a
vmovaps %xmm2, 0xe0(%rsp)
vandnps %xmm2, %xmm0, %xmm0
movl -0x4(%r12,%r14,8), %r8d
vmovaps %xmm0, 0x120(%rsp)
leaq 0x120(%rsp), %rsi
movq %rsi, 0x38(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x40(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x50(%rsp)
movq %rdx, 0x58(%rsp)
movl $0x4, 0x60(%rsp)
movl %edi, 0x64(%rsp)
movl %r8d, 0x48(%rsp)
movq %rax, 0x68(%rsp)
andq $0x0, 0x70(%rsp)
movq %rcx, %r15
movq 0x10(%rcx), %rcx
movq %rcx, 0x78(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x2332da
movq 0x68(%rax), %rcx
leaq 0x38(%rsp), %rdi
callq *%rcx
movq 0x28(%rsp), %rdx
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm10, %xmm10, %xmm10
vcmpltps %xmm10, %xmm0, %xmm1
movq %r15, %rcx
vpcmpeqd %xmm9, %xmm9, %xmm9
vmovaps 0xc0(%rsp), %xmm12
vmovaps 0xb0(%rsp), %xmm13
vmovaps 0xa0(%rsp), %xmm14
vbroadcastss 0x1cb86fb(%rip), %xmm15 # 0x1eeba20
vmovaps 0x90(%rsp), %xmm11
movb $0x1, %r10b
vmovaps 0xe0(%rsp), %xmm2
vandnps %xmm2, %xmm1, %xmm0
vtestps %xmm2, %xmm1
jb 0x23335b
leaq 0x1(%r14), %rax
vmovaps %xmm0, %xmm2
cmpq 0x30(%rsp), %r14
movq %rax, %r14
jb 0x23322d
vxorps %xmm0, %xmm9, %xmm0
movq 0x20(%rsp), %r9
vmovaps (%rsp), %xmm1
pushq $0x1
popq %r14
vorps %xmm0, %xmm1, %xmm1
vtestps %xmm9, %xmm1
vmovaps %xmm1, %xmm0
jb 0x2331b3
vbroadcastss 0x1cb97fb(%rip), %xmm1 # 0x1eecb84
vmovaps 0x10(%rsp), %xmm2
vblendvps %xmm0, %xmm1, %xmm2, %xmm2
vmovaps %xmm2, 0x10(%rsp)
xorl %r10d, %r10d
jmp 0x2331b3
vandps 0x80(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cb97cf(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r9)
addq $0x17d8, %rsp # imm = 0x17D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::ObjectIntersectorK<4, true>>, false>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* Closest-hit query for a packet of K rays against an N-wide BVH.
   Rays are optionally partitioned by direction-sign octant (so each chunk
   traversal sees coherent direction signs), traversed with a shared stack
   that is partially distance-sorted, and may switch to single-ray traversal
   when few rays remain active. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* pure single-ray mode: trace each active lane independently */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff)); /* 0xffffffff = inactive lane */
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 is the fully opposite octant (all three signs flipped) */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
/* outer loop: one chunk traversal per group of rays whose octants differ
   from the group leader in at most one sign bit */
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
/* lanes outside this group get the empty interval [+inf,-inf] */
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode; /* sentinel marking the stack bottom */
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal when only few rays are still active */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* single-ray hits may have shortened ray.tfar; propagate that */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits: bubble the farthest
   pushed entries toward the stack bottom so nearer nodes pop first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink traversal tfar to the closest hit found so far */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
movq (%rsi), %rax
cmpq $0x8, 0x70(%rax)
je 0x233a6e
vmovdqa (%rdi), %xmm0
vpcmpeqd %xmm2, %xmm2, %xmm2
vpcmpeqd %xmm2, %xmm0, %xmm4
vmovmskps %xmm4, %esi
testl %esi, %esi
je 0x233a6e
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x17b8, %rsp # imm = 0x17B8
movq %rdx, %r14
vmovaps 0x40(%rdx), %xmm6
vbroadcastss 0x1cedab0(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm6, %xmm3
vbroadcastss 0x1cbdbc7(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm3, %xmm3
vblendvps %xmm3, %xmm5, %xmm6, %xmm3
vmovaps 0x50(%rdx), %xmm7
vandps %xmm1, %xmm7, %xmm8
vcmpltps %xmm5, %xmm8, %xmm8
vblendvps %xmm8, %xmm5, %xmm7, %xmm8
vmovaps 0x60(%rdx), %xmm7
vandps %xmm1, %xmm7, %xmm1
vcmpltps %xmm5, %xmm1, %xmm1
vblendvps %xmm1, %xmm5, %xmm7, %xmm9
vrcpps %xmm3, %xmm1
vmulps %xmm1, %xmm3, %xmm3
vbroadcastss 0x1cb92af(%rip), %xmm10 # 0x1eec714
vsubps %xmm3, %xmm10, %xmm7
vrcpps %xmm8, %xmm3
vmulps %xmm3, %xmm8, %xmm5
vsubps %xmm5, %xmm10, %xmm8
vrcpps %xmm9, %xmm5
vmulps %xmm5, %xmm9, %xmm9
vsubps %xmm9, %xmm10, %xmm10
vxorps %xmm9, %xmm9, %xmm9
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cdf26c(%rip), %xmm11 # 0x1f12704
vandps %xmm6, %xmm11, %xmm11
vmovaps 0x50(%rdx), %xmm6
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1ceda2c(%rip), %xmm12 # 0x1f20edc
vandps %xmm6, %xmm12, %xmm12
vmovaps 0x80(%rdx), %xmm6
vmaxps %xmm9, %xmm6, %xmm6
vorps %xmm11, %xmm12, %xmm11
vmovaps 0x60(%rdx), %xmm12
vcmpltps %xmm9, %xmm12, %xmm9
vbroadcastss 0x1d2749a(%rip), %xmm12 # 0x1f5a974
vandps %xmm12, %xmm9, %xmm9
vmovaps (%rdx), %xmm12
vpxor %xmm2, %xmm4, %xmm2
vmovaps 0x10(%rdx), %xmm13
vpor %xmm2, %xmm9, %xmm2
vmovaps 0x20(%rdx), %xmm14
vpor %xmm2, %xmm11, %xmm9
vmovaps 0x30(%rdx), %xmm2
vmulps %xmm7, %xmm1, %xmm4
vmulps %xmm3, %xmm8, %xmm7
vmulps %xmm5, %xmm10, %xmm8
vmovdqa %xmm9, 0x10(%rsp)
bsfq %rsi, %rdx
vbroadcastss 0x10(%rsp,%rdx,4), %xmm10
vpcmpeqd %xmm9, %xmm10, %xmm10
vmovmskps %xmm10, %edx
notq %rdx
andq %rdx, %rsi
jne 0x233510
vpcmpeqd %xmm9, %xmm9, %xmm9
vpcmpeqd %xmm0, %xmm9, %xmm0
vaddps %xmm4, %xmm1, %xmm11
vaddps %xmm7, %xmm3, %xmm15
vaddps %xmm5, %xmm8, %xmm1
vmovaps %xmm1, 0xf0(%rsp)
vxorps %xmm1, %xmm1, %xmm1
vmaxps %xmm1, %xmm2, %xmm1
vbroadcastss 0x1cb84c4(%rip), %xmm2 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm2, %xmm3
vbroadcastss 0x1cb9619(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm6, %xmm1, %xmm1
vmovaps %xmm1, (%rsp)
leaq 0x120(%rsp), %r12
leaq 0x8c0(%rsp), %r13
vorps 0x10(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq $-0x8, -0x10(%r12)
vmovaps %xmm2, -0x20(%r13)
movq 0x70(%rax), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm3, 0xe0(%rsp)
vmovaps %xmm3, -0x10(%r13)
pushq $0x8
popq %r15
addq $-0x10, %r13
movq -0x8(%r12), %rbp
addq $-0x8, %r12
movb $0x1, %al
cmpq $-0x8, %rbp
je 0x233a55
vmovaps (%r13), %xmm0
vcmpltps (%rsp), %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x233a53
vmovaps %xmm0, %xmm1
testb $0x8, %bpl
jne 0x2338e1
movq %rbp, %rdx
andq $-0x10, %rdx
andl $0x7, %ebp
movl %ebp, %esi
xorl %edi, %edi
xorl %r10d, %r10d
movq %r15, %rbp
vbroadcastss 0x1cb8408(%rip), %xmm0 # 0x1eeba20
movq (%rdx,%rdi,8), %r8
cmpq $0x8, %r8
je 0x2337c4
vbroadcastss 0x80(%rdx,%rdi,4), %xmm3
vbroadcastss 0x20(%rdx,%rdi,4), %xmm4
vmovaps 0x70(%r14), %xmm2
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0xa0(%rdx,%rdi,4), %xmm4
vbroadcastss 0x40(%rdx,%rdi,4), %xmm5
vmulps %xmm4, %xmm2, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0xc0(%rdx,%rdi,4), %xmm5
vbroadcastss 0x60(%rdx,%rdi,4), %xmm6
vmulps %xmm5, %xmm2, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0x90(%rdx,%rdi,4), %xmm6
vbroadcastss 0x30(%rdx,%rdi,4), %xmm7
vmulps %xmm6, %xmm2, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0xb0(%rdx,%rdi,4), %xmm7
vbroadcastss 0x50(%rdx,%rdi,4), %xmm8
vmulps %xmm7, %xmm2, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vbroadcastss 0xd0(%rdx,%rdi,4), %xmm8
vbroadcastss 0x70(%rdx,%rdi,4), %xmm9
vmulps %xmm2, %xmm8, %xmm8
vaddps %xmm9, %xmm8, %xmm8
vsubps %xmm12, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm9
vsubps %xmm13, %xmm4, %xmm3
vmulps %xmm3, %xmm15, %xmm4
vsubps %xmm14, %xmm5, %xmm3
vmovaps 0xf0(%rsp), %xmm10
vmulps %xmm3, %xmm10, %xmm5
vsubps %xmm12, %xmm6, %xmm3
vmulps %xmm3, %xmm11, %xmm6
vsubps %xmm13, %xmm7, %xmm3
vmulps %xmm3, %xmm15, %xmm7
vsubps %xmm14, %xmm8, %xmm3
vmulps %xmm3, %xmm10, %xmm8
vpminsd %xmm6, %xmm9, %xmm3
vpminsd %xmm7, %xmm4, %xmm10
vpmaxsd %xmm10, %xmm3, %xmm3
vpminsd %xmm8, %xmm5, %xmm10
vpmaxsd %xmm10, %xmm3, %xmm3
vpmaxsd %xmm6, %xmm9, %xmm6
vpmaxsd %xmm7, %xmm4, %xmm4
vpminsd %xmm4, %xmm6, %xmm4
vpmaxsd %xmm8, %xmm5, %xmm5
vpminsd (%rsp), %xmm5, %xmm5
vpminsd %xmm5, %xmm4, %xmm4
vpmaxsd 0xe0(%rsp), %xmm3, %xmm5
cmpl $0x6, %esi
je 0x2337dd
vcmpleps %xmm4, %xmm5, %xmm2
vmovaps (%rsp), %xmm4
vcmpnleps %xmm1, %xmm4, %xmm4
vandps %xmm4, %xmm2, %xmm2
vpslld $0x1f, %xmm2, %xmm2
vtestps %xmm2, %xmm2
je 0x2337c4
vbroadcastss 0x1cb82ab(%rip), %xmm4 # 0x1eeba20
vblendvps %xmm2, %xmm3, %xmm4, %xmm2
vcmpltps %xmm0, %xmm2, %xmm3
vtestps %xmm3, %xmm3
je 0x2337a0
movq %rbp, %r9
vmovaps %xmm0, %xmm3
cmpq $0x8, %rbp
movq %r8, %rbp
jne 0x2337ab
vmovaps %xmm2, %xmm0
movq %r8, %rbp
jmp 0x2337c4
movq %r8, %r9
vmovaps %xmm2, %xmm3
vmovaps %xmm0, %xmm2
incq %r10
movq %r9, (%r12)
addq $0x8, %r12
vmovaps %xmm3, (%r13)
addq $0x10, %r13
vmovaps %xmm2, %xmm0
cmpq $0x8, %r8
je 0x23380d
leaq 0x1(%rdi), %r8
cmpq $0x3, %rdi
movq %r8, %rdi
jb 0x233618
jmp 0x23380d
vcmpleps %xmm4, %xmm5, %xmm4
vbroadcastss 0xe0(%rdx,%rdi,4), %xmm5
vcmpleps %xmm2, %xmm5, %xmm5
vbroadcastss 0xf0(%rdx,%rdi,4), %xmm6
vcmpltps %xmm6, %xmm2, %xmm2
vandps %xmm2, %xmm5, %xmm2
vandps %xmm4, %xmm2, %xmm2
jmp 0x233752
cmpq $0x8, %rbp
setne %dl
cmpq $0x2, %r10
setae %sil
andb %dl, %sil
cmpb $0x1, %sil
je 0x233834
cmpq $0x8, %rbp
jne 0x2335ed
jmp 0x233a53
vmovaps -0x20(%r13), %xmm1
vmovaps -0x10(%r13), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x233867
vmovaps %xmm2, -0x20(%r13)
vmovaps %xmm1, -0x10(%r13)
vpermilps $0x4e, -0x10(%r12), %xmm1 # xmm1 = mem[2,3,0,1]
vmovups %xmm1, -0x10(%r12)
cmpq $0x2, %r10
je 0x233825
vmovaps -0x30(%r13), %xmm1
vmovaps -0x10(%r13), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x2338a5
vmovaps %xmm2, -0x30(%r13)
vmovaps %xmm1, -0x10(%r13)
movq -0x18(%r12), %rsi
movq -0x8(%r12), %rdx
movq %rdx, -0x18(%r12)
movq %rsi, -0x8(%r12)
vmovaps -0x30(%r13), %xmm1
vmovaps -0x20(%r13), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x233825
vmovaps %xmm2, -0x30(%r13)
vmovaps %xmm1, -0x20(%r13)
vpermilps $0x4e, -0x18(%r12), %xmm1 # xmm1 = mem[2,3,0,1]
vmovups %xmm1, -0x18(%r12)
jmp 0x233825
cmpq $-0x8, %rbp
je 0x233a55
vmovaps (%rsp), %xmm0
vcmpnleps %xmm1, %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x233a53
movl %ebp, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x30(%rsp)
je 0x233a3f
andq $-0x10, %rbp
xorl %ebx, %ebx
movq %rcx, 0x28(%rsp)
vmovaps %xmm12, 0xd0(%rsp)
vmovaps %xmm13, 0xc0(%rsp)
vmovaps %xmm14, 0xb0(%rsp)
vmovaps %xmm11, 0xa0(%rsp)
vmovaps %xmm15, 0x90(%rsp)
vmovaps %xmm1, 0x80(%rsp)
movq (%rcx), %rax
movl (%rbp,%rbx,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
vbroadcastss 0x34(%rax), %xmm0
vandps 0x90(%r14), %xmm0, %xmm0
vpcmpeqd 0x1cb8092(%rip), %xmm0, %xmm0 # 0x1eeba10
vtestps %xmm1, %xmm0
jb 0x233a31
vandnps %xmm1, %xmm0, %xmm0
movl 0x4(%rbp,%rbx,8), %edx
vmovaps %xmm0, 0x100(%rsp)
leaq 0x100(%rsp), %rsi
movq %rsi, 0x38(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x40(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x50(%rsp)
movq %r14, 0x58(%rsp)
movl $0x4, 0x60(%rsp)
movl %edi, 0x64(%rsp)
movl %edx, 0x48(%rsp)
movq %rax, 0x68(%rsp)
andq $0x0, 0x70(%rsp)
movq 0x10(%rcx), %rcx
movq %rcx, 0x78(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x2339ef
movq 0x60(%rax), %rcx
leaq 0x38(%rsp), %rdi
callq *%rcx
movq 0x28(%rsp), %rcx
vmovaps 0xd0(%rsp), %xmm12
vmovaps 0xc0(%rsp), %xmm13
vmovaps 0xb0(%rsp), %xmm14
vmovaps 0xa0(%rsp), %xmm11
vmovaps 0x90(%rsp), %xmm15
vmovaps 0x80(%rsp), %xmm1
incq %rbx
cmpq %rbx, 0x30(%rsp)
jne 0x233955
vmovaps (%rsp), %xmm0
vblendvps %xmm1, 0x80(%r14), %xmm0, %xmm0
vmovaps %xmm0, (%rsp)
xorl %eax, %eax
testb %al, %al
je 0x2335bd
addq $0x17b8, %rsp # imm = 0x17B8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::ObjectIntersectorK<4, true>>, false>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Occlusion (shadow-ray) query for a packet of K rays against an N-wide BVH.
   Rays found occluded get tfar overwritten with -inf at the end. Traversal
   uses a shared per-packet stack and may switch to single-ray traversal when
   few rays remain active (utilization heuristic). */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays; tfar < 0 marks a ray
   that is already occluded or disabled */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get the empty interval [+inf,-inf] so they can never hit */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode; /* sentinel marking the stack bottom */
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal when only few rays are still active */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* collapse tfar of terminated rays so they fail all further box tests */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* write -inf into tfar of all rays found occluded */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x17d8, %rsp # imm = 0x17D8
movq (%rsi), %rax
movq 0x70(%rax), %rax
cmpq $0x8, %rax
je 0x234049
movq %rdx, %r14
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vmovaps 0x80(%rdx), %xmm0
vxorps %xmm11, %xmm11, %xmm11
vcmpnltps %xmm11, %xmm0, %xmm2
vtestps %xmm1, %xmm2
je 0x234049
leaq 0x80(%r14), %r8
vandps %xmm1, %xmm2, %xmm9
vpcmpeqd %xmm10, %xmm10, %xmm10
vmovaps (%r14), %xmm1
vmovaps %xmm1, 0x110(%rsp)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x100(%rsp)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0xf0(%rsp)
vmovaps 0x40(%r14), %xmm1
vbroadcastss 0x1ced3be(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm1, %xmm3
vbroadcastss 0x1cbd4d5(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
vmovaps 0x30(%r14), %xmm3
vmovaps 0x50(%r14), %xmm5
vmovaps 0x60(%r14), %xmm6
vandps %xmm2, %xmm6, %xmm7
vcmpltps %xmm4, %xmm7, %xmm7
vblendvps %xmm7, %xmm4, %xmm6, %xmm6
vrcpps %xmm1, %xmm7
vmulps %xmm7, %xmm1, %xmm1
vbroadcastss 0x1cb8bc4(%rip), %xmm8 # 0x1eec714
vsubps %xmm1, %xmm8, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vandps %xmm2, %xmm5, %xmm2
vcmpltps %xmm4, %xmm2, %xmm2
vblendvps %xmm2, %xmm4, %xmm5, %xmm2
vrcpps %xmm6, %xmm4
vmulps %xmm4, %xmm6, %xmm5
vsubps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm4, %xmm5
vmaxps %xmm11, %xmm3, %xmm3
vrcpps %xmm2, %xmm6
vmulps %xmm6, %xmm2, %xmm2
vsubps %xmm2, %xmm8, %xmm2
vbroadcastss 0x1cb7e8f(%rip), %xmm8 # 0x1eeba20
vblendvps %xmm9, %xmm3, %xmm8, %xmm3
vmulps %xmm2, %xmm6, %xmm2
vaddps %xmm2, %xmm6, %xmm2
vmovaps %xmm2, 0xd0(%rsp)
vxorps %xmm10, %xmm9, %xmm2
vmaxps %xmm11, %xmm0, %xmm0
vaddps %xmm1, %xmm7, %xmm1
vmovaps %xmm1, 0xc0(%rsp)
vbroadcastss 0x1cb8fbc(%rip), %xmm1 # 0x1eecb84
vmovaps %xmm9, 0x80(%rsp)
vblendvps %xmm9, %xmm0, %xmm1, %xmm12
vmovaps %xmm2, %xmm0
vaddps %xmm5, %xmm4, %xmm1
vmovaps %xmm1, 0xb0(%rsp)
leaq 0x140(%rsp), %r13
movq $-0x8, -0x10(%r13)
leaq 0x8e0(%rsp), %rbp
vmovaps %xmm8, -0x20(%rbp)
movq %rax, -0x8(%r13)
vmovaps %xmm3, 0xe0(%rsp)
vmovaps %xmm3, -0x10(%rbp)
leaq 0x1f1c362(%rip), %rax # 0x214ff80
vmovaps (%rax), %xmm1
vmovaps %xmm1, 0xa0(%rsp)
pushq $0x8
popq %rbx
pushq $0x1
popq %r15
addq $-0x10, %rbp
movq -0x8(%r13), %r12
addq $-0x8, %r13
movb $0x1, %r9b
cmpq $-0x8, %r12
je 0x233e97
vmovaps %xmm0, (%rsp)
vmovaps (%rbp), %xmm0
vmovaps %xmm0, 0x10(%rsp)
vcmpltps %xmm12, %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x233e8f
testb $0x8, %r12b
jne 0x233ea5
movq %r12, %rax
andq $-0x10, %rax
andl $0x7, %r12d
movl %r12d, %edi
xorl %edx, %edx
movq %rbx, %r12
vbroadcastss 0x1cb7d8e(%rip), %xmm1 # 0x1eeba20
movq (%rax,%rdx,8), %rsi
cmpq $0x8, %rsi
je 0x233e36
vbroadcastss 0x80(%rax,%rdx,4), %xmm3
vbroadcastss 0x20(%rax,%rdx,4), %xmm4
vmovaps 0x70(%r14), %xmm2
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0xa0(%rax,%rdx,4), %xmm4
vbroadcastss 0x40(%rax,%rdx,4), %xmm5
vmulps %xmm4, %xmm2, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0xc0(%rax,%rdx,4), %xmm5
vbroadcastss 0x60(%rax,%rdx,4), %xmm6
vmulps %xmm5, %xmm2, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0x90(%rax,%rdx,4), %xmm6
vbroadcastss 0x30(%rax,%rdx,4), %xmm7
vmulps %xmm6, %xmm2, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0xb0(%rax,%rdx,4), %xmm7
vbroadcastss 0x50(%rax,%rdx,4), %xmm8
vmulps %xmm7, %xmm2, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vbroadcastss 0xd0(%rax,%rdx,4), %xmm8
vbroadcastss 0x70(%rax,%rdx,4), %xmm9
vmulps %xmm2, %xmm8, %xmm8
vaddps %xmm9, %xmm8, %xmm8
vmovaps 0x110(%rsp), %xmm14
vsubps %xmm14, %xmm3, %xmm3
vmovaps 0xc0(%rsp), %xmm11
vmulps %xmm3, %xmm11, %xmm9
vmovaps 0x100(%rsp), %xmm15
vsubps %xmm15, %xmm4, %xmm3
vmovaps 0xd0(%rsp), %xmm10
vmulps %xmm3, %xmm10, %xmm4
vmovaps 0xf0(%rsp), %xmm0
vsubps %xmm0, %xmm5, %xmm3
vmovaps 0xb0(%rsp), %xmm13
vmulps %xmm3, %xmm13, %xmm5
vsubps %xmm14, %xmm6, %xmm3
vmulps %xmm3, %xmm11, %xmm6
vsubps %xmm15, %xmm7, %xmm3
vmulps %xmm3, %xmm10, %xmm7
vsubps %xmm0, %xmm8, %xmm3
vmulps %xmm3, %xmm13, %xmm8
vpminsd %xmm6, %xmm9, %xmm3
vpminsd %xmm7, %xmm4, %xmm10
vpmaxsd %xmm10, %xmm3, %xmm3
vpminsd %xmm8, %xmm5, %xmm10
vpmaxsd %xmm10, %xmm3, %xmm3
vpmaxsd %xmm6, %xmm9, %xmm6
vpmaxsd %xmm7, %xmm4, %xmm4
vpminsd %xmm4, %xmm6, %xmm4
vpmaxsd %xmm8, %xmm5, %xmm5
vpminsd %xmm12, %xmm5, %xmm5
vpminsd %xmm5, %xmm4, %xmm4
vpmaxsd 0xe0(%rsp), %xmm3, %xmm5
cmpl $0x6, %edi
je 0x233e4f
vcmpleps %xmm4, %xmm5, %xmm2
vcmpnleps 0x10(%rsp), %xmm12, %xmm4
vandps %xmm4, %xmm2, %xmm2
vpslld $0x1f, %xmm2, %xmm2
vtestps %xmm2, %xmm2
je 0x233e36
vbroadcastss 0x1cb7c0e(%rip), %xmm4 # 0x1eeba20
vblendvps %xmm2, %xmm3, %xmm4, %xmm2
cmpq $0x8, %r12
je 0x233e2f
movq %r12, (%r13)
addq $0x8, %r13
vmovaps %xmm1, (%rbp)
addq $0x10, %rbp
vmovaps %xmm2, %xmm1
movq %rsi, %r12
cmpq $0x8, %rsi
je 0x233e7f
leaq 0x1(%rdx), %rsi
cmpq $0x3, %rdx
movq %rsi, %rdx
jb 0x233c92
jmp 0x233e7f
vcmpleps %xmm4, %xmm5, %xmm4
vbroadcastss 0xe0(%rax,%rdx,4), %xmm5
vcmpleps %xmm2, %xmm5, %xmm5
vbroadcastss 0xf0(%rax,%rdx,4), %xmm6
vcmpltps %xmm6, %xmm2, %xmm2
vandps %xmm2, %xmm5, %xmm2
vandps %xmm4, %xmm2, %xmm2
jmp 0x233df2
vmovaps %xmm1, 0x10(%rsp)
cmpq $0x8, %r12
jne 0x233c6c
xorl %r9d, %r9d
vmovaps (%rsp), %xmm0
testb %r9b, %r9b
je 0x233c32
jmp 0x234032
cmpq $-0x8, %r12
je 0x233e92
vcmpnleps 0x10(%rsp), %xmm12, %xmm0
vtestps %xmm0, %xmm0
je 0x233e8f
movl %r12d, %eax
andl $0xf, %eax
vmovdqa (%rsp), %xmm1
vmovdqa %xmm1, %xmm0
addq $-0x8, %rax
movq %rax, 0x30(%rsp)
vpcmpeqd %xmm2, %xmm2, %xmm2
je 0x234008
movq %r8, 0x20(%rsp)
andq $-0x10, %r12
vpxor %xmm2, %xmm1, %xmm3
vpxor %xmm2, %xmm2, %xmm2
movq %rcx, 0x28(%rsp)
vmovaps %xmm12, 0x90(%rsp)
movq (%rcx), %rax
movl -0x8(%r12,%r15,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
vbroadcastss 0x34(%rax), %xmm0
vandps 0x90(%r14), %xmm0, %xmm0
vpcmpeqd %xmm2, %xmm0, %xmm0
vtestps %xmm3, %xmm0
vmovaps 0xa0(%rsp), %xmm1
jb 0x233fd1
vmovaps %xmm3, 0x10(%rsp)
vandnps %xmm3, %xmm0, %xmm0
movl -0x4(%r12,%r15,8), %edx
vmovaps %xmm0, 0x120(%rsp)
leaq 0x120(%rsp), %rsi
movq %rsi, 0x38(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x40(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x50(%rsp)
movq %r14, 0x58(%rsp)
movl $0x4, 0x60(%rsp)
movl %edi, 0x64(%rsp)
movl %edx, 0x48(%rsp)
movq %rax, 0x68(%rsp)
andq $0x0, 0x70(%rsp)
movq 0x10(%rcx), %rcx
movq %rcx, 0x78(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x233fa1
movq 0x68(%rax), %rcx
leaq 0x38(%rsp), %rdi
callq *%rcx
vmovaps 0x80(%r14), %xmm0
vpxor %xmm2, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm1
movq 0x28(%rsp), %rcx
vmovaps 0x90(%rsp), %xmm12
movb $0x1, %r9b
vmovaps 0x10(%rsp), %xmm3
vandnps %xmm3, %xmm1, %xmm0
vtestps %xmm3, %xmm1
jb 0x233ff2
leaq 0x1(%r15), %rax
vmovaps %xmm0, %xmm3
cmpq 0x30(%rsp), %r15
movq %rax, %r15
jb 0x233efa
vpcmpeqd %xmm2, %xmm2, %xmm2
vxorps %xmm2, %xmm0, %xmm0
movq 0x20(%rsp), %r8
vmovdqa (%rsp), %xmm1
pushq $0x1
popq %r15
vpor %xmm0, %xmm1, %xmm1
vtestps %xmm2, %xmm1
vmovaps %xmm1, %xmm0
jb 0x233e97
vbroadcastss 0x1cb8b60(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm1, %xmm12, %xmm12
xorl %r9d, %r9d
jmp 0x233e97
vandps 0x80(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cb8b40(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r8)
addq $0x17d8, %rsp # imm = 0x17D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::InstanceIntersectorK<4>>, false>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
/* K-wide (packet) closest-hit BVH traversal.
 * When the `single` template parameter is set, every valid lane is traced
 * individually via intersect1(); otherwise the packet is traversed in chunk
 * mode, optionally split into sub-packets of rays whose direction-sign
 * octants differ in at most one axis. Results (closest hit) are written into
 * `ray`; `valid_i` masks which lanes participate (-1 == active). */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent packets take a dedicated, faster traversal path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays: a lane is active iff its mask entry is -1 */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into SoA traversal layout; precompute reciprocal directions */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
/* pure per-ray mode: inactive lanes get an empty [+inf,-inf] interval,
 * then each active lane is traced individually from the root */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
/* octant^7 flips all three sign bits, i.e. the exactly opposite octant */
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
/* per-lane XOR with the reference lane's octant; count the differing
 * direction-sign bits to group nearly-coherent lanes together */
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
/* mark processed lanes with 0xffffffff so they are skipped next iteration */
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
/* lanes outside this sub-packet get an empty [+inf,-inf] interval */
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node; slot 0 holds an invalidNode
 * sentinel marking the stack bottom */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* main traversal loop; the 'pop' label is re-entered whenever the
 * current subtree yields no further work */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* SIMD utilization too low: finish this subtree per ray */
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
/* pick up any hit distances shortened by the single-ray traversals */
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
/* lanes that missed the child get +inf so min-reductions ignore them */
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits: a small bubble pass over
 * the top stack entries so closer children are popped first */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
/* shrink the traversal interval to the (possibly closer) hit just found */
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
movq (%rsi), %rax
cmpq $0x8, 0x70(%rax)
je 0x234594
cmpq $0x0, 0x8(%rcx)
je 0x234080
movq 0x10(%rcx), %r8
testb $0x1, 0x2(%r8)
jne 0x27cc7c
vmovdqa (%rdi), %xmm0
vpcmpeqd %xmm2, %xmm2, %xmm2
vpcmpeqd %xmm2, %xmm0, %xmm4
vmovmskps %xmm4, %esi
testl %esi, %esi
je 0x234594
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1768, %rsp # imm = 0x1768
vmovaps 0x40(%rdx), %xmm6
vbroadcastss 0x1cece0d(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm6, %xmm3
vbroadcastss 0x1cbcf24(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm3, %xmm3
vblendvps %xmm3, %xmm5, %xmm6, %xmm3
vmovaps 0x50(%rdx), %xmm7
vandps %xmm1, %xmm7, %xmm8
vcmpltps %xmm5, %xmm8, %xmm8
vblendvps %xmm8, %xmm5, %xmm7, %xmm7
vmovaps 0x60(%rdx), %xmm8
vandps %xmm1, %xmm8, %xmm1
vcmpltps %xmm5, %xmm1, %xmm1
vblendvps %xmm1, %xmm5, %xmm8, %xmm8
vrcpps %xmm3, %xmm1
vmulps %xmm1, %xmm3, %xmm3
vbroadcastss 0x1cb860c(%rip), %xmm9 # 0x1eec714
vsubps %xmm3, %xmm9, %xmm10
vrcpps %xmm7, %xmm3
vmulps %xmm3, %xmm7, %xmm5
vsubps %xmm5, %xmm9, %xmm7
vrcpps %xmm8, %xmm5
vmulps %xmm5, %xmm8, %xmm8
vsubps %xmm8, %xmm9, %xmm8
vxorps %xmm9, %xmm9, %xmm9
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cde5ca(%rip), %xmm11 # 0x1f12704
vandps %xmm6, %xmm11, %xmm6
vmovaps 0x50(%rdx), %xmm11
vcmpltps %xmm9, %xmm11, %xmm11
vbroadcastss 0x1cecd8a(%rip), %xmm12 # 0x1f20edc
vandps %xmm12, %xmm11, %xmm11
vmovaps (%rdx), %xmm12
vorps %xmm6, %xmm11, %xmm11
vmovaps 0x60(%rdx), %xmm6
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1d26801(%rip), %xmm9 # 0x1f5a974
vandps %xmm6, %xmm9, %xmm6
vmovaps 0x10(%rdx), %xmm13
vpxor %xmm2, %xmm4, %xmm2
vmovaps 0x20(%rdx), %xmm14
vpor %xmm6, %xmm2, %xmm4
vmovaps 0x30(%rdx), %xmm2
vmulps %xmm1, %xmm10, %xmm6
vmulps %xmm7, %xmm3, %xmm7
vmulps %xmm5, %xmm8, %xmm8
vpor %xmm4, %xmm11, %xmm9
vmovaps 0x80(%rdx), %xmm4
vmovdqa %xmm9, 0x30(%rsp)
bsfq %rsi, %rdi
vbroadcastss 0x30(%rsp,%rdi,4), %xmm10
vpcmpeqd %xmm9, %xmm10, %xmm10
vmovmskps %xmm10, %edi
notq %rdi
andq %rdi, %rsi
jne 0x2341ac
vpcmpeqd %xmm9, %xmm9, %xmm9
vpcmpeqd %xmm0, %xmm9, %xmm0
vaddps %xmm6, %xmm1, %xmm9
vaddps %xmm7, %xmm3, %xmm10
vaddps %xmm5, %xmm8, %xmm11
vxorps %xmm1, %xmm1, %xmm1
vmaxps %xmm1, %xmm2, %xmm2
vmaxps %xmm1, %xmm4, %xmm1
vbroadcastss 0x1cb782d(%rip), %xmm15 # 0x1eeba20
vblendvps %xmm0, %xmm2, %xmm15, %xmm3
vbroadcastss 0x1cb8982(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x10(%rsp)
leaq 0xd0(%rsp), %rbp
leaq 0x870(%rsp), %r15
vorps 0x30(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rsp)
movq $-0x8, -0x10(%rbp)
vmovaps %xmm15, -0x20(%r15)
movq 0x70(%rax), %rax
movq %rax, -0x8(%rbp)
vmovaps %xmm3, 0xb0(%rsp)
vmovaps %xmm3, -0x10(%r15)
pushq $0x8
popq %r12
leaq 0x20(%rsp), %rdi
leaq 0xf(%rsp), %rsi
addq $-0x10, %r15
movq -0x8(%rbp), %r13
addq $-0x8, %rbp
movb $0x1, %al
cmpq $-0x8, %r13
je 0x23457b
vmovaps (%r15), %xmm0
vcmpltps 0x10(%rsp), %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x234579
testb $0x8, %r13b
jne 0x234499
movq %r13, %r11
movq %r13, %r14
andq $-0x10, %r14
xorl %ebx, %ebx
xorl %r10d, %r10d
movq %r12, %r13
vmovaps %xmm15, %xmm0
movq (%r14,%rbx,8), %r8
cmpq $0x8, %r8
je 0x2343b7
vbroadcastss 0x20(%r11,%rbx,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm2
vbroadcastss 0x40(%r11,%rbx,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm3
vbroadcastss 0x60(%r11,%rbx,4), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm4
vbroadcastss 0x30(%r11,%rbx,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm5
vbroadcastss 0x50(%r11,%rbx,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm6
vbroadcastss 0x70(%r11,%rbx,4), %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm7
vpminsd %xmm5, %xmm2, %xmm1
vpminsd %xmm6, %xmm3, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpminsd %xmm7, %xmm4, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpmaxsd %xmm5, %xmm2, %xmm2
vpmaxsd %xmm6, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm7, %xmm4, %xmm3
vpmaxsd 0xb0(%rsp), %xmm1, %xmm4
vpminsd 0x10(%rsp), %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vcmpleps %xmm2, %xmm4, %xmm2
vtestps %xmm2, %xmm2
je 0x2343b7
vblendvps %xmm2, %xmm1, %xmm15, %xmm1
vcmpltps %xmm0, %xmm1, %xmm2
vtestps %xmm2, %xmm2
je 0x234394
movq %r13, %r9
vmovaps %xmm0, %xmm2
cmpq $0x8, %r13
movq %r8, %r13
jne 0x23439f
vmovaps %xmm1, %xmm0
movq %r8, %r13
jmp 0x2343b7
movq %r8, %r9
vmovaps %xmm1, %xmm2
vmovaps %xmm0, %xmm1
incq %r10
movq %r9, (%rbp)
addq $0x8, %rbp
vmovaps %xmm2, (%r15)
addq $0x10, %r15
vmovaps %xmm1, %xmm0
cmpq $0x8, %r8
je 0x2343ce
leaq 0x1(%rbx), %r8
cmpq $0x3, %rbx
movq %r8, %rbx
jb 0x2342ac
cmpq $0x8, %r13
setne %r8b
cmpq $0x2, %r10
setae %r9b
andb %r8b, %r9b
cmpb $0x1, %r9b
je 0x2343f6
cmpq $0x8, %r13
jne 0x23428c
jmp 0x234579
vmovaps -0x20(%r15), %xmm1
vmovaps -0x10(%r15), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x234426
vmovaps %xmm2, -0x20(%r15)
vmovaps %xmm1, -0x10(%r15)
vpermilps $0x4e, -0x10(%rbp), %xmm1 # xmm1 = mem[2,3,0,1]
vmovups %xmm1, -0x10(%rbp)
cmpq $0x2, %r10
je 0x2343e7
vmovaps -0x30(%r15), %xmm1
vmovaps -0x10(%r15), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x234460
vmovaps %xmm2, -0x30(%r15)
vmovaps %xmm1, -0x10(%r15)
movq -0x18(%rbp), %r9
movq -0x8(%rbp), %r8
movq %r8, -0x18(%rbp)
movq %r9, -0x8(%rbp)
vmovaps -0x30(%r15), %xmm1
vmovaps -0x20(%r15), %xmm2
vcmpltps %xmm2, %xmm1, %xmm3
vtestps %xmm3, %xmm3
je 0x2343e7
vmovaps %xmm2, -0x30(%r15)
vmovaps %xmm1, -0x20(%r15)
vpermilps $0x4e, -0x18(%rbp), %xmm1 # xmm1 = mem[2,3,0,1]
vmovups %xmm1, -0x18(%rbp)
jmp 0x2343e7
cmpq $-0x8, %r13
je 0x23457b
vmovaps 0x10(%rsp), %xmm1
vcmpnleps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vtestps %xmm0, %xmm0
je 0x234579
vmovaps %xmm11, 0x50(%rsp)
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm9, 0x70(%rsp)
vmovaps %xmm14, 0x80(%rsp)
vmovaps %xmm13, 0x90(%rsp)
vmovaps %xmm12, 0xa0(%rsp)
movl %r13d, %ebx
andl $0xf, %ebx
addq $-0x8, %rbx
je 0x234527
andq $-0x10, %r13
movq %rcx, 0x48(%rsp)
movq %r13, %r8
movq %rdx, %r14
callq 0x3ecaec
movq %r14, %rdx
leaq 0xf(%rsp), %rsi
leaq 0x20(%rsp), %rdi
movq 0x48(%rsp), %rcx
addq $0x10, %r13
decq %rbx
jne 0x234501
vmovaps 0x20(%rsp), %xmm0
vmovaps 0x10(%rsp), %xmm1
vblendvps %xmm0, 0x80(%rdx), %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0xa0(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm13
vmovaps 0x80(%rsp), %xmm14
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm11
vbroadcastss 0x1cb74a7(%rip), %xmm15 # 0x1eeba20
xorl %eax, %eax
testb %al, %al
je 0x23425d
addq $0x1768, %rsp # imm = 0x1768
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::InstanceIntersectorK<4>>, false>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* K-wide (packet) occlusion (any-hit / shadow ray) BVH traversal.
 * Unlike intersect(), this only needs to know WHETHER each ray hits anything
 * before tfar: lanes are accumulated into `terminated` and disabled by
 * setting their tfar to -inf. On return, ray.tfar is overwritten with -inf
 * for every valid lane found occluded. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent packets take a dedicated, faster traversal path */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays (ray.tfar < 0 marks occluded) */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray into SoA traversal layout; inactive lanes get an empty
 * [+inf,-inf] interval so they never pass any box test */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* invalid lanes start out terminated so all(terminated) can short-circuit */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node; slot 0 holds an invalidNode
 * sentinel marking the stack bottom */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* main traversal loop; the 'pop' label is re-entered whenever the
 * current subtree yields no further work */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
/* SIMD utilization too low: finish this subtree per ray */
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
/* lanes that missed the child get +inf so they never look closest */
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* report occlusion: write -inf into ray.tfar for valid lanes that hit */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1798, %rsp # imm = 0x1798
movq (%rsi), %rax
movq 0x70(%rax), %rax
cmpq $0x8, %rax
je 0x234a28
movq %rcx, %rbx
movq %rdx, %rcx
cmpq $0x0, 0x8(%rbx)
je 0x2345d3
movq 0x10(%rbx), %rdx
testb $0x1, 0x2(%rdx)
jne 0x234a3a
vpcmpeqd %xmm9, %xmm9, %xmm9
vpcmpeqd (%rdi), %xmm9, %xmm2
vmovaps 0x80(%rcx), %xmm0
vxorps %xmm1, %xmm1, %xmm1
vcmpnltps %xmm1, %xmm0, %xmm3
vtestps %xmm2, %xmm3
je 0x234a28
leaq 0x80(%rcx), %r9
vandps %xmm2, %xmm3, %xmm10
vmovaps (%rcx), %xmm11
vmovaps 0x10(%rcx), %xmm12
vmovaps 0x40(%rcx), %xmm2
vbroadcastss 0x1cec8aa(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm4
vbroadcastss 0x1cbc9c1(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm2, %xmm4
vmovaps 0x50(%rcx), %xmm6
vmovaps 0x60(%rcx), %xmm2
vandps %xmm3, %xmm2, %xmm7
vcmpltps %xmm5, %xmm7, %xmm7
vblendvps %xmm7, %xmm5, %xmm2, %xmm7
vrcpps %xmm4, %xmm2
vmulps %xmm2, %xmm4, %xmm4
vbroadcastss 0x1cb80b8(%rip), %xmm8 # 0x1eec714
vsubps %xmm4, %xmm8, %xmm4
vandps %xmm3, %xmm6, %xmm3
vcmpltps %xmm5, %xmm3, %xmm3
vblendvps %xmm3, %xmm5, %xmm6, %xmm3
vrcpps %xmm7, %xmm5
vmulps %xmm5, %xmm7, %xmm6
vsubps %xmm6, %xmm8, %xmm6
vrcpps %xmm3, %xmm7
vmulps %xmm7, %xmm3, %xmm3
vsubps %xmm3, %xmm8, %xmm3
vmovaps 0x20(%rcx), %xmm13
vmulps %xmm3, %xmm7, %xmm3
vaddps %xmm3, %xmm7, %xmm14
vmovaps 0x30(%rcx), %xmm3
vmulps %xmm4, %xmm2, %xmm4
vmulps %xmm6, %xmm5, %xmm6
vmaxps %xmm1, %xmm3, %xmm3
vmaxps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1cb736e(%rip), %xmm15 # 0x1eeba20
vblendvps %xmm10, %xmm3, %xmm15, %xmm1
vaddps %xmm4, %xmm2, %xmm2
vmovaps %xmm2, 0xc0(%rsp)
vbroadcastss 0x1cb84b6(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm10, %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vaddps %xmm6, %xmm5, %xmm0
vmovaps %xmm0, 0xb0(%rsp)
vxorps %xmm9, %xmm10, %xmm0
vmovaps %xmm0, 0x30(%rsp)
leaq 0x100(%rsp), %r15
movq $-0x8, -0x10(%r15)
leaq 0x8a0(%rsp), %r12
vmovaps %xmm15, -0x20(%r12)
movq %rax, -0x8(%r15)
vmovaps %xmm1, 0xd0(%rsp)
vmovaps %xmm1, -0x10(%r12)
pushq $0x8
popq %r13
addq $-0x10, %r12
movq -0x8(%r15), %rbp
addq $-0x8, %r15
movb $0x1, %r10b
cmpq $-0x8, %rbp
je 0x23488e
vmovaps (%r12), %xmm0
vcmpltps 0x10(%rsp), %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x23488b
testb $0x8, %bpl
jne 0x23489c
movq %rbp, %rax
movq %rbp, %rdi
andq $-0x10, %rdi
xorl %edx, %edx
movq %r13, %rbp
vmovaps %xmm15, %xmm0
movq (%rdi,%rdx,8), %rsi
cmpq $0x8, %rsi
je 0x23486a
vbroadcastss 0x20(%rax,%rdx,4), %xmm1
vsubps %xmm11, %xmm1, %xmm1
vmovaps 0xc0(%rsp), %xmm5
vmulps %xmm1, %xmm5, %xmm2
vbroadcastss 0x40(%rax,%rdx,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm3
vbroadcastss 0x60(%rax,%rdx,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmovaps 0xb0(%rsp), %xmm7
vmulps %xmm1, %xmm7, %xmm4
vbroadcastss 0x30(%rax,%rdx,4), %xmm1
vsubps %xmm11, %xmm1, %xmm1
vmulps %xmm1, %xmm5, %xmm5
vbroadcastss 0x50(%rax,%rdx,4), %xmm1
vsubps %xmm12, %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm6
vbroadcastss 0x70(%rax,%rdx,4), %xmm1
vsubps %xmm13, %xmm1, %xmm1
vmulps %xmm1, %xmm7, %xmm7
vpminsd %xmm5, %xmm2, %xmm1
vpminsd %xmm6, %xmm3, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpminsd %xmm7, %xmm4, %xmm8
vpmaxsd %xmm8, %xmm1, %xmm1
vpmaxsd %xmm5, %xmm2, %xmm2
vpmaxsd %xmm6, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpmaxsd %xmm7, %xmm4, %xmm3
vpmaxsd 0xd0(%rsp), %xmm1, %xmm4
vpminsd 0x10(%rsp), %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vcmpleps %xmm2, %xmm4, %xmm2
vtestps %xmm2, %xmm2
je 0x23486a
vblendvps %xmm2, %xmm1, %xmm15, %xmm1
cmpq $0x8, %rbp
je 0x234863
movq %rbp, (%r15)
addq $0x8, %r15
vmovaps %xmm0, (%r12)
addq $0x10, %r12
vmovaps %xmm1, %xmm0
movq %rsi, %rbp
cmpq $0x8, %rsi
je 0x234881
leaq 0x1(%rdx), %rsi
cmpq $0x3, %rdx
movq %rsi, %rdx
jb 0x234777
cmpq $0x8, %rbp
jne 0x23475a
xorl %r10d, %r10d
testb %r10b, %r10b
je 0x234729
jmp 0x234a14
cmpq $-0x8, %rbp
je 0x23488e
vmovaps 0x10(%rsp), %xmm1
vcmpnleps %xmm0, %xmm1, %xmm0
vtestps %xmm0, %xmm0
je 0x23488b
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm13, 0x70(%rsp)
vmovaps %xmm12, 0x80(%rsp)
vmovaps %xmm11, 0x90(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
movq %r9, 0x28(%rsp)
movl %ebp, %eax
andl $0xf, %eax
vxorps 0x30(%rsp), %xmm9, %xmm0
vmovaps %xmm0, 0x40(%rsp)
addq $-0x8, %rax
movq %rax, 0x58(%rsp)
je 0x23495d
andq $-0x10, %rbp
pushq $0x1
popq %r14
movq %rcx, 0x50(%rsp)
leaq 0xe0(%rsp), %rdi
leaq 0x40(%rsp), %rsi
leaq 0xf(%rsp), %rdx
movq %rbx, %r8
movq %rbp, %r9
callq 0x3ecd0e
movq 0x50(%rsp), %rcx
vmovaps 0xe0(%rsp), %xmm0
vmovaps 0x40(%rsp), %xmm1
vandnps %xmm1, %xmm0, %xmm2
vmovaps %xmm2, 0x40(%rsp)
vtestps %xmm1, %xmm0
jb 0x23495d
addq $0x10, %rbp
leaq 0x1(%r14), %rax
cmpq 0x58(%rsp), %r14
movq %rax, %r14
jb 0x234909
vpcmpeqd %xmm9, %xmm9, %xmm9
vpxor 0x40(%rsp), %xmm9, %xmm0
vmovdqa 0x30(%rsp), %xmm2
vpor %xmm0, %xmm2, %xmm2
vtestps %xmm9, %xmm2
vmovaps %xmm2, 0x30(%rsp)
jb 0x2349d7
vbroadcastss 0x1cb81fc(%rip), %xmm0 # 0x1eecb84
vmovaps 0x10(%rsp), %xmm1
vblendvps %xmm2, %xmm0, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
xorl %r10d, %r10d
movq 0x28(%rsp), %r9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x90(%rsp), %xmm11
vmovaps 0x80(%rsp), %xmm12
vmovaps 0x70(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x1cb704e(%rip), %xmm15 # 0x1eeba20
jmp 0x23488e
movq 0x28(%rsp), %r9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x90(%rsp), %xmm11
vmovaps 0x80(%rsp), %xmm12
vmovaps 0x70(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x1cb7014(%rip), %xmm15 # 0x1eeba20
movb $0x1, %r10b
jmp 0x23488e
vandps 0x30(%rsp), %xmm10, %xmm0
vbroadcastss 0x1cb8161(%rip), %xmm1 # 0x1eecb84
vmaskmovps %xmm1, %xmm0, (%r9)
addq $0x1798, %rsp # imm = 0x1798
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rcx, %rdx
movq %rbx, %rcx
addq $0x1798, %rsp # imm = 0x1798
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x27d5f4
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 16777232, true, embree::avx::SubGridMBIntersectorKPluecker<4, 4, true>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
/* Packet (K-wide) occlusion/shadow-ray query against an N-wide BVH.
 * For every ray in the packet that is found occluded, ray.tfar is
 * overwritten with -inf (see the final masked store); rays that were
 * invalid on entry or already occluded are masked out up front.
 * Traversal uses a manual node stack with an invalidNode sentinel at
 * slot 0, and may switch to single-ray traversal (occluded1) when few
 * rays in the packet remain active. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
/* coherent ray packets get a dedicated traversal; only valid for BVH_AN1 node types */
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
/* a lane is active iff valid_i is -1 (all bits set) and tfar is non-negative */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
/* the third argument reserves N slots of single-ray traversal state only
 * when the single-ray fallback path is compiled in */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
/* inactive lanes get tnear=+inf / tfar=-inf so they fail every
 * interval test during traversal without extra masking */
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
/* 'terminated' accumulates occluded lanes; invalid lanes start terminated */
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
/* coherent contexts switch to single-ray mode sooner (<=2 active lanes) */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
/* slot 0 holds an invalidNode sentinel so the pop loop can detect an empty stack */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
/* 'pop:' labels the loop head so inner traversal can restart the pop via goto */
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
/* hitting the sentinel means the stack is exhausted */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
/* packet utilization is low: trace the remaining rays one at a time */
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
/* tfar=-inf makes terminated lanes fail all further node tests */
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
/* inner loop: descend until we reach a leaf or cull the whole subtree */
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
/* children are packed front-to-back; first empty slot ends the list */
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
/* no child was hit: restart the pop loop */
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
/* too few lanes still active: push cur and re-enter the pop loop,
 * which will take the single-ray path above */
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
/* the primitive intersector may defer a subtree (lazy build); push it
 * with near=-inf so it is traversed unconditionally */
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
/* write -inf into tfar for lanes that were valid on entry and got occluded */
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1da0, %rsp # imm = 0x1DA0
movq (%rsi), %rax
cmpq $0x8, 0x70(%rax)
je 0x23a249
movq %rdx, %r13
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%rdx), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x23a249
movq %rcx, %r12
movq %rsi, 0x150(%rsp)
vandps %xmm3, %xmm4, %xmm11
vmovaps (%r13), %xmm3
vmovaps %xmm3, 0x210(%rsp)
vmovaps 0x10(%r13), %xmm3
vmovaps %xmm3, 0x220(%rsp)
vmovaps 0x20(%r13), %xmm3
vmovaps %xmm3, 0x230(%rsp)
vmovaps 0x40(%r13), %xmm3
vmovaps %xmm3, 0x240(%rsp)
vmovaps 0x50(%r13), %xmm4
vmovaps %xmm4, 0x250(%rsp)
vmovaps 0x60(%r13), %xmm5
vmovaps %xmm5, 0x260(%rsp)
vbroadcastss 0x1ce84fb(%rip), %xmm6 # 0x1f20ec4
vandps %xmm6, %xmm3, %xmm7
vbroadcastss 0x1cb8612(%rip), %xmm8 # 0x1ef0fe8
vcmpltps %xmm8, %xmm7, %xmm7
vbroadcastss 0x1cb3d2f(%rip), %xmm9 # 0x1eec714
vdivps %xmm3, %xmm9, %xmm3
vandps %xmm6, %xmm4, %xmm10
vcmpltps %xmm8, %xmm10, %xmm10
vdivps %xmm4, %xmm9, %xmm4
vandps %xmm6, %xmm5, %xmm6
vcmpltps %xmm8, %xmm6, %xmm6
vdivps %xmm5, %xmm9, %xmm5
vbroadcastss 0x1ce8552(%rip), %xmm8 # 0x1f20f60
vblendvps %xmm7, %xmm8, %xmm3, %xmm3
vblendvps %xmm10, %xmm8, %xmm4, %xmm4
vblendvps %xmm6, %xmm8, %xmm5, %xmm5
vmovaps %xmm3, 0x270(%rsp)
vmovaps %xmm4, 0x280(%rsp)
vmovaps %xmm5, 0x290(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d21f1b(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x2a0(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d1bf3c(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d21efb(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vmovaps %xmm3, 0x2b0(%rsp)
vcmpnltps %xmm2, %xmm5, %xmm3
vbroadcastss 0x1d21ee2(%rip), %xmm4 # 0x1f5a96c
vbroadcastss 0x1d21edd(%rip), %xmm5 # 0x1f5a970
vblendvps %xmm3, %xmm4, %xmm5, %xmm3
vmovaps %xmm3, 0x2c0(%rsp)
vmovaps 0x30(%r13), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cb2f67(%rip), %xmm1 # 0x1eeba20
vblendvps %xmm11, %xmm3, %xmm1, %xmm1
vmovaps %xmm1, 0x2d0(%rsp)
vbroadcastss 0x1cb40b3(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm11, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x2e0(%rsp)
vmovaps %xmm11, 0x3c0(%rsp)
vxorps %xmm0, %xmm11, %xmm0
vmovaps %xmm0, 0x70(%rsp)
cmpq $0x0, 0x8(%rcx)
je 0x238b15
movq %rax, %rcx
movq 0x10(%r12), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r15d, %r15d
cmpb $0x1, %al
movq %rcx, %rax
adcq $0x2, %r15
jmp 0x238b19
pushq $0x3
popq %r15
leaq 0x80(%r13), %rcx
movq %rcx, 0xb8(%rsp)
leaq 0x6f0(%rsp), %rdi
movq $-0x8, -0x10(%rdi)
leaq 0xe90(%rsp), %r8
vbroadcastss 0x1cb2ed7(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, -0x20(%r8)
movq %rax, 0x148(%rsp)
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdi)
vmovaps %xmm1, -0x10(%r8)
leaq 0x1f17414(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm1
vbroadcastss 0x1cb3b91(%rip), %ymm2 # 0x1eec714
vbroadcastss 0x1cb7e40(%rip), %ymm0 # 0x1ef09cc
vmovaps %ymm1, 0x5a0(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x580(%rsp)
movq %r15, 0x50(%rsp)
addq $-0x10, %r8
movq -0x8(%rdi), %rdx
addq $-0x8, %rdi
cmpq $-0x8, %rdx
je 0x23a149
vmovaps (%r8), %xmm15
vcmpltps 0x2e0(%rsp), %xmm15, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x23a15a
movzbl %al, %ebx
popcntl %ebx, %r14d
xorl %eax, %eax
cmpq %r15, %r14
jbe 0x23a15e
cmpq %r15, %r14
jbe 0x23a14c
testb $0x8, %dl
jne 0x238e78
vmovaps 0x2e0(%rsp), %xmm0
movq %rdx, %rax
andq $-0x10, %rax
andl $0x7, %edx
movl %edx, %ecx
vcmpnleps %xmm15, %xmm0, %xmm0
xorl %r9d, %r9d
pushq $0x8
popq %rdx
vbroadcastss 0x1cb2df7(%rip), %xmm15 # 0x1eeba20
movq (%rax,%r9,8), %rsi
cmpq $0x8, %rsi
je 0x238de5
vbroadcastss 0x80(%rax,%r9,4), %xmm2
vbroadcastss 0x20(%rax,%r9,4), %xmm3
vmovaps 0x70(%r13), %xmm1
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa0(%rax,%r9,4), %xmm3
vbroadcastss 0x40(%rax,%r9,4), %xmm4
vmulps %xmm3, %xmm1, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vbroadcastss 0xc0(%rax,%r9,4), %xmm4
vbroadcastss 0x60(%rax,%r9,4), %xmm5
vmulps %xmm4, %xmm1, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0x90(%rax,%r9,4), %xmm5
vbroadcastss 0x30(%rax,%r9,4), %xmm6
vmulps %xmm5, %xmm1, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vbroadcastss 0xb0(%rax,%r9,4), %xmm6
vbroadcastss 0x50(%rax,%r9,4), %xmm7
vmulps %xmm6, %xmm1, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vbroadcastss 0xd0(%rax,%r9,4), %xmm7
vbroadcastss 0x70(%rax,%r9,4), %xmm8
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmovaps 0x210(%rsp), %xmm8
vmovaps 0x220(%rsp), %xmm9
vmovaps 0x230(%rsp), %xmm10
vmovaps 0x270(%rsp), %xmm11
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm12
vsubps %xmm9, %xmm3, %xmm2
vmovaps 0x280(%rsp), %xmm3
vmulps %xmm3, %xmm2, %xmm13
vsubps %xmm10, %xmm4, %xmm2
vmovaps 0x290(%rsp), %xmm4
vmulps %xmm4, %xmm2, %xmm14
vsubps %xmm8, %xmm5, %xmm2
vmulps %xmm2, %xmm11, %xmm5
vsubps %xmm9, %xmm6, %xmm2
vmulps %xmm3, %xmm2, %xmm3
vsubps %xmm10, %xmm7, %xmm2
vmulps %xmm4, %xmm2, %xmm4
vpminsd %xmm5, %xmm12, %xmm2
vpminsd %xmm3, %xmm13, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vpminsd %xmm4, %xmm14, %xmm6
vpmaxsd %xmm6, %xmm2, %xmm2
vbroadcastss 0x1ce71af(%rip), %xmm6 # 0x1f1ff10
vmulps %xmm6, %xmm2, %xmm2
vpmaxsd %xmm5, %xmm12, %xmm5
vpmaxsd %xmm3, %xmm13, %xmm3
vpminsd %xmm3, %xmm5, %xmm3
vpmaxsd %xmm4, %xmm14, %xmm4
vpminsd %xmm4, %xmm3, %xmm4
vpmaxsd 0x2d0(%rsp), %xmm2, %xmm3
vbroadcastss 0x1ce7183(%rip), %xmm5 # 0x1f1ff14
vmulps %xmm5, %xmm4, %xmm4
vpminsd 0x2e0(%rsp), %xmm4, %xmm4
cmpl $0x6, %ecx
je 0x238dfe
vcmpleps %xmm4, %xmm3, %xmm1
vandps %xmm0, %xmm1, %xmm1
vpslld $0x1f, %xmm1, %xmm1
vtestps %xmm1, %xmm1
je 0x238de5
vbroadcastss 0x1cb2c5e(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
cmpq $0x8, %rdx
je 0x238dde
movq %rdx, (%rdi)
addq $0x8, %rdi
vmovaps %xmm15, (%r8)
addq $0x10, %r8
vmovaps %xmm1, %xmm15
movq %rsi, %rdx
cmpq $0x8, %rsi
je 0x238e2e
leaq 0x1(%r9), %rsi
cmpq $0x3, %r9
movq %rsi, %r9
jb 0x238c29
jmp 0x238e2e
vcmpleps %xmm4, %xmm3, %xmm3
vbroadcastss 0xe0(%rax,%r9,4), %xmm4
vcmpleps %xmm1, %xmm4, %xmm4
vbroadcastss 0xf0(%rax,%r9,4), %xmm5
vcmpltps %xmm5, %xmm1, %xmm1
vandps %xmm1, %xmm4, %xmm1
vandps %xmm3, %xmm1, %xmm1
jmp 0x238da9
xorl %eax, %eax
cmpq $0x8, %rdx
je 0x238e71
vmovaps 0x2e0(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %esi
movb $0x1, %cl
cmpq %rsi, %r15
jae 0x238e61
testb %cl, %cl
jne 0x238bf6
jmp 0x23a14c
movq %rdx, (%rdi)
addq $0x8, %rdi
vmovaps %xmm15, (%r8)
addq $0x10, %r8
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x238e54
cmpq $-0x8, %rdx
vpcmpeqd %xmm1, %xmm1, %xmm1
je 0x23a149
vmovaps 0x2e0(%rsp), %xmm0
vcmpnleps %xmm15, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x23a15a
movl %edx, %eax
andl $0xf, %eax
vmovdqa 0x70(%rsp), %xmm0
addq $-0x8, %rax
movq %rax, 0x158(%rsp)
je 0x23a111
andq $-0x10, %rdx
vpxor %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0xd0(%rsp)
xorl %eax, %eax
movq %r12, 0xc0(%rsp)
movq %r13, 0x170(%rsp)
movq %rdi, 0x28(%rsp)
movq %r8, 0x20(%rsp)
movq %rdx, 0x30(%rsp)
movq %rax, 0x160(%rsp)
imulq $0x90, %rax, %rax
vmovq 0x20(%rdx,%rax), %xmm0
vmovq 0x24(%rdx,%rax), %xmm1
vpminub %xmm1, %xmm0, %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vpxor %xmm1, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
testl %ecx, %ecx
je 0x23a0eb
leaq (%rdx,%rax), %rbx
vbroadcastss 0x80(%rdx,%rax), %xmm0
vmovaps 0x70(%r13), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vbroadcastss 0x84(%rdx,%rax), %xmm1
vmulps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x3f0(%rsp)
movzbl %cl, %eax
movq %rbx, 0x58(%rsp)
movq %rax, 0x168(%rsp)
bsfq %rax, %rax
vmovq 0x20(%rbx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vbroadcastss 0x44(%rbx), %xmm1
vbroadcastss 0x38(%rbx), %xmm2
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vbroadcastss 0x420(%rsp,%rax,4), %xmm0
vmovq 0x50(%rbx), %xmm3
vpmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vcvtdq2ps %xmm3, %xmm3
vbroadcastss 0x74(%rbx), %xmm4
vbroadcastss 0x68(%rbx), %xmm5
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm5, %xmm3
vmovaps %xmm3, 0x2f0(%rsp)
vmovss 0x2f0(%rsp,%rax,4), %xmm3
vsubss %xmm0, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovaps 0x3f0(%rsp), %xmm10
vmulps %xmm3, %xmm10, %xmm3
vaddps %xmm3, %xmm0, %xmm0
vmovq 0x24(%rbx), %xmm3
vpmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vcvtdq2ps %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x420(%rsp)
vbroadcastss 0x420(%rsp,%rax,4), %xmm1
vmovq 0x54(%rbx), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vcvtdq2ps %xmm2, %xmm2
vmulps %xmm2, %xmm4, %xmm2
vaddps %xmm2, %xmm5, %xmm2
vmovaps %xmm2, 0x2f0(%rsp)
vmovss 0x2f0(%rsp,%rax,4), %xmm2
vsubss %xmm1, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm10, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovq 0x28(%rbx), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vcvtdq2ps %xmm2, %xmm2
vbroadcastss 0x48(%rbx), %xmm3
vbroadcastss 0x3c(%rbx), %xmm4
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm4, %xmm2
vmovaps %xmm2, 0x420(%rsp)
vbroadcastss 0x420(%rsp,%rax,4), %xmm2
vmovq 0x58(%rbx), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vcvtdq2ps %xmm5, %xmm5
vbroadcastss 0x78(%rbx), %xmm6
vbroadcastss 0x6c(%rbx), %xmm7
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm7, %xmm5
vmovaps %xmm5, 0x2f0(%rsp)
vmovss 0x2f0(%rsp,%rax,4), %xmm5
vsubss %xmm2, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm10, %xmm5
vaddps %xmm5, %xmm2, %xmm2
vmovq 0x2c(%rbx), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vmovaps %xmm3, 0x420(%rsp)
vbroadcastss 0x420(%rsp,%rax,4), %xmm3
vmovq 0x5c(%rbx), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm7, %xmm4
vmovaps %xmm4, 0x2f0(%rsp)
vmovss 0x2f0(%rsp,%rax,4), %xmm4
vsubss %xmm3, %xmm4, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm10, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vmovq 0x30(%rbx), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vcvtdq2ps %xmm4, %xmm4
vbroadcastss 0x4c(%rbx), %xmm5
vbroadcastss 0x40(%rbx), %xmm6
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovaps %xmm4, 0x420(%rsp)
vbroadcastss 0x420(%rsp,%rax,4), %xmm4
vmovq 0x60(%rbx), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vcvtdq2ps %xmm7, %xmm7
vbroadcastss 0x7c(%rbx), %xmm8
vbroadcastss 0x70(%rbx), %xmm9
vmulps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm9, %xmm7
vmovaps %xmm7, 0x2f0(%rsp)
vmovss 0x2f0(%rsp,%rax,4), %xmm7
vsubss %xmm4, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm10, %xmm7
vaddps %xmm7, %xmm4, %xmm4
vmovq 0x34(%rbx), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vcvtdq2ps %xmm7, %xmm7
vmulps %xmm7, %xmm5, %xmm5
vaddps %xmm5, %xmm6, %xmm5
vmovaps %xmm5, 0x420(%rsp)
vbroadcastss 0x420(%rsp,%rax,4), %xmm5
vmovq 0x64(%rbx), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm6, %xmm8, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vmovaps %xmm6, 0x2f0(%rsp)
vmovss 0x2f0(%rsp,%rax,4), %xmm6
vsubss %xmm5, %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm10, %xmm6
vaddps %xmm6, %xmm5, %xmm5
vmovaps 0x210(%rsp), %xmm6
vmovaps 0x220(%rsp), %xmm7
vmovaps 0x230(%rsp), %xmm8
vmovaps 0x270(%rsp), %xmm9
vsubps %xmm6, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vsubps %xmm7, %xmm2, %xmm2
vmovaps 0x280(%rsp), %xmm10
vmulps %xmm2, %xmm10, %xmm2
vsubps %xmm8, %xmm4, %xmm4
vmovaps 0x290(%rsp), %xmm11
vmulps %xmm4, %xmm11, %xmm4
vsubps %xmm6, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vsubps %xmm7, %xmm3, %xmm3
vmulps %xmm3, %xmm10, %xmm3
vsubps %xmm8, %xmm5, %xmm5
vmulps %xmm5, %xmm11, %xmm5
vminps %xmm1, %xmm0, %xmm6
vminps %xmm3, %xmm2, %xmm7
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm4, %xmm7
vmaxps %xmm7, %xmm6, %xmm6
vbroadcastss 0x1ce6c77(%rip), %xmm7 # 0x1f1ff10
vmulps %xmm7, %xmm6, %xmm6
vmaxps %xmm1, %xmm0, %xmm0
vmaxps %xmm3, %xmm2, %xmm1
vminps %xmm1, %xmm0, %xmm0
vmaxps %xmm5, %xmm4, %xmm1
vminps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1ce6c5a(%rip), %xmm1 # 0x1f1ff14
vmulps %xmm1, %xmm0, %xmm0
vmaxps 0x2d0(%rsp), %xmm6, %xmm1
vminps 0x2e0(%rsp), %xmm0, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
vmovaps 0xd0(%rsp), %xmm1
vtestps %xmm1, %xmm0
vmovaps %xmm1, %xmm0
je 0x23a0c4
movzwl (%rbx,%rax,8), %ecx
movzwl 0x2(%rbx,%rax,8), %esi
movl %esi, 0x4c(%rsp)
movl 0x88(%rbx), %r14d
movl 0x4(%rbx,%rax,8), %eax
movq %rax, 0xc8(%rsp)
vmovaps %xmm0, 0x1d0(%rsp)
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x23a0a4
movl %ecx, %esi
movzbl %al, %r9d
movl $0x7fff, %eax # imm = 0x7FFF
andl %eax, %esi
movl 0x4c(%rsp), %r10d
andl %eax, %r10d
xorl %eax, %eax
testw %cx, %cx
setns %al
movq %rax, 0x178(%rsp)
movl %esi, 0x6c(%rsp)
vmovd %esi, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x1ce3634(%rip), %xmm0, %xmm0 # 0x1f1c990
movl %r10d, 0x68(%rsp)
vmovd %r10d, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vpaddd 0x1ce362d(%rip), %xmm1, %xmm1 # 0x1f1c9a0
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vcvtdq2ps %ymm0, %ymm0
vmovaps %ymm0, 0x600(%rsp)
vinsertf128 $0x1, %xmm1, %ymm1, %ymm0
vcvtdq2ps %ymm0, %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vmovd %r14d, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovdqa %xmm0, 0x3e0(%rsp)
vmovss 0xc8(%rsp), %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x3d0(%rsp)
movq %r14, 0x180(%rsp)
movq %r9, 0x40(%rsp)
bsfq %r9, %rcx
movq %rcx, 0x60(%rsp)
movq (%r12), %rax
movq %rax, 0x198(%rsp)
movq 0x1e8(%rax), %rax
movq (%rax,%r14,8), %rax
movq 0x58(%rax), %rsi
movq 0x68(%rax), %rdi
imulq 0xc8(%rsp), %rdi
vbroadcastss 0x2c(%rax), %xmm0
vmovss 0x30(%rax), %xmm1
vbroadcastss 0x28(%rax), %xmm2
vmovaps 0x70(%r13), %xmm3
vsubps %xmm0, %xmm3, %xmm3
vsubss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vdivps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vroundps $0x1, %xmm0, %xmm1
vaddss 0x1cb7591(%rip), %xmm2, %xmm2 # 0x1ef09cc
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vminps %xmm2, %xmm1, %xmm1
vmaxps 0x1cb25c4(%rip), %xmm1, %xmm1 # 0x1eeba10
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x410(%rsp)
vcvtps2dq %xmm1, %xmm0
vmovapd %xmm0, 0x400(%rsp)
movslq 0x400(%rsp,%rcx,4), %rcx
movl (%rsi,%rdi), %edx
addl 0x6c(%rsp), %edx
movq %rsi, 0x190(%rsp)
movq %rdi, 0x188(%rsp)
movl 0x4(%rsi,%rdi), %edi
movl %edi, %r11d
imull 0x68(%rsp), %r11d
addl %edx, %r11d
leaq 0x1(%r11), %rdx
movq 0xe0(%rax), %rsi
movq %rsi, 0x100(%rsp)
imulq $0x38, %rcx, %rcx
movq %rcx, 0x80(%rsp)
movq (%rsi,%rcx), %r9
movq 0x10(%rsi,%rcx), %r10
movq %r10, %rax
imulq %r11, %rax
movq 0x48(%rsi,%rcx), %rsi
vmovups (%r9,%rax), %xmm2
movq %rsi, %r8
imulq %rdx, %r8
imulq %r10, %rdx
vmovups (%r9,%rdx), %xmm0
leaq (%r11,%rdi), %r15
movq %r15, %rax
imulq %r10, %rax
vmovups (%r9,%rax), %xmm1
vmovaps %xmm1, 0xe0(%rsp)
leaq (%r11,%rdi), %r14
incq %r14
movq %r14, %rax
imulq %r10, %rax
vmovups (%r9,%rax), %xmm1
movq %rsi, %rdx
imulq %r11, %rdx
movq 0x178(%rsp), %rcx
leaq (%r11,%rcx), %rax
incq %rax
movq %rsi, %r11
imulq %rax, %r11
imulq %r10, %rax
vmovups (%r9,%rax), %xmm4
leaq (%r14,%rcx), %rbx
movq %rbx, %rax
imulq %r10, %rax
vmovups (%r9,%rax), %xmm5
movq %rsi, %r13
imulq %r15, %r13
movq %rsi, %rax
imulq %r14, %rax
movq %rsi, %rcx
imulq %rbx, %rcx
cmpw $0x0, 0x4c(%rsp)
movl $0x0, %r12d
cmovnsq %rdi, %r12
addq %r12, %r15
movq %rsi, %rdi
imulq %r15, %rdi
imulq %r10, %r15
vmovups (%r9,%r15), %xmm6
addq %r12, %r14
movq %rsi, %r15
imulq %r14, %r15
imulq %r10, %r14
vmovups (%r9,%r14), %xmm7
movq 0x180(%rsp), %r14
addq %rbx, %r12
imulq %r12, %r10
vmovups (%r9,%r10), %xmm8
movq 0x80(%rsp), %r9
movq 0x100(%rsp), %r10
movq 0x38(%r10,%r9), %r9
vmovups (%r9,%rdx), %xmm9
vmovups (%r9,%r8), %xmm10
vmovups (%r9,%r13), %xmm11
movq 0x170(%rsp), %r13
vmovups (%r9,%rax), %xmm12
vmovups (%r9,%r11), %xmm13
vmovups (%r9,%rcx), %xmm14
movq 0x60(%rsp), %rax
vbroadcastss 0x410(%rsp,%rax,4), %xmm15
vsubps %xmm2, %xmm9, %xmm9
vmulps %xmm9, %xmm15, %xmm9
vaddps %xmm2, %xmm9, %xmm2
vmovups (%r9,%rdi), %xmm3
vsubps %xmm0, %xmm10, %xmm9
vmulps %xmm9, %xmm15, %xmm9
vaddps %xmm0, %xmm9, %xmm9
vmovups (%r9,%r15), %xmm10
imulq %rsi, %r12
movq %rax, %rsi
vmovaps 0xe0(%rsp), %xmm0
vsubps %xmm0, %xmm11, %xmm11
vmulps %xmm11, %xmm15, %xmm11
vaddps %xmm0, %xmm11, %xmm0
vmovups (%r9,%r12), %xmm11
vsubps %xmm1, %xmm12, %xmm12
vmulps %xmm12, %xmm15, %xmm12
vaddps %xmm1, %xmm12, %xmm1
vsubps %xmm4, %xmm13, %xmm12
vmulps %xmm12, %xmm15, %xmm12
vaddps %xmm4, %xmm12, %xmm4
vsubps %xmm5, %xmm14, %xmm12
vmulps %xmm12, %xmm15, %xmm12
vaddps %xmm5, %xmm12, %xmm5
vsubps %xmm6, %xmm3, %xmm3
vmulps %xmm3, %xmm15, %xmm3
vaddps %xmm3, %xmm6, %xmm3
vsubps %xmm7, %xmm10, %xmm6
vmulps %xmm6, %xmm15, %xmm6
vaddps %xmm6, %xmm7, %xmm6
vsubps %xmm8, %xmm11, %xmm7
vmulps %xmm7, %xmm15, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vunpcklps %xmm1, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vunpckhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
vunpcklps %xmm0, %xmm9, %xmm10 # xmm10 = xmm9[0],xmm0[0],xmm9[1],xmm0[1]
vunpckhps %xmm0, %xmm9, %xmm11 # xmm11 = xmm9[2],xmm0[2],xmm9[3],xmm0[3]
vunpcklps %xmm11, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm11[0],xmm2[1],xmm11[1]
vunpcklps %xmm10, %xmm8, %xmm11 # xmm11 = xmm8[0],xmm10[0],xmm8[1],xmm10[1]
vunpckhps %xmm10, %xmm8, %xmm8 # xmm8 = xmm8[2],xmm10[2],xmm8[3],xmm10[3]
vunpcklps %xmm5, %xmm9, %xmm10 # xmm10 = xmm9[0],xmm5[0],xmm9[1],xmm5[1]
vunpckhps %xmm5, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm5[2],xmm9[3],xmm5[3]
vunpcklps %xmm1, %xmm4, %xmm12 # xmm12 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vunpckhps %xmm1, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
vunpcklps %xmm4, %xmm9, %xmm4 # xmm4 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
vunpcklps %xmm12, %xmm10, %xmm9 # xmm9 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
vunpckhps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
vunpcklps %xmm7, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
vunpckhps %xmm7, %xmm1, %xmm7 # xmm7 = xmm1[2],xmm7[2],xmm1[3],xmm7[3]
vunpcklps %xmm6, %xmm5, %xmm13 # xmm13 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vunpckhps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vunpcklps %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
vunpcklps %xmm13, %xmm12, %xmm7 # xmm7 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vunpckhps %xmm13, %xmm12, %xmm12 # xmm12 = xmm12[2],xmm13[2],xmm12[3],xmm13[3]
vunpcklps %xmm6, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
vunpckhps %xmm6, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
vunpcklps %xmm3, %xmm1, %xmm6 # xmm6 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpckhps %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vunpcklps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm6, %xmm13, %xmm1 # xmm1 = xmm13[0],xmm6[0],xmm13[1],xmm6[1]
vunpckhps %xmm6, %xmm13, %xmm3 # xmm3 = xmm13[2],xmm6[2],xmm13[3],xmm6[3]
vinsertf128 $0x1, %xmm7, %ymm11, %ymm6
vinsertf128 $0x1, %xmm12, %ymm8, %ymm7
vinsertf128 $0x1, %xmm5, %ymm2, %ymm2
vinsertf128 $0x1, %xmm9, %ymm9, %ymm8
vinsertf128 $0x1, %xmm10, %ymm10, %ymm9
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vinsertf128 $0x1, %xmm1, %ymm1, %ymm10
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vinsertf128 $0x1, %xmm0, %ymm0, %ymm15
leaq 0x1f(%rsp), %rax
movq %rax, 0x480(%rsp)
vbroadcastss (%r13,%rsi,4), %ymm12
vbroadcastss 0x10(%r13,%rsi,4), %ymm13
vbroadcastss 0x20(%r13,%rsi,4), %ymm14
vsubps %ymm12, %ymm6, %ymm1
vsubps %ymm13, %ymm7, %ymm5
vsubps %ymm14, %ymm2, %ymm0
vsubps %ymm12, %ymm8, %ymm2
vmovaps %ymm2, 0x100(%rsp)
vsubps %ymm13, %ymm9, %ymm11
vsubps %ymm14, %ymm4, %ymm2
vmovaps %ymm2, 0x1a0(%rsp)
vsubps %ymm12, %ymm10, %ymm4
vsubps %ymm13, %ymm3, %ymm8
vsubps %ymm14, %ymm15, %ymm3
vmovaps %ymm3, 0xe0(%rsp)
vsubps %ymm1, %ymm4, %ymm12
vsubps %ymm5, %ymm8, %ymm14
vsubps %ymm0, %ymm3, %ymm2
vaddps %ymm5, %ymm8, %ymm6
vaddps %ymm0, %ymm3, %ymm7
vmulps %ymm2, %ymm6, %ymm9
vmulps %ymm7, %ymm14, %ymm10
vsubps %ymm9, %ymm10, %ymm15
vaddps %ymm1, %ymm4, %ymm9
vmulps %ymm7, %ymm12, %ymm7
vmovaps %ymm2, 0x660(%rsp)
vmulps %ymm2, %ymm9, %ymm10
vsubps %ymm7, %ymm10, %ymm7
vmovaps %ymm14, 0x680(%rsp)
vmulps %ymm14, %ymm9, %ymm9
vmovaps %ymm12, 0x6a0(%rsp)
vmulps %ymm6, %ymm12, %ymm6
vsubps %ymm9, %ymm6, %ymm6
vbroadcastss 0x50(%r13,%rsi,4), %ymm13
vbroadcastss 0x60(%r13,%rsi,4), %ymm2
vmulps %ymm2, %ymm6, %ymm6
vmulps %ymm7, %ymm13, %ymm7
vbroadcastss 0x40(%r13,%rsi,4), %ymm3
vmovaps %ymm3, 0x80(%rsp)
vaddps %ymm7, %ymm6, %ymm6
vmulps %ymm3, %ymm15, %ymm7
vaddps %ymm6, %ymm7, %ymm10
vmovaps %ymm11, %ymm3
vsubps %ymm11, %ymm5, %ymm11
vmovaps 0x1a0(%rsp), %ymm9
vsubps %ymm9, %ymm0, %ymm15
vmovaps %ymm5, 0x380(%rsp)
vaddps %ymm3, %ymm5, %ymm6
vmovaps %ymm0, 0x6c0(%rsp)
vaddps %ymm0, %ymm9, %ymm7
vmovaps %ymm9, %ymm0
vmulps %ymm6, %ymm15, %ymm14
vmulps %ymm7, %ymm11, %ymm5
vsubps %ymm14, %ymm5, %ymm5
vmovaps 0x100(%rsp), %ymm12
vsubps %ymm12, %ymm1, %ymm14
vmulps %ymm7, %ymm14, %ymm7
vmovaps %ymm1, 0x3a0(%rsp)
vaddps %ymm1, %ymm12, %ymm9
vmovaps %ymm12, %ymm1
vmulps %ymm15, %ymm9, %ymm12
vsubps %ymm7, %ymm12, %ymm7
vmovaps %ymm11, %ymm12
vmulps %ymm11, %ymm9, %ymm9
vmulps %ymm6, %ymm14, %ymm6
vsubps %ymm9, %ymm6, %ymm6
vmulps %ymm2, %ymm6, %ymm6
vmulps %ymm7, %ymm13, %ymm7
vaddps %ymm7, %ymm6, %ymm6
vmovaps 0x80(%rsp), %ymm9
vmulps %ymm5, %ymm9, %ymm5
vaddps %ymm6, %ymm5, %ymm6
vsubps %ymm4, %ymm1, %ymm7
vaddps %ymm4, %ymm1, %ymm4
vsubps %ymm8, %ymm3, %ymm11
vaddps %ymm3, %ymm8, %ymm1
vmovaps 0xe0(%rsp), %ymm3
vsubps %ymm3, %ymm0, %ymm8
vaddps %ymm3, %ymm0, %ymm0
vmulps %ymm1, %ymm8, %ymm3
vmulps %ymm0, %ymm11, %ymm5
vsubps %ymm3, %ymm5, %ymm3
vmulps %ymm0, %ymm7, %ymm0
vmulps %ymm4, %ymm8, %ymm5
vsubps %ymm0, %ymm5, %ymm0
vmulps %ymm4, %ymm11, %ymm4
vmulps %ymm1, %ymm7, %ymm1
vsubps %ymm4, %ymm1, %ymm1
vmovaps %ymm2, 0xe0(%rsp)
vmulps %ymm2, %ymm1, %ymm1
vmovaps %ymm13, 0x100(%rsp)
vmulps %ymm0, %ymm13, %ymm0
vaddps %ymm0, %ymm1, %ymm0
vmulps %ymm3, %ymm9, %ymm1
vaddps %ymm0, %ymm1, %ymm1
vaddps %ymm6, %ymm10, %ymm0
vaddps %ymm0, %ymm1, %ymm9
vminps %ymm6, %ymm10, %ymm3
vminps %ymm1, %ymm3, %ymm3
vbroadcastss 0x1ce7571(%rip), %ymm4 # 0x1f20ec4
vandps %ymm4, %ymm9, %ymm4
vbroadcastss 0x1ce756c(%rip), %ymm0 # 0x1f20ecc
vmovaps %ymm4, 0x5c0(%rsp)
vmulps %ymm0, %ymm4, %ymm4
vbroadcastss 0x1ce754a(%rip), %ymm0 # 0x1f20ec0
vxorps %ymm0, %ymm4, %ymm5
vcmpnltps %ymm5, %ymm3, %ymm3
vmovaps %ymm10, 0x640(%rsp)
vmovaps %ymm6, 0x620(%rsp)
vmaxps %ymm6, %ymm10, %ymm5
vmaxps %ymm1, %ymm5, %ymm1
vcmpleps %ymm4, %ymm1, %ymm1
vorps %ymm1, %ymm3, %ymm3
vtestps %ymm3, %ymm3
je 0x239d03
vmovaps 0x660(%rsp), %ymm2
vmovaps %ymm11, %ymm6
vmovaps %ymm12, %ymm11
vmulps %ymm2, %ymm12, %ymm1
vmovaps 0x680(%rsp), %ymm13
vmulps %ymm15, %ymm13, %ymm4
vsubps %ymm1, %ymm4, %ymm4
vmulps %ymm6, %ymm15, %ymm5
vmovaps %ymm9, 0x1a0(%rsp)
vmulps %ymm8, %ymm12, %ymm9
vsubps %ymm5, %ymm9, %ymm9
vbroadcastss 0x1ce74d0(%rip), %ymm0 # 0x1f20ec4
vandps %ymm0, %ymm1, %ymm1
vandps %ymm0, %ymm5, %ymm5
vcmpltps %ymm5, %ymm1, %ymm1
vblendvps %ymm1, %ymm4, %ymm9, %ymm1
vmulps %ymm8, %ymm14, %ymm4
vmulps %ymm2, %ymm14, %ymm5
vmovaps 0x6a0(%rsp), %ymm10
vmulps %ymm15, %ymm10, %ymm8
vsubps %ymm8, %ymm5, %ymm5
vmulps %ymm7, %ymm15, %ymm9
vsubps %ymm4, %ymm9, %ymm9
vandps %ymm0, %ymm8, %ymm8
vandps %ymm0, %ymm4, %ymm4
vcmpltps %ymm4, %ymm8, %ymm4
vblendvps %ymm4, %ymm5, %ymm9, %ymm12
vmovaps 0x1a0(%rsp), %ymm9
vmulps %ymm7, %ymm11, %ymm4
vmulps %ymm11, %ymm10, %ymm5
vmulps %ymm13, %ymm14, %ymm7
vmulps %ymm6, %ymm14, %ymm2
vsubps %ymm7, %ymm5, %ymm5
vsubps %ymm4, %ymm2, %ymm2
vandps %ymm0, %ymm7, %ymm7
vandps %ymm0, %ymm4, %ymm4
vcmpltps %ymm4, %ymm7, %ymm4
vblendvps %ymm4, %ymm5, %ymm2, %ymm2
vextractf128 $0x1, %ymm3, %xmm4
vpackssdw %xmm4, %xmm3, %xmm5
vmulps 0xe0(%rsp), %ymm2, %ymm3
vmulps 0x100(%rsp), %ymm12, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vmulps 0x80(%rsp), %ymm1, %ymm4
vaddps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm3, %ymm4
vmulps 0x6c0(%rsp), %ymm2, %ymm3
vmulps 0x380(%rsp), %ymm12, %ymm7
vaddps %ymm7, %ymm3, %ymm3
vmulps 0x3a0(%rsp), %ymm1, %ymm7
vaddps %ymm3, %ymm7, %ymm3
vaddps %ymm3, %ymm3, %ymm3
vrcpps %ymm4, %ymm7
vmulps %ymm7, %ymm4, %ymm8
vbroadcastss 0x1cb2c37(%rip), %ymm0 # 0x1eec714
vsubps %ymm8, %ymm0, %ymm8
vmulps %ymm7, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vmulps %ymm7, %ymm3, %ymm3
vbroadcastss 0x30(%r13,%rsi,4), %ymm7
vcmpleps %ymm3, %ymm7, %ymm7
vbroadcastss 0x80(%r13,%rsi,4), %ymm8
vcmpleps %ymm8, %ymm3, %ymm8
vandps %ymm7, %ymm8, %ymm7
vextractf128 $0x1, %ymm7, %xmm8
vpackssdw %xmm8, %xmm7, %xmm7
vpand %xmm5, %xmm7, %xmm5
vpmovsxwd %xmm5, %xmm7
vpshufd $0xee, %xmm5, %xmm8 # xmm8 = xmm5[2,3,2,3]
vpmovsxwd %xmm8, %xmm8
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vtestps %ymm7, %ymm7
je 0x239d03
vcmpneqps 0x1ce73ba(%rip), %ymm4, %ymm4 # 0x1f20f00
vextractf128 $0x1, %ymm4, %xmm7
vpackssdw %xmm7, %xmm4, %xmm4
vpand %xmm4, %xmm5, %xmm4
vpmovsxwd %xmm4, %xmm5
vpunpckhwd %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[4,4,5,5,6,6,7,7]
vinsertf128 $0x1, %xmm4, %ymm5, %ymm4
vtestps %ymm4, %ymm4
je 0x239d03
vmovaps %ymm9, 0x460(%rsp)
leaq 0x1f(%rsp), %rax
movq %rax, 0x480(%rsp)
vmovaps %ymm4, 0x4a0(%rsp)
vmovaps %ymm3, 0x500(%rsp)
vmovaps 0x620(%rsp), %ymm6
vsubps %ymm6, %ymm9, %ymm3
vmovaps 0x5a0(%rsp), %ymm5
vmovaps 0x640(%rsp), %ymm0
vblendvps %ymm5, %ymm3, %ymm0, %ymm4
vsubps %ymm0, %ymm9, %ymm3
vblendvps %ymm5, %ymm3, %ymm6, %ymm3
vmovaps %ymm4, 0x420(%rsp)
vmovaps %ymm3, 0x440(%rsp)
vmovaps 0x580(%rsp), %ymm0
vmulps %ymm1, %ymm0, %ymm1
vmovaps %ymm1, 0x520(%rsp)
vmulps %ymm0, %ymm12, %ymm1
vmovaps %ymm1, 0x540(%rsp)
vmulps %ymm2, %ymm0, %ymm1
vmovaps %ymm1, 0x560(%rsp)
movq 0x190(%rsp), %rcx
movq 0x188(%rsp), %rdx
movzwl 0x8(%rcx,%rdx), %eax
decl %eax
vcvtsi2ss %eax, %xmm14, %xmm1
vrcpss %xmm1, %xmm1, %xmm2
vmulss %xmm1, %xmm2, %xmm1
vmovss 0x1cb73c6(%rip), %xmm0 # 0x1ef0ff8
vsubss %xmm1, %xmm0, %xmm1
vmulss %xmm1, %xmm2, %xmm1
movzwl 0xa(%rcx,%rdx), %eax
decl %eax
vcvtsi2ss %eax, %xmm14, %xmm2
vrcpss %xmm2, %xmm2, %xmm5
vmulss %xmm2, %xmm5, %xmm2
vsubss %xmm2, %xmm0, %xmm2
vmulss %xmm2, %xmm5, %xmm2
vmulps 0x600(%rsp), %ymm9, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmulps %ymm1, %ymm4, %ymm1
vmovaps %ymm1, 0x420(%rsp)
vmulps 0x5e0(%rsp), %ymm9, %ymm4
vaddps %ymm3, %ymm4, %ymm3
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vmulps %ymm2, %ymm3, %ymm2
vmovaps %ymm2, 0x440(%rsp)
movq 0x198(%rsp), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r14,8), %r10
movl 0x90(%r13,%rsi,4), %eax
testl %eax, 0x34(%r10)
je 0x239d03
movq 0xc0(%rsp), %r12
movq 0x10(%r12), %rax
cmpq $0x0, 0x10(%rax)
jne 0x239d3b
cmpq $0x0, 0x48(%r10)
jne 0x239d3b
movq 0x50(%rsp), %r15
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r8
movq 0x30(%rsp), %rdx
movq 0x58(%rsp), %rbx
movq 0x40(%rsp), %r9
andl $0x0, 0x1d0(%rsp,%rsi,4)
jmp 0x239d29
movq 0xc0(%rsp), %r12
movq 0x50(%rsp), %r15
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r8
movq 0x30(%rsp), %rdx
movq 0x58(%rsp), %rbx
movq 0x40(%rsp), %r9
leaq -0x1(%r9), %rax
andq %rax, %r9
jne 0x2393cb
jmp 0x23a0a4
vrcpps %ymm9, %ymm3
vmulps %ymm3, %ymm9, %ymm0
vbroadcastss 0x1cb29c7(%rip), %ymm4 # 0x1eec714
vsubps %ymm0, %ymm4, %ymm0
vmulps %ymm0, %ymm3, %ymm0
vaddps %ymm0, %ymm3, %ymm0
vbroadcastss 0x1cb7286(%rip), %ymm3 # 0x1ef0fe8
vmovaps 0x5c0(%rsp), %ymm5
vcmpnltps %ymm3, %ymm5, %ymm3
vandps %ymm0, %ymm3, %ymm0
vmulps %ymm0, %ymm1, %ymm1
vminps %ymm4, %ymm1, %ymm1
vmovaps %ymm1, 0x4c0(%rsp)
vmulps %ymm0, %ymm2, %ymm0
vminps %ymm4, %ymm0, %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovmskps %ymm0, %r11d
bsfq %r11, %rcx
testl %r11d, %r11d
setne 0x1a0(%rsp)
movq 0x50(%rsp), %r15
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r8
movq 0x30(%rsp), %rdx
movq 0x58(%rsp), %rbx
movq 0x40(%rsp), %r9
je 0x23a091
pushq $0x1
popq %rax
movq %rcx, 0x80(%rsp)
movl %esi, %ecx
shll %cl, %eax
cltq
shlq $0x4, %rax
leaq 0x1f1618e(%rip), %rcx # 0x214ff80
vmovaps (%rcx,%rax), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
movq 0x80(%rsp), %rcx
movq %r10, 0x380(%rsp)
vmovss 0x80(%r13,%rsi,4), %xmm5
vmovss 0x500(%rsp,%rcx,4), %xmm0
vbroadcastss 0x4c0(%rsp,%rcx,4), %xmm1
vbroadcastss 0x4e0(%rsp,%rcx,4), %xmm2
vmovss %xmm0, 0x80(%r13,%rsi,4)
movq 0x8(%r12), %rax
vbroadcastss 0x520(%rsp,%rcx,4), %xmm0
vbroadcastss 0x540(%rsp,%rcx,4), %xmm3
movq %rcx, 0x80(%rsp)
vbroadcastss 0x560(%rsp,%rcx,4), %xmm4
vmovaps %xmm0, 0x2f0(%rsp)
vmovaps %xmm3, 0x300(%rsp)
vmovaps %xmm4, 0x310(%rsp)
vmovaps %xmm1, 0x320(%rsp)
vmovaps %xmm2, 0x330(%rsp)
vmovaps 0x3d0(%rsp), %xmm0
vmovaps %xmm0, 0x340(%rsp)
vmovaps 0x3e0(%rsp), %xmm0
vmovaps %xmm0, 0x350(%rsp)
leaq 0x360(%rsp), %rcx
vcmptrueps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x130(%rsp)
leaq 0x130(%rsp), %rcx
movq %rcx, 0x1e0(%rsp)
movq 0x18(%r10), %rcx
movq %rcx, 0x1e8(%rsp)
movq %rax, 0x1f0(%rsp)
movq %r13, 0x1f8(%rsp)
leaq 0x2f0(%rsp), %rax
movq %rax, 0x200(%rsp)
movl $0x4, 0x208(%rsp)
movq 0x48(%r10), %rax
testq %rax, %rax
movq %r11, 0x100(%rsp)
vmovss %xmm5, 0xe0(%rsp)
je 0x239f9d
leaq 0x1e0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0xe0(%rsp), %xmm5
movq 0x100(%rsp), %r11
movq 0x380(%rsp), %r10
movq 0x60(%rsp), %rsi
movq 0x40(%rsp), %r9
movq 0x30(%rsp), %rdx
movq 0x20(%rsp), %r8
movq 0x28(%rsp), %rdi
vmovdqa 0x130(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x23a044
movq 0x10(%r12), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x23a00a
testb $0x2, (%rcx)
jne 0x239fcb
testb $0x40, 0x3e(%r10)
je 0x23a00a
leaq 0x1e0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0xe0(%rsp), %xmm5
movq 0x100(%rsp), %r11
movq 0x380(%rsp), %r10
movq 0x60(%rsp), %rsi
movq 0x40(%rsp), %r9
movq 0x30(%rsp), %rdx
movq 0x20(%rsp), %r8
movq 0x28(%rsp), %rdi
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x130(%rsp), %xmm0, %xmm1
vpxor 0x1cb1e01(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x1f8(%rsp), %rax
vbroadcastss 0x1cb2b54(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x23a054
vpcmpeqd 0x1cb19c4(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cb1dcc(%rip), %xmm0, %xmm0 # 0x1eebe20
vmovddup 0x1ce6e8c(%rip), %xmm1 # xmm1 = mem[0,0]
vptest %xmm1, %xmm0
movq 0x80(%rsp), %rcx
jne 0x23a091
movq 0xb8(%rsp), %rax
vmovss %xmm5, (%rax,%rsi,4)
btcq %rcx, %r11
bsfq %r11, %rcx
testq %r11, %r11
setne 0x1a0(%rsp)
jne 0x239e10
testb $0x1, 0x1a0(%rsp)
jne 0x239cf9
jmp 0x239d29
vmovaps 0x1d0(%rsp), %xmm0
vmovaps 0xd0(%rsp), %xmm1
vtestps %xmm1, %xmm0
vandps %xmm1, %xmm0, %xmm0
sete %al
jmp 0x23a0c6
xorl %eax, %eax
vmovaps %xmm0, 0xd0(%rsp)
testb %al, %al
jne 0x23a0eb
movq 0x168(%rsp), %rcx
leaq -0x1(%rcx), %rax
andq %rax, %rcx
movq %rcx, %rax
jne 0x238f6b
movq 0x160(%rsp), %rax
incq %rax
cmpq 0x158(%rsp), %rax
jne 0x238eef
vpcmpeqd %xmm1, %xmm1, %xmm1
vpxor 0xd0(%rsp), %xmm1, %xmm0
vpor 0x70(%rsp), %xmm0, %xmm0
vmovdqa %xmm0, 0x70(%rsp)
vtestps %xmm1, %xmm0
jb 0x23a149
vmovaps 0x2e0(%rsp), %xmm1
vbroadcastss 0x1cb2a4e(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x2e0(%rsp)
xorl %eax, %eax
jmp 0x23a14c
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x238ba9
jmp 0x23a224
pushq $0x2
jmp 0x23a14b
vmovaps %xmm15, 0x80(%rsp)
movq %r8, 0x20(%rsp)
movq %rdi, 0x28(%rsp)
movq %rdx, 0x30(%rsp)
bsfq %rbx, %r15
movq 0x150(%rsp), %rdi
movq 0x148(%rsp), %rsi
movq %r15, %rcx
leaq 0x3f(%rsp), %r8
movq %r13, %r9
pushq %r12
leaq 0x218(%rsp), %rax
pushq %rax
vzeroupper
callq 0x28632c
popq %rcx
popq %rdx
testb %al, %al
je 0x23a1b4
orl $-0x1, 0x70(%rsp,%r15,4)
leaq -0x1(%rbx), %rax
andq %rax, %rbx
movq 0x30(%rsp), %rdx
jne 0x23a176
vmovaps 0x70(%rsp), %xmm0
vtestps 0x1cb1c4f(%rip), %xmm0 # 0x1eebe20
pushq $0x3
popq %rax
movq 0x50(%rsp), %r15
movq 0x28(%rsp), %rdi
movq 0x20(%rsp), %r8
vmovaps 0x80(%rsp), %xmm15
jb 0x238bed
vmovaps 0x2e0(%rsp), %xmm1
vbroadcastss 0x1cb2980(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps 0x80(%rsp), %xmm15
vmovaps %xmm0, 0x2e0(%rsp)
pushq $0x2
popq %rax
jmp 0x238bed
vmovaps 0x3c0(%rsp), %xmm0
vandps 0x70(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cb2948(%rip), %xmm1 # 0x1eecb84
movq 0xb8(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::SubGridIntersectorKPluecker<4, 4, true>, true>::intersect(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xf8, %rsp
movq (%rsi), %r13
cmpq $0x8, 0x70(%r13)
je 0x23a43f
movq %rcx, %r15
movq %rdx, %r14
movq %rsi, %r12
cmpq $0x0, 0x8(%rcx)
je 0x23a299
movq 0x10(%r15), %rax
testb $0x1, 0x2(%rax)
jne 0x23a451
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x23a43f
movzbl %al, %ebp
vmovaps (%r14), %xmm1
leaq 0x10(%rsp), %rax
vmovaps %xmm1, (%rax)
vmovaps 0x10(%r14), %xmm1
vmovaps %xmm1, 0x10(%rax)
vmovaps 0x20(%r14), %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps 0x40(%r14), %xmm1
vmovaps %xmm1, 0x30(%rax)
vmovaps 0x50(%r14), %xmm2
vmovaps %xmm2, 0x40(%rax)
vmovaps 0x60(%r14), %xmm3
vmovaps %xmm3, 0x50(%rax)
vbroadcastss 0x1ce6bc6(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm1, %xmm5
vbroadcastss 0x1cb6cdd(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1cb23fb(%rip), %xmm7 # 0x1eec714
vdivps %xmm1, %xmm7, %xmm1
vandps %xmm4, %xmm2, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vdivps %xmm2, %xmm7, %xmm2
vandps %xmm4, %xmm3, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vbroadcastss 0x1ce6c24(%rip), %xmm6 # 0x1f20f60
vblendvps %xmm5, %xmm6, %xmm1, %xmm1
vblendvps %xmm8, %xmm6, %xmm2, %xmm2
vdivps %xmm3, %xmm7, %xmm3
vblendvps %xmm4, %xmm6, %xmm3, %xmm3
vmovaps %xmm1, 0x60(%rax)
vmovaps %xmm2, 0x70(%rax)
vmovaps %xmm3, 0x80(%rax)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %xmm4, %xmm1, %xmm1
vbroadcastss 0x1d205ee(%rip), %xmm5 # 0x1f5a964
vandps %xmm5, %xmm1, %xmm1
vmovaps %xmm1, 0x90(%rax)
vcmpnltps %xmm4, %xmm2, %xmm1
vbroadcastss 0x1d1a610(%rip), %xmm2 # 0x1f549a0
vbroadcastss 0x1d205cf(%rip), %xmm5 # 0x1f5a968
vblendvps %xmm1, %xmm2, %xmm5, %xmm1
vmovaps %xmm1, 0xa0(%rax)
vcmpnltps %xmm4, %xmm3, %xmm1
vbroadcastss 0x1d205b7(%rip), %xmm2 # 0x1f5a96c
vbroadcastss 0x1d205b2(%rip), %xmm3 # 0x1f5a970
vblendvps %xmm1, %xmm2, %xmm3, %xmm1
vmovaps %xmm1, 0xb0(%rax)
vmovaps 0x30(%r14), %xmm1
vmovaps 0x80(%r14), %xmm2
vmaxps %xmm4, %xmm1, %xmm1
vmaxps %xmm4, %xmm2, %xmm2
vbroadcastss 0x1cb1634(%rip), %xmm3 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm3, %xmm1
vmovaps %xmm1, 0xc0(%rax)
vbroadcastss 0x1cb2781(%rip), %xmm1 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0xd0(%rax)
bsfq %rbp, %rcx
leaq -0x1(%rbp), %rbx
movq 0x70(%r13), %rdx
movq %r12, %rdi
movq %r13, %rsi
leaq 0xf(%rsp), %r8
movq %r14, %r9
pushq %r15
leaq 0x18(%rsp), %rax
pushq %rax
callq 0x289aa2
popq %rax
popq %rcx
andq %rbx, %rbp
jne 0x23a411
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %r12, %rsi
movq %r14, %rdx
movq %r15, %rcx
addq $0xf8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x2879ca
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, true, embree::avx::SubGridIntersectorKPluecker<4, 4, true>, true>::occluded(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayK<K>& __restrict__ ray,
RayQueryContext* context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
occludedCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out already occluded and invalid rays */
vbool<K> valid = (*valid_i == -1) & (ray.tfar >= 0.0f);
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
const size_t valid_bits = movemask(valid);
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
vbool<K> terminated = !valid;
const vfloat<K> inf = vfloat<K>(pos_inf);
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
if (occluded1(This, bvh, cur, i, pre, ray, tray, context))
set(terminated, i);
}
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(shadow.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we push the previously hit node onto the stack, and continue with the currently hit child */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push 'cur' node onto stack and continue with hit child */
if (likely(cur != BVH::emptyNode)) {
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
}
if (unlikely(cur == BVH::emptyNode))
goto pop;
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(shadow.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*) cur.leaf(items);
size_t lazy_node = 0;
terminated |= PrimitiveIntersectorK::occluded(!terminated, This, pre, ray, context, prim, items, tray, lazy_node);
if (all(terminated)) break;
tray.tfar = select(terminated, vfloat<K>(neg_inf), tray.tfar); // ignore node intersections for terminated rays
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
vfloat<K>::store(valid & terminated, &ray.tfar, neg_inf);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1c58, %rsp # imm = 0x1C58
movq (%rsi), %rax
movq %rax, 0x178(%rsp)
cmpq $0x8, 0x70(%rax)
je 0x23c19c
movq %rcx, %r10
movq %rdx, %r9
movq 0x8(%rcx), %rax
testq %rax, %rax
je 0x23a4b4
movq 0x10(%r10), %rcx
testb $0x1, 0x2(%rcx)
jne 0x23c1b1
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm3
vmovaps 0x80(%r9), %xmm1
vxorps %xmm2, %xmm2, %xmm2
vcmpnltps %xmm2, %xmm1, %xmm4
vtestps %xmm3, %xmm4
je 0x23c19c
vandps %xmm3, %xmm4, %xmm10
vmovaps (%r9), %xmm3
vmovaps %xmm3, 0x270(%rsp)
vmovaps 0x10(%r9), %xmm3
vmovaps %xmm3, 0x280(%rsp)
vmovaps 0x20(%r9), %xmm3
vmovaps %xmm3, 0x290(%rsp)
vmovaps 0x40(%r9), %xmm3
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps 0x50(%r9), %xmm4
vmovaps %xmm4, 0x2b0(%rsp)
vmovaps 0x60(%r9), %xmm5
vmovaps %xmm5, 0x2c0(%rsp)
vbroadcastss 0x1ce6985(%rip), %xmm8 # 0x1f20ec4
vandps %xmm3, %xmm8, %xmm6
vbroadcastss 0x1cb6a9c(%rip), %xmm9 # 0x1ef0fe8
vcmpltps %xmm9, %xmm6, %xmm6
vbroadcastss 0x1cb21b9(%rip), %xmm11 # 0x1eec714
vdivps %xmm3, %xmm11, %xmm3
vandps %xmm4, %xmm8, %xmm7
vcmpltps %xmm9, %xmm7, %xmm7
vdivps %xmm4, %xmm11, %xmm4
vandps %xmm5, %xmm8, %xmm8
vcmpltps %xmm9, %xmm8, %xmm8
vbroadcastss 0x1ce69e0(%rip), %xmm9 # 0x1f20f60
vblendvps %xmm6, %xmm9, %xmm3, %xmm3
vblendvps %xmm7, %xmm9, %xmm4, %xmm4
vdivps %xmm5, %xmm11, %xmm5
vblendvps %xmm8, %xmm9, %xmm5, %xmm5
vmovaps %xmm3, 0x2d0(%rsp)
vmovaps %xmm4, 0x2e0(%rsp)
vmovaps %xmm5, 0x2f0(%rsp)
vcmpltps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1d203a5(%rip), %xmm6 # 0x1f5a964
vandps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0x300(%rsp)
vcmpnltps %xmm2, %xmm4, %xmm3
vbroadcastss 0x1d1a3c6(%rip), %xmm4 # 0x1f549a0
vbroadcastss 0x1d20385(%rip), %xmm6 # 0x1f5a968
vblendvps %xmm3, %xmm4, %xmm6, %xmm3
vcmpnltps %xmm2, %xmm5, %xmm4
vbroadcastss 0x1d20375(%rip), %xmm5 # 0x1f5a96c
vbroadcastss 0x1d20370(%rip), %xmm6 # 0x1f5a970
vblendvps %xmm4, %xmm5, %xmm6, %xmm4
vmovaps %xmm3, 0x310(%rsp)
vmovaps %xmm4, 0x320(%rsp)
vmovaps 0x30(%r9), %xmm3
vmaxps %xmm2, %xmm3, %xmm3
vmaxps %xmm2, %xmm1, %xmm2
vbroadcastss 0x1cb13f1(%rip), %xmm11 # 0x1eeba20
vblendvps %xmm10, %xmm3, %xmm11, %xmm1
vmovaps %xmm1, 0x330(%rsp)
vbroadcastss 0x1cb253d(%rip), %xmm3 # 0x1eecb84
vblendvps %xmm10, %xmm2, %xmm3, %xmm2
vmovaps %xmm2, 0x340(%rsp)
vmovaps %xmm10, 0x3e0(%rsp)
vxorps %xmm0, %xmm10, %xmm0
vmovaps %xmm0, 0x130(%rsp)
testq %rax, %rax
movq %rsi, 0x240(%rsp)
je 0x23a68d
movq 0x10(%r10), %rax
movb 0x2(%rax), %al
andb $0x1, %al
xorl %r13d, %r13d
cmpb $0x1, %al
adcq $0x2, %r13
jmp 0x23a691
pushq $0x3
popq %r13
leaq 0x80(%r9), %rax
movq %rax, 0x110(%rsp)
leaq 0x5c0(%rsp), %r12
movq $-0x8, -0x10(%r12)
leaq 0xd60(%rsp), %r15
vmovaps %xmm11, -0x20(%r15)
movq 0x178(%rsp), %rax
movq 0x70(%rax), %rax
movq %rax, -0x8(%r12)
vmovaps %xmm1, -0x10(%r15)
pushq $0x8
popq %rdi
vbroadcastss 0x1ce582e(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1ce5829(%rip), %xmm12 # 0x1f1ff14
vpcmpeqd %xmm13, %xmm13, %xmm13
movq %r10, 0x48(%rsp)
movq %r9, 0x40(%rsp)
addq $-0x10, %r15
movq -0x8(%r12), %rbp
addq $-0x8, %r12
cmpq $-0x8, %rbp
je 0x23c096
vmovaps (%r15), %xmm14
vcmpltps 0x340(%rsp), %xmm14, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x23c0a7
movzbl %al, %r14d
popcntl %r14d, %ebx
xorl %eax, %eax
cmpq %r13, %rbx
jbe 0x23c0ab
cmpq %r13, %rbx
jbe 0x23c099
testb $0x8, %bpl
jne 0x23a8d9
movq %rbp, %rax
movq %rbp, %rcx
andq $-0x10, %rcx
xorl %edx, %edx
movq %rdi, %rbp
vmovaps %xmm11, %xmm14
movq (%rcx,%rdx,8), %rsi
cmpq $0x8, %rsi
je 0x23a877
vbroadcastss 0x20(%rax,%rdx,4), %xmm0
vmovaps 0x270(%rsp), %xmm1
vmovaps 0x280(%rsp), %xmm2
vmovaps 0x290(%rsp), %xmm3
vmovaps 0x2d0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm5
vbroadcastss 0x40(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmovaps 0x2e0(%rsp), %xmm6
vmulps %xmm0, %xmm6, %xmm7
vbroadcastss 0x60(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmovaps 0x2f0(%rsp), %xmm8
vmulps %xmm0, %xmm8, %xmm9
vbroadcastss 0x30(%rax,%rdx,4), %xmm0
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm1
vbroadcastss 0x50(%rax,%rdx,4), %xmm0
vsubps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm2
vbroadcastss 0x70(%rax,%rdx,4), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm8, %xmm3
vminps %xmm1, %xmm5, %xmm0
vminps %xmm2, %xmm7, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vminps %xmm3, %xmm9, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vmaxps %xmm1, %xmm5, %xmm1
vmaxps %xmm2, %xmm7, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm3, %xmm9, %xmm2
vminps %xmm2, %xmm1, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vmaxps 0x330(%rsp), %xmm0, %xmm2
vminps 0x340(%rsp), %xmm1, %xmm1
vcmpleps %xmm1, %xmm2, %xmm1
vtestps %xmm1, %xmm1
je 0x23a877
vblendvps %xmm1, %xmm0, %xmm11, %xmm0
cmpq $0x8, %rbp
je 0x23a870
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm14, (%r15)
addq $0x10, %r15
vmovaps %xmm0, %xmm14
movq %rsi, %rbp
cmpq $0x8, %rsi
je 0x23a88e
leaq 0x1(%rdx), %rsi
cmpq $0x3, %rdx
movq %rsi, %rdx
jb 0x23a767
xorl %eax, %eax
cmpq $0x8, %rbp
je 0x23a8d2
vmovaps 0x340(%rsp), %xmm0
vcmpnleps %xmm14, %xmm0, %xmm0
vmovmskps %xmm0, %ecx
popcntl %ecx, %edx
movb $0x1, %cl
cmpq %rdx, %r13
jae 0x23a8c1
testb %cl, %cl
jne 0x23a749
jmp 0x23c099
movq %rbp, (%r12)
addq $0x8, %r12
vmovaps %xmm14, (%r15)
addq $0x10, %r15
xorl %ecx, %ecx
pushq $0x4
popq %rax
jmp 0x23a8b4
cmpq $-0x8, %rbp
je 0x23c096
vmovaps 0x340(%rsp), %xmm0
vcmpnleps %xmm14, %xmm0, %xmm0
vtestps %xmm0, %xmm0
je 0x23c0a7
movl %ebp, %eax
andl $0xf, %eax
vmovdqa 0x130(%rsp), %xmm0
addq $-0x8, %rax
movq %rax, 0x248(%rsp)
je 0x23c058
andq $-0x10, %rbp
vpxor %xmm0, %xmm13, %xmm14
xorl %eax, %eax
movq %r13, 0x258(%rsp)
movq %rax, 0x250(%rsp)
imulq $0x58, %rax, %rdx
vmovq 0x20(%rbp,%rdx), %xmm0
vmovq 0x24(%rbp,%rdx), %xmm1
vpminub %xmm1, %xmm0, %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpxor %xmm0, %xmm13, %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vpxor %xmm0, %xmm13, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %eax
testl %eax, %eax
je 0x23c03a
addq %rbp, %rdx
movzbl %al, %esi
movq %rdx, 0x260(%rsp)
bsfq %rsi, %rcx
vmovq 0x20(%rdx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vbroadcastss 0x44(%rdx), %xmm1
vbroadcastss 0x38(%rdx), %xmm2
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovq 0x24(%rdx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x350(%rsp)
vmovq 0x28(%rdx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vbroadcastss 0x48(%rdx), %xmm1
vbroadcastss 0x3c(%rdx), %xmm2
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x140(%rsp)
vmovq 0x2c(%rdx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0xc0(%rsp)
vmovq 0x30(%rdx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vbroadcastss 0x4c(%rdx), %xmm1
vbroadcastss 0x40(%rdx), %xmm2
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x190(%rsp)
vmovq 0x34(%rdx), %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vcvtdq2ps %xmm0, %xmm0
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x180(%rsp)
vbroadcastss 0x4b0(%rsp,%rcx,4), %xmm0
vmovaps 0x270(%rsp), %xmm1
vmovaps 0x280(%rsp), %xmm2
vmovaps 0x290(%rsp), %xmm3
vmovaps 0x2d0(%rsp), %xmm4
vsubps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm4, %xmm0
vbroadcastss 0x140(%rsp,%rcx,4), %xmm5
vsubps %xmm2, %xmm5, %xmm5
vmovaps 0x2e0(%rsp), %xmm6
vmulps %xmm5, %xmm6, %xmm5
vbroadcastss 0x190(%rsp,%rcx,4), %xmm7
vsubps %xmm3, %xmm7, %xmm7
vmovaps 0x2f0(%rsp), %xmm8
vmulps %xmm7, %xmm8, %xmm7
vbroadcastss 0x350(%rsp,%rcx,4), %xmm9
vsubps %xmm1, %xmm9, %xmm1
vmulps %xmm1, %xmm4, %xmm1
vbroadcastss 0xc0(%rsp,%rcx,4), %xmm4
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm6, %xmm2
vbroadcastss 0x180(%rsp,%rcx,4), %xmm4
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm8, %xmm3
vminps %xmm1, %xmm0, %xmm4
vminps %xmm2, %xmm5, %xmm6
vmaxps %xmm6, %xmm4, %xmm4
vminps %xmm3, %xmm7, %xmm6
vmaxps %xmm6, %xmm4, %xmm4
vmulps %xmm4, %xmm10, %xmm4
vmaxps %xmm1, %xmm0, %xmm0
vmaxps %xmm2, %xmm5, %xmm1
vminps %xmm1, %xmm0, %xmm0
vmaxps %xmm3, %xmm7, %xmm1
vminps %xmm1, %xmm0, %xmm0
vmulps %xmm0, %xmm12, %xmm0
vmaxps 0x330(%rsp), %xmm4, %xmm1
vminps 0x340(%rsp), %xmm0, %xmm0
vcmpleps %xmm0, %xmm1, %xmm0
vtestps %xmm14, %xmm0
je 0x23c027
movq %rsi, 0x268(%rsp)
movzwl (%rdx,%rcx,8), %r8d
movzwl 0x2(%rdx,%rcx,8), %esi
movl 0x50(%rdx), %eax
movq %rax, 0x10(%rsp)
movl 0x4(%rdx,%rcx,8), %r13d
movq %r13, 0x20(%rsp)
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rax,8), %r9
movq 0x58(%r9), %r11
movq %r11, 0x118(%rsp)
movq 0x90(%r9), %rdx
imulq 0x68(%r9), %r13
movl %r8d, %eax
movl $0x7fff, %edi # imm = 0x7FFF
andl %edi, %eax
movl (%r11,%r13), %r10d
addl %eax, %r10d
movl %esi, %ecx
andl %edi, %ecx
movl 0x4(%r11,%r13), %edi
movl %edi, %r14d
imull %ecx, %r14d
addl %r10d, %r14d
leaq 0x1(%r14), %r10
movq 0xa0(%r9), %r9
movq %r9, %r11
imulq %r14, %r11
vmovups (%rdx,%r11), %xmm2
imulq %r9, %r10
vmovups (%rdx,%r10), %xmm0
leaq (%r14,%rdi), %r11
leaq (%r14,%rdi), %r10
incq %r10
movq %r11, %rbx
imulq %r9, %rbx
vmovups (%rdx,%rbx), %xmm3
movq %r10, %rbx
imulq %r9, %rbx
vmovups (%rdx,%rbx), %xmm1
xorl %ebx, %ebx
testw %r8w, %r8w
setns %bl
leaq (%r14,%rbx), %r8
incq %r8
imulq %r9, %r8
vmovups (%rdx,%r8), %xmm5
addq %r10, %rbx
movq %rbx, %r8
imulq %r9, %r8
vmovups (%rdx,%r8), %xmm4
movl %eax, %r8d
testw %si, %si
movl $0x0, %esi
cmovsq %rsi, %rdi
movq 0x118(%rsp), %rsi
addq %rdi, %r11
imulq %r9, %r11
vmovups (%rdx,%r11), %xmm6
movl %ecx, %r11d
addq %rdi, %r10
imulq %r9, %r10
vmovups (%rdx,%r10), %xmm7
movq 0x48(%rsp), %r10
addq %rbx, %rdi
imulq %r9, %rdi
movq 0x40(%rsp), %r9
vmovups (%rdx,%rdi), %xmm8
movq %r13, %rdi
vmovss 0x20(%rsp), %xmm9
vmovaps %xmm2, 0x4b0(%rsp)
vmovaps %xmm0, 0x4c0(%rsp)
vmovaps %xmm1, 0x4d0(%rsp)
vmovaps %xmm3, 0x4e0(%rsp)
vmovaps %xmm0, 0x4f0(%rsp)
vmovaps %xmm5, 0x500(%rsp)
vmovaps %xmm4, 0x510(%rsp)
vmovaps %xmm1, 0x520(%rsp)
vmovaps %xmm3, 0x530(%rsp)
vmovaps %xmm1, 0x540(%rsp)
vmovaps %xmm7, 0x550(%rsp)
vmovaps %xmm6, 0x560(%rsp)
vmovaps %xmm1, 0x570(%rsp)
vmovaps %xmm4, 0x580(%rsp)
vmovaps %xmm8, 0x590(%rsp)
vmovaps %xmm7, 0x5a0(%rsp)
vmovss 0x10(%rsp), %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps %xmm0, 0x410(%rsp)
vshufps $0x0, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,0,0,0]
vmovaps %xmm0, 0x400(%rsp)
leaq 0x4e8(%rsp), %r13
xorl %ebx, %ebx
vmovaps %xmm14, 0x3f0(%rsp)
vmovaps %xmm14, 0x10(%rsp)
vbroadcastss -0x38(%r13), %xmm13
vbroadcastss -0x34(%r13), %xmm2
vbroadcastss -0x30(%r13), %xmm1
vbroadcastss -0x8(%r13), %xmm0
vbroadcastss -0x4(%r13), %xmm5
vbroadcastss (%r13), %xmm6
vmovaps 0x410(%rsp), %xmm3
vmovaps %xmm3, 0x190(%rsp)
vmovaps 0x400(%rsp), %xmm3
vmovaps %xmm3, 0x180(%rsp)
vmovaps 0x10(%r9), %xmm10
vmovaps 0x20(%r9), %xmm3
vmovaps %xmm3, 0xa0(%rsp)
vsubps %xmm10, %xmm2, %xmm4
vsubps %xmm3, %xmm1, %xmm9
vmovaps %xmm5, 0x490(%rsp)
vsubps %xmm10, %xmm5, %xmm5
vmovaps %xmm5, 0xb0(%rsp)
vmovaps %xmm6, 0x480(%rsp)
vsubps %xmm3, %xmm6, %xmm2
vmovaps %xmm2, 0x20(%rsp)
vsubps %xmm4, %xmm5, %xmm1
vsubps %xmm9, %xmm2, %xmm3
vaddps %xmm4, %xmm5, %xmm5
vaddps %xmm2, %xmm9, %xmm6
vmulps %xmm3, %xmm5, %xmm7
vmulps %xmm6, %xmm1, %xmm8
vsubps %xmm7, %xmm8, %xmm11
vmovaps (%r9), %xmm7
vsubps %xmm7, %xmm13, %xmm14
vmovaps %xmm0, 0x4a0(%rsp)
vsubps %xmm7, %xmm0, %xmm13
vsubps %xmm14, %xmm13, %xmm0
vmulps %xmm6, %xmm0, %xmm6
vaddps %xmm14, %xmm13, %xmm8
vmovaps %xmm3, 0x210(%rsp)
vmulps %xmm3, %xmm8, %xmm3
vsubps %xmm6, %xmm3, %xmm3
vmovaps %xmm1, 0x220(%rsp)
vmulps %xmm1, %xmm8, %xmm6
vmovaps %xmm0, 0x1e0(%rsp)
vmulps %xmm5, %xmm0, %xmm5
vsubps %xmm6, %xmm5, %xmm6
vmovaps 0x60(%r9), %xmm15
vmulps %xmm6, %xmm15, %xmm8
vmovaps 0x50(%r9), %xmm12
vmulps %xmm3, %xmm12, %xmm3
vaddps %xmm3, %xmm8, %xmm3
vmovaps 0x40(%r9), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmulps %xmm0, %xmm11, %xmm11
vaddps %xmm3, %xmm11, %xmm8
vbroadcastss -0x24(%r13), %xmm0
vmovaps %xmm0, 0x470(%rsp)
vsubps %xmm10, %xmm0, %xmm3
vbroadcastss -0x20(%r13), %xmm0
vmovaps %xmm0, 0x460(%rsp)
vsubps 0xa0(%rsp), %xmm0, %xmm0
vsubps %xmm3, %xmm4, %xmm2
vsubps %xmm0, %xmm9, %xmm11
vmovaps %xmm4, 0xe0(%rsp)
vaddps %xmm3, %xmm4, %xmm1
vmovaps %xmm9, 0x1d0(%rsp)
vaddps %xmm0, %xmm9, %xmm4
vmulps %xmm1, %xmm11, %xmm10
vmulps %xmm4, %xmm2, %xmm5
vsubps %xmm10, %xmm5, %xmm5
vbroadcastss -0x28(%r13), %xmm6
vmovaps %xmm6, 0x450(%rsp)
vsubps %xmm7, %xmm6, %xmm7
vsubps %xmm7, %xmm14, %xmm10
vmulps %xmm4, %xmm10, %xmm4
vmovaps %xmm14, 0x230(%rsp)
vaddps %xmm7, %xmm14, %xmm14
vmulps %xmm11, %xmm14, %xmm6
vsubps %xmm4, %xmm6, %xmm4
vmovaps %xmm2, 0x1c0(%rsp)
vmulps %xmm2, %xmm14, %xmm6
vmulps %xmm1, %xmm10, %xmm1
vsubps %xmm6, %xmm1, %xmm1
vmovaps %xmm8, %xmm6
vmulps %xmm1, %xmm15, %xmm1
vmulps %xmm4, %xmm12, %xmm4
vaddps %xmm4, %xmm1, %xmm1
vmovaps 0xf0(%rsp), %xmm14
vmulps %xmm5, %xmm14, %xmm4
vaddps %xmm1, %xmm4, %xmm8
vsubps %xmm13, %xmm7, %xmm9
vaddps %xmm7, %xmm13, %xmm1
vmovaps 0x10(%rsp), %xmm7
vmovaps 0xb0(%rsp), %xmm4
vsubps %xmm4, %xmm3, %xmm2
vaddps %xmm4, %xmm3, %xmm3
vmovaps 0x20(%rsp), %xmm4
vsubps %xmm4, %xmm0, %xmm13
vaddps %xmm4, %xmm0, %xmm0
vmulps %xmm3, %xmm13, %xmm4
vmulps %xmm0, %xmm2, %xmm5
vsubps %xmm4, %xmm5, %xmm4
vmulps %xmm0, %xmm9, %xmm0
vmulps %xmm1, %xmm13, %xmm5
vsubps %xmm0, %xmm5, %xmm0
vmovaps %xmm2, 0x1a0(%rsp)
vmulps %xmm2, %xmm1, %xmm1
vmovaps %xmm9, 0x1b0(%rsp)
vmulps %xmm3, %xmm9, %xmm3
vsubps %xmm1, %xmm3, %xmm1
vmovaps %xmm15, 0x200(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vmovaps %xmm12, 0x1f0(%rsp)
vmulps %xmm0, %xmm12, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm4, %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm6, %xmm8, %xmm1
vaddps %xmm1, %xmm0, %xmm1
vbroadcastss 0x1ce5eeb(%rip), %xmm5 # 0x1f20ec4
vmovaps %xmm1, 0xa0(%rsp)
vandps %xmm5, %xmm1, %xmm1
vbroadcastss 0x1ce5edd(%rip), %xmm3 # 0x1f20ecc
vmulps %xmm3, %xmm1, %xmm1
vminps %xmm8, %xmm6, %xmm3
vminps %xmm0, %xmm3, %xmm3
vbroadcastss 0x1ce5ebb(%rip), %xmm4 # 0x1f20ec0
vxorps %xmm4, %xmm1, %xmm4
vcmpnltps %xmm4, %xmm3, %xmm3
vmaxps %xmm8, %xmm6, %xmm4
vmaxps %xmm0, %xmm4, %xmm0
vcmpleps %xmm1, %xmm0, %xmm0
vorps %xmm0, %xmm3, %xmm0
vtestps %xmm7, %xmm0
vmovaps %xmm7, 0x10(%rsp)
vandps %xmm7, %xmm0, %xmm12
vbroadcastss -0x18(%r13), %xmm0
vmovaps %xmm0, 0x440(%rsp)
vbroadcastss -0x14(%r13), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vbroadcastss -0x10(%r13), %xmm0
vmovaps %xmm0, 0x420(%rsp)
je 0x23b93a
vmovaps %xmm14, %xmm9
vmovaps %xmm8, 0xb0(%rsp)
vmovaps %xmm6, 0x20(%rsp)
vmovaps 0x210(%rsp), %xmm8
vmovaps 0x1c0(%rsp), %xmm15
vmulps %xmm8, %xmm15, %xmm0
vmovaps %xmm5, %xmm6
vmovaps 0x220(%rsp), %xmm5
vmulps %xmm5, %xmm11, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmovaps 0x1a0(%rsp), %xmm2
vmulps %xmm2, %xmm11, %xmm3
vmulps %xmm13, %xmm15, %xmm4
vsubps %xmm3, %xmm4, %xmm4
vandps %xmm6, %xmm0, %xmm0
vandps %xmm6, %xmm3, %xmm3
vcmpltps %xmm3, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm4, %xmm7
vmulps %xmm13, %xmm10, %xmm0
vmulps %xmm8, %xmm10, %xmm1
vmovaps 0x1e0(%rsp), %xmm8
vmulps %xmm11, %xmm8, %xmm3
vsubps %xmm3, %xmm1, %xmm1
vmovaps 0x1b0(%rsp), %xmm13
vmulps %xmm11, %xmm13, %xmm4
vsubps %xmm0, %xmm4, %xmm4
vandps %xmm6, %xmm3, %xmm3
vandps %xmm6, %xmm0, %xmm0
vcmpltps %xmm0, %xmm3, %xmm0
vblendvps %xmm0, %xmm1, %xmm4, %xmm3
vmulps %xmm15, %xmm13, %xmm0
vmulps %xmm15, %xmm8, %xmm1
vmovaps %xmm3, %xmm15
vmulps %xmm5, %xmm10, %xmm3
vmovaps %xmm6, %xmm5
vmulps %xmm2, %xmm10, %xmm4
vsubps %xmm3, %xmm1, %xmm1
vsubps %xmm0, %xmm4, %xmm4
vandps %xmm6, %xmm3, %xmm3
vandps %xmm6, %xmm0, %xmm0
vcmpltps %xmm0, %xmm3, %xmm0
vblendvps %xmm0, %xmm1, %xmm4, %xmm14
vmulps 0x200(%rsp), %xmm14, %xmm0
vmulps 0x1f0(%rsp), %xmm15, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps %xmm7, %xmm9, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vmulps 0x1d0(%rsp), %xmm14, %xmm1
vmulps 0xe0(%rsp), %xmm15, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vmovaps %xmm7, 0xf0(%rsp)
vmulps 0x230(%rsp), %xmm7, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm1, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm3
vbroadcastss 0x1cb1570(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm10
vmovaps 0x30(%r9), %xmm1
vcmpleps 0x80(%r9), %xmm10, %xmm2
vcmpleps %xmm10, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm1
vcmpneqps 0x1cb0839(%rip), %xmm0, %xmm0 # 0x1eeba10
vandps %xmm1, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm0
vtestps %xmm12, %xmm0
vandps %xmm0, %xmm12, %xmm12
je 0x23b93a
vbroadcastss 0x1cb0823(%rip), %xmm11 # 0x1eeba20
vpcmpeqd %xmm13, %xmm13, %xmm13
vmovaps 0x20(%rsp), %xmm6
vmovaps 0xb0(%rsp), %xmm8
vtestps %xmm12, %xmm12
je 0x23b325
movl %ebx, %eax
andl $0x1, %eax
addl %r8d, %eax
movl %ebx, %ecx
shrl %ecx
addl %r11d, %ecx
movzwl 0x8(%rsi,%rdi), %edx
decl %edx
vcvtsi2ss %edx, %xmm9, %xmm0
vrcpss %xmm0, %xmm0, %xmm1
vmulss %xmm0, %xmm1, %xmm0
vmovss 0x1cb5db2(%rip), %xmm3 # 0x1ef0ff8
vsubss %xmm0, %xmm3, %xmm0
vmulss %xmm0, %xmm1, %xmm0
movzwl 0xa(%rsi,%rdi), %edx
decl %edx
vcvtsi2ss %edx, %xmm9, %xmm1
vcvtsi2ss %eax, %xmm9, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps 0xa0(%rsp), %xmm4
vmulps %xmm4, %xmm2, %xmm2
vaddps %xmm2, %xmm6, %xmm2
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm2, %xmm0
vrcpss %xmm1, %xmm1, %xmm2
vmulss %xmm1, %xmm2, %xmm1
vblendvps %xmm12, %xmm0, %xmm6, %xmm6
vsubss %xmm1, %xmm3, %xmm0
vcvtsi2ss %ecx, %xmm9, %xmm1
vmulss %xmm0, %xmm2, %xmm0
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm8, %xmm1
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm1, %xmm0
vblendvps %xmm12, %xmm0, %xmm8, %xmm8
movq (%r10), %rcx
movl 0x190(%rsp,%rbx,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r14
vbroadcastss 0x34(%r14), %xmm0
vandps 0x90(%r9), %xmm0, %xmm0
vpcmpeqd 0x1cb0732(%rip), %xmm0, %xmm0 # 0x1eeba10
vtestps %xmm12, %xmm0
vmovaps 0x10(%rsp), %xmm1
jb 0x23b317
movl 0x180(%rsp,%rbx,4), %ecx
vandnps %xmm12, %xmm0, %xmm0
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x23b9f9
cmpq $0x0, 0x48(%r14)
jne 0x23b9f9
vpandn 0x10(%rsp), %xmm0, %xmm1
vandnps 0x10(%rsp), %xmm12, %xmm0
vmovaps %xmm1, 0x10(%rsp)
jmp 0x23b32b
vmovaps 0x10(%rsp), %xmm0
vtestps %xmm0, %xmm0
je 0x23b8be
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm14, 0x70(%rsp)
vmovaps %xmm15, 0x80(%rsp)
vmovaps %xmm8, 0xb0(%rsp)
vmovaps %xmm6, 0x20(%rsp)
vmovaps (%r9), %xmm0
vmovaps 0x10(%r9), %xmm3
vmovaps 0x20(%r9), %xmm5
vmovaps 0x440(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm4
vmovaps 0x430(%rsp), %xmm1
vsubps %xmm3, %xmm1, %xmm2
vmovaps 0x420(%rsp), %xmm1
vsubps %xmm5, %xmm1, %xmm11
vmovaps 0x4a0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmovaps %xmm1, 0x50(%rsp)
vmovaps 0x490(%rsp), %xmm1
vsubps %xmm3, %xmm1, %xmm15
vmovaps 0x480(%rsp), %xmm1
vsubps %xmm5, %xmm1, %xmm14
vmovaps 0x450(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm12
vmovaps 0x470(%rsp), %xmm1
vsubps %xmm3, %xmm1, %xmm13
vmovaps 0x460(%rsp), %xmm1
vsubps %xmm5, %xmm1, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vsubps %xmm4, %xmm12, %xmm1
vsubps %xmm2, %xmm13, %xmm5
vsubps %xmm11, %xmm0, %xmm10
vaddps %xmm2, %xmm13, %xmm6
vaddps %xmm0, %xmm11, %xmm7
vmulps %xmm6, %xmm10, %xmm8
vmulps %xmm7, %xmm5, %xmm9
vsubps %xmm8, %xmm9, %xmm9
vaddps %xmm4, %xmm12, %xmm8
vmulps %xmm7, %xmm1, %xmm7
vmovaps %xmm10, 0x1d0(%rsp)
vmulps %xmm10, %xmm8, %xmm10
vsubps %xmm7, %xmm10, %xmm10
vmovaps %xmm5, 0x1e0(%rsp)
vmulps %xmm5, %xmm8, %xmm7
vmovaps %xmm1, 0x1f0(%rsp)
vmulps %xmm6, %xmm1, %xmm6
vsubps %xmm7, %xmm6, %xmm7
vmovaps 0x60(%r9), %xmm8
vmulps %xmm7, %xmm8, %xmm1
vmovaps 0x50(%r9), %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm1, %xmm10, %xmm10
vmovaps 0x40(%r9), %xmm3
vmulps %xmm3, %xmm9, %xmm9
vaddps %xmm10, %xmm9, %xmm7
vmovaps %xmm15, %xmm9
vsubps %xmm15, %xmm2, %xmm0
vmovaps %xmm14, %xmm5
vsubps %xmm14, %xmm11, %xmm15
vmovaps %xmm2, 0x210(%rsp)
vaddps %xmm2, %xmm9, %xmm14
vmovaps %xmm11, 0x200(%rsp)
vaddps %xmm5, %xmm11, %xmm1
vmovaps %xmm5, %xmm11
vmulps %xmm15, %xmm14, %xmm10
vmulps %xmm1, %xmm0, %xmm2
vsubps %xmm10, %xmm2, %xmm2
vmovaps 0x50(%rsp), %xmm6
vsubps %xmm6, %xmm4, %xmm10
vmulps %xmm1, %xmm10, %xmm1
vmovaps %xmm4, 0x220(%rsp)
vaddps %xmm6, %xmm4, %xmm5
vmovaps %xmm6, %xmm4
vmulps %xmm5, %xmm15, %xmm6
vsubps %xmm1, %xmm6, %xmm1
vmovaps %xmm0, 0x1b0(%rsp)
vmulps %xmm0, %xmm5, %xmm5
vmulps %xmm14, %xmm10, %xmm6
vsubps %xmm5, %xmm6, %xmm5
vmulps %xmm5, %xmm8, %xmm5
vmovaps 0x230(%rsp), %xmm6
vmulps %xmm1, %xmm6, %xmm1
vaddps %xmm1, %xmm5, %xmm1
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm1, %xmm2, %xmm5
vsubps %xmm12, %xmm4, %xmm14
vaddps %xmm4, %xmm12, %xmm0
vsubps %xmm13, %xmm9, %xmm12
vaddps %xmm13, %xmm9, %xmm1
vmovaps 0xe0(%rsp), %xmm2
vsubps %xmm2, %xmm11, %xmm9
vaddps %xmm2, %xmm11, %xmm2
vmulps %xmm1, %xmm9, %xmm11
vmulps %xmm2, %xmm12, %xmm4
vsubps %xmm11, %xmm4, %xmm11
vmulps %xmm2, %xmm14, %xmm2
vmulps %xmm0, %xmm9, %xmm4
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm0, %xmm12, %xmm0
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm8, 0xe0(%rsp)
vmulps %xmm0, %xmm8, %xmm0
vmovaps %xmm6, %xmm8
vmulps %xmm2, %xmm6, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps %xmm3, 0x1c0(%rsp)
vmulps %xmm3, %xmm11, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm5, %xmm7, %xmm1
vaddps %xmm1, %xmm0, %xmm1
vmovaps %xmm1, 0x100(%rsp)
vbroadcastss 0x1ce5925(%rip), %xmm2 # 0x1f20ec4
vmovaps %xmm2, %xmm6
vandps %xmm2, %xmm1, %xmm1
vbroadcastss 0x1ce591c(%rip), %xmm2 # 0x1f20ecc
vmulps %xmm2, %xmm1, %xmm1
vminps %xmm5, %xmm7, %xmm2
vminps %xmm0, %xmm2, %xmm2
vbroadcastss 0x1ce58fb(%rip), %xmm3 # 0x1f20ec0
vxorps %xmm3, %xmm1, %xmm3
vcmpnltps %xmm3, %xmm2, %xmm2
vmovaps %xmm7, 0xd0(%rsp)
vmaxps %xmm5, %xmm7, %xmm3
vmovaps 0x10(%rsp), %xmm7
vmaxps %xmm0, %xmm3, %xmm0
vcmpleps %xmm1, %xmm0, %xmm0
vorps %xmm0, %xmm2, %xmm0
vtestps %xmm7, %xmm0
vmovaps %xmm7, 0x10(%rsp)
vandps %xmm7, %xmm0, %xmm0
je 0x23b995
vmovaps %xmm12, %xmm4
vmovaps %xmm5, 0x50(%rsp)
vmovaps 0x1d0(%rsp), %xmm11
vmovaps 0x1b0(%rsp), %xmm13
vmovaps %xmm0, 0x1a0(%rsp)
vmulps %xmm11, %xmm13, %xmm0
vmovaps %xmm6, %xmm5
vmovaps 0x1e0(%rsp), %xmm6
vmulps %xmm6, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps %xmm4, %xmm15, %xmm2
vmulps %xmm9, %xmm13, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vandps %xmm5, %xmm0, %xmm0
vandps %xmm5, %xmm2, %xmm2
vcmpltps %xmm2, %xmm0, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm4
vmulps %xmm9, %xmm10, %xmm0
vmulps %xmm11, %xmm10, %xmm1
vmovaps 0x1f0(%rsp), %xmm11
vmulps %xmm15, %xmm11, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmulps %xmm15, %xmm14, %xmm3
vsubps %xmm0, %xmm3, %xmm3
vandps %xmm5, %xmm2, %xmm2
vandps %xmm5, %xmm0, %xmm0
vcmpltps %xmm0, %xmm2, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm15
vmulps %xmm13, %xmm14, %xmm0
vmulps %xmm13, %xmm11, %xmm1
vmulps %xmm6, %xmm10, %xmm2
vmulps %xmm12, %xmm10, %xmm3
vsubps %xmm2, %xmm1, %xmm1
vsubps %xmm0, %xmm3, %xmm3
vandps %xmm5, %xmm2, %xmm2
vmovaps %xmm5, %xmm6
vandps %xmm5, %xmm0, %xmm0
vcmpltps %xmm0, %xmm2, %xmm0
vblendvps %xmm0, %xmm1, %xmm3, %xmm9
vmulps 0xe0(%rsp), %xmm9, %xmm0
vmulps %xmm15, %xmm8, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x1c0(%rsp), %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm0, %xmm0
vmulps 0x200(%rsp), %xmm9, %xmm1
vmulps 0x210(%rsp), %xmm15, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmovaps %xmm4, 0x120(%rsp)
vmulps 0x220(%rsp), %xmm4, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vaddps %xmm1, %xmm1, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm3
vbroadcastss 0x1cb0fe4(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm10
vmovaps 0x30(%r9), %xmm1
vcmpleps 0x80(%r9), %xmm10, %xmm2
vcmpleps %xmm10, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm1
vcmpneqps 0x1cb02ad(%rip), %xmm0, %xmm0 # 0x1eeba10
vandps %xmm1, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vpsrad $0x1f, %xmm0, %xmm0
vmovaps 0x1a0(%rsp), %xmm1
vtestps %xmm1, %xmm0
vandps %xmm1, %xmm0, %xmm0
je 0x23b995
vbroadcastss 0x1cb028e(%rip), %xmm11 # 0x1eeba20
vpcmpeqd %xmm13, %xmm13, %xmm13
vmovaps 0x50(%rsp), %xmm5
vtestps %xmm0, %xmm0
je 0x23b8f4
vmovaps 0x100(%rsp), %xmm8
vsubps 0xd0(%rsp), %xmm8, %xmm2
vsubps %xmm5, %xmm8, %xmm1
movl %ebx, %ecx
andl $0x1, %ecx
addl %r8d, %ecx
movl %ebx, %eax
movzwl 0x8(%rsi,%rdi), %edx
decl %edx
vcvtsi2ss %edx, %xmm14, %xmm3
vrcpss %xmm3, %xmm3, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmovss 0x1cb5815(%rip), %xmm7 # 0x1ef0ff8
vsubss %xmm3, %xmm7, %xmm3
movzwl 0xa(%rsi,%rdi), %edx
decl %edx
vcvtsi2ss %edx, %xmm14, %xmm5
vmulss %xmm3, %xmm4, %xmm3
vcvtsi2ss %ecx, %xmm14, %xmm4
shrl %eax
addl %r11d, %eax
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm8, %xmm4
vaddps %xmm4, %xmm2, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm4, %xmm3
vrcpss %xmm5, %xmm5, %xmm4
vmulss %xmm5, %xmm4, %xmm5
vsubss %xmm5, %xmm7, %xmm5
vblendvps %xmm0, %xmm3, %xmm2, %xmm2
vmovaps %xmm2, 0xd0(%rsp)
vmulss %xmm5, %xmm4, %xmm2
vcvtsi2ss %eax, %xmm14, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm3, %xmm1, %xmm3
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm3, %xmm2
vblendvps %xmm0, %xmm2, %xmm1, %xmm5
movq (%r10), %rcx
movl 0x190(%rsp,%rbx,4), %eax
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %r14
vbroadcastss 0x34(%r14), %xmm1
vandps 0x90(%r9), %xmm1, %xmm1
vpcmpeqd 0x1cb0190(%rip), %xmm1, %xmm1 # 0x1eeba10
vtestps %xmm0, %xmm1
jb 0x23b8f4
movl 0x180(%rsp,%rbx,4), %ecx
vandnps %xmm0, %xmm1, %xmm0
movq 0x10(%r10), %rdx
cmpq $0x0, 0x10(%rdx)
jne 0x23bcd3
cmpq $0x0, 0x48(%r14)
jne 0x23bcd3
vmovdqa 0x10(%rsp), %xmm1
vpandn %xmm1, %xmm0, %xmm1
vmovdqa %xmm1, 0x10(%rsp)
jmp 0x23b8f4
vmovaps %xmm6, 0xd0(%rsp)
vmovaps %xmm8, %xmm5
vmovaps 0xa0(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovdqa 0xf0(%rsp), %xmm0
vmovdqa %xmm0, 0x120(%rsp)
vmovaps %xmm14, %xmm9
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm9, 0x70(%rsp)
vmovaps %xmm15, 0x80(%rsp)
vmovaps %xmm5, 0x50(%rsp)
vmovaps 0x10(%rsp), %xmm0
vtestps %xmm0, %xmm0
je 0x23bfdd
leaq 0x1(%rbx), %rax
addq $0x40, %r13
cmpq $0x3, %rbx
movq %rax, %rbx
jb 0x23ad4d
jmp 0x23bfdd
vmovaps 0xd0(%rsp), %xmm6
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x100(%rsp), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x80(%rsp), %xmm15
vmovaps 0x70(%rsp), %xmm14
vbroadcastss 0x1cb0095(%rip), %xmm11 # 0x1eeba20
vpcmpeqd %xmm13, %xmm13, %xmm13
jmp 0x23b211
vmovaps 0x20(%rsp), %xmm1
vmovaps %xmm1, 0xd0(%rsp)
vmovaps 0xb0(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm1
vmovaps %xmm1, 0x100(%rsp)
vmovaps 0x60(%rsp), %xmm10
vmovaps 0xf0(%rsp), %xmm1
vmovaps %xmm1, 0x120(%rsp)
vmovaps 0x80(%rsp), %xmm15
vmovaps 0x70(%rsp), %xmm9
vbroadcastss 0x1cb0031(%rip), %xmm11 # 0x1eeba20
vpcmpeqd %xmm13, %xmm13, %xmm13
jmp 0x23b79d
vmovaps 0xa0(%rsp), %xmm3
vandps %xmm5, %xmm3, %xmm1
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cb0cfd(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cb55bc(%rip), %xmm3 # 0x1ef0fe8
vcmpnltps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm2
vminps %xmm4, %xmm2, %xmm2
vmulps %xmm1, %xmm8, %xmm1
vminps %xmm4, %xmm1, %xmm1
movq 0x8(%r10), %rdx
vmovd %eax, %xmm3
vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovd %ecx, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovaps 0xf0(%rsp), %xmm5
vmovaps %xmm5, 0x350(%rsp)
vmovaps %xmm15, 0x360(%rsp)
vmovaps %xmm14, 0x370(%rsp)
vmovaps %xmm2, 0x380(%rsp)
vmovaps %xmm1, 0x390(%rsp)
vmovdqa %xmm4, 0x3a0(%rsp)
vmovdqa %xmm3, 0x3b0(%rsp)
leaq 0x3c0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x3c0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x3d0(%rsp)
vmovaps 0x80(%r9), %xmm3
vblendvps %xmm0, %xmm10, %xmm3, %xmm1
vmovaps %xmm1, 0x80(%r9)
vmovaps %xmm0, 0xc0(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x140(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x148(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x150(%rsp)
movq %r9, 0x158(%rsp)
leaq 0x350(%rsp), %rax
movq %rax, 0x160(%rsp)
movl $0x4, 0x168(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
movq %rdi, 0x98(%rsp)
movl %r8d, 0xc(%rsp)
movl %r11d, 0x8(%rsp)
vmovaps %xmm6, 0x20(%rsp)
vmovaps %xmm8, 0xb0(%rsp)
vmovaps %xmm15, 0x80(%rsp)
vmovaps %xmm14, 0x70(%rsp)
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm12, 0x50(%rsp)
vmovaps %xmm3, 0xe0(%rsp)
je 0x23bbf8
leaq 0x140(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0xe0(%rsp), %xmm3
vmovaps 0x50(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm15
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0x20(%rsp), %xmm6
movl 0x8(%rsp), %r11d
movl 0xc(%rsp), %r8d
movq 0x98(%rsp), %rdi
movq 0x118(%rsp), %rsi
vbroadcastss 0x1cafe32(%rip), %xmm11 # 0x1eeba20
movq 0x40(%rsp), %r9
movq 0x48(%rsp), %r10
vmovdqa 0xc0(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x23bf79
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vpcmpeqd %xmm13, %xmm13, %xmm13
je 0x23bc9a
testb $0x2, (%rcx)
jne 0x23bc2a
testb $0x40, 0x3e(%r14)
je 0x23bc9a
leaq 0x140(%rsp), %rdi
movq %rsi, %r14
vzeroupper
callq *%rax
vmovaps 0xe0(%rsp), %xmm3
vmovaps 0x50(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm15
vmovaps 0xb0(%rsp), %xmm8
vmovaps 0x20(%rsp), %xmm6
movl 0x8(%rsp), %r11d
movl 0xc(%rsp), %r8d
movq 0x98(%rsp), %rdi
movq %r14, %rsi
vpcmpeqd %xmm13, %xmm13, %xmm13
vbroadcastss 0x1cafd90(%rip), %xmm11 # 0x1eeba20
movq 0x40(%rsp), %r9
movq 0x48(%rsp), %r10
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0xc0(%rsp), %xmm0, %xmm1
vpxor %xmm1, %xmm13, %xmm0
movq 0x158(%rsp), %rax
vbroadcastss 0x1cb0ec8(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x23bf8a
vmovaps 0x100(%rsp), %xmm3
vandps %xmm6, %xmm3, %xmm1
vrcpps %xmm3, %xmm2
vmulps %xmm2, %xmm3, %xmm3
vbroadcastss 0x1cb0a23(%rip), %xmm4 # 0x1eec714
vsubps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm2, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vbroadcastss 0x1cb52e2(%rip), %xmm3 # 0x1ef0fe8
vcmpnltps %xmm3, %xmm1, %xmm1
vandps %xmm2, %xmm1, %xmm1
vmulps 0xd0(%rsp), %xmm1, %xmm2
vminps %xmm4, %xmm2, %xmm2
vmulps %xmm1, %xmm5, %xmm1
vminps %xmm4, %xmm1, %xmm1
movq 0x8(%r10), %rdx
vmovd %eax, %xmm3
vpshufd $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmovd %ecx, %xmm4
vpshufd $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmovaps 0x120(%rsp), %xmm6
vmovaps %xmm6, 0x350(%rsp)
vmovaps %xmm15, 0x360(%rsp)
vmovaps %xmm9, 0x370(%rsp)
vmovaps %xmm2, 0x380(%rsp)
vmovaps %xmm1, 0x390(%rsp)
vmovdqa %xmm4, 0x3a0(%rsp)
vmovdqa %xmm3, 0x3b0(%rsp)
leaq 0x3c0(%rsp), %rax
vcmptrueps %ymm1, %ymm1, %ymm1
vmovups %ymm1, (%rax)
vbroadcastss (%rdx), %xmm1
vmovaps %xmm1, 0x3c0(%rsp)
vbroadcastss 0x4(%rdx), %xmm1
vmovaps %xmm1, 0x3d0(%rsp)
vmovaps 0x80(%r9), %xmm3
vblendvps %xmm0, %xmm10, %xmm3, %xmm1
vmovaps %xmm1, 0x80(%r9)
vmovaps %xmm0, 0xc0(%rsp)
leaq 0xc0(%rsp), %rax
movq %rax, 0x140(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x148(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x150(%rsp)
movq %r9, 0x158(%rsp)
leaq 0x350(%rsp), %rax
movq %rax, 0x160(%rsp)
movl $0x4, 0x168(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm5, 0x50(%rsp)
vmovaps %xmm15, 0x80(%rsp)
vmovaps %xmm9, 0x70(%rsp)
vmovaps %xmm10, 0x60(%rsp)
movq %rdi, 0x98(%rsp)
movl %r8d, 0xc(%rsp)
movl %r11d, 0x8(%rsp)
vmovaps %xmm3, 0x20(%rsp)
je 0x23beb3
leaq 0x140(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x20(%rsp), %xmm3
movl 0x8(%rsp), %r11d
movl 0xc(%rsp), %r8d
movq 0x98(%rsp), %rdi
movq 0x118(%rsp), %rsi
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm5
vbroadcastss 0x1cafb77(%rip), %xmm11 # 0x1eeba20
movq 0x40(%rsp), %r9
movq 0x48(%rsp), %r10
vmovdqa 0xc0(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x23bfab
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vpcmpeqd %xmm13, %xmm13, %xmm13
je 0x23bf43
testb $0x2, (%rcx)
jne 0x23bee5
testb $0x40, 0x3e(%r14)
je 0x23bf43
leaq 0x140(%rsp), %rdi
movq %rsi, %r14
vzeroupper
callq *%rax
vmovaps 0x20(%rsp), %xmm3
movl 0x8(%rsp), %r11d
movl 0xc(%rsp), %r8d
movq 0x98(%rsp), %rdi
movq %r14, %rsi
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm5
vpcmpeqd %xmm13, %xmm13, %xmm13
vbroadcastss 0x1cafae7(%rip), %xmm11 # 0x1eeba20
movq 0x40(%rsp), %r9
movq 0x48(%rsp), %r10
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0xc0(%rsp), %xmm0, %xmm1
vpxor %xmm1, %xmm13, %xmm0
movq 0x158(%rsp), %rax
vbroadcastss 0x1cb0c1f(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x23bfbc
vpcmpeqd 0x1cafa8f(%rip), %xmm0, %xmm0 # 0x1eeba10
vpcmpeqd %xmm13, %xmm13, %xmm13
vpxor %xmm0, %xmm13, %xmm0
vpslld $0x1f, %xmm0, %xmm1
vpsrad $0x1f, %xmm1, %xmm0
movq 0x110(%rsp), %rax
vblendvps %xmm1, (%rax), %xmm3, %xmm1
vmovaps %xmm1, (%rax)
jmp 0x23b311
vpcmpeqd 0x1cafa5d(%rip), %xmm0, %xmm0 # 0x1eeba10
vpcmpeqd %xmm13, %xmm13, %xmm13
vpxor %xmm0, %xmm13, %xmm0
vpslld $0x1f, %xmm0, %xmm1
vpsrad $0x1f, %xmm1, %xmm0
movq 0x110(%rsp), %rax
vblendvps %xmm1, (%rax), %xmm3, %xmm1
vmovaps %xmm1, (%rax)
jmp 0x23b8ac
vmovaps 0x3f0(%rsp), %xmm0
vmovaps 0x10(%rsp), %xmm1
vtestps %xmm0, %xmm1
vandps %xmm0, %xmm1, %xmm14
sete %al
movq 0x258(%rsp), %r13
pushq $0x8
popq %rdi
vbroadcastss 0x1ce3f04(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1ce3eff(%rip), %xmm12 # 0x1f1ff14
movq 0x260(%rsp), %rdx
movq 0x268(%rsp), %rsi
jmp 0x23c029
xorl %eax, %eax
testb %al, %al
jne 0x23c03a
leaq -0x1(%rsi), %rax
andq %rax, %rsi
jne 0x23a97b
movq 0x250(%rsp), %rax
incq %rax
cmpq 0x248(%rsp), %rax
jne 0x23a92f
vxorps %xmm13, %xmm14, %xmm0
vorps 0x130(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x130(%rsp)
vtestps %xmm13, %xmm0
jb 0x23c096
vmovaps 0x340(%rsp), %xmm1
vbroadcastss 0x1cb0b01(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x340(%rsp)
xorl %eax, %eax
jmp 0x23c099
pushq $0x3
popq %rax
cmpl $0x3, %eax
jne 0x23a6fa
jmp 0x23c174
pushq $0x2
jmp 0x23c098
vmovaps %xmm14, 0x20(%rsp)
bsfq %r14, %rcx
movq 0x240(%rsp), %rdi
movq 0x178(%rsp), %rsi
movq %rbp, %rdx
movq %rcx, 0x10(%rsp)
leaq 0x3f(%rsp), %r8
pushq %r10
leaq 0x278(%rsp), %rax
pushq %rax
vzeroupper
callq 0x28ce16
popq %rcx
popq %rdx
testb %al, %al
je 0x23c0f8
movq 0x10(%rsp), %rax
orl $-0x1, 0x130(%rsp,%rax,4)
leaq -0x1(%r14), %rax
andq %rax, %r14
movq 0x48(%rsp), %r10
movq 0x40(%rsp), %r9
jne 0x23c0b1
vmovaps 0x130(%rsp), %xmm0
vpcmpeqd %xmm13, %xmm13, %xmm13
vtestps %xmm13, %xmm0
pushq $0x3
popq %rax
vbroadcastss 0x1caf8f6(%rip), %xmm11 # 0x1eeba20
pushq $0x8
popq %rdi
vbroadcastss 0x1ce3dda(%rip), %xmm10 # 0x1f1ff10
vbroadcastss 0x1ce3dd5(%rip), %xmm12 # 0x1f1ff14
vmovaps 0x20(%rsp), %xmm14
jb 0x23a740
vmovaps 0x340(%rsp), %xmm1
vbroadcastss 0x1cb0a27(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm0, %xmm2, %xmm1, %xmm0
vmovaps %xmm0, 0x340(%rsp)
pushq $0x2
popq %rax
jmp 0x23a740
vmovaps 0x3e0(%rsp), %xmm0
vandps 0x130(%rsp), %xmm0, %xmm0
vbroadcastss 0x1cb09f5(%rip), %xmm1 # 0x1eecb84
movq 0x110(%rsp), %rax
vmaskmovps %xmm1, %xmm0, (%rax)
addq $0x1c58, %rsp # imm = 0x1C58
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %r9, %rdx
movq %r10, %rcx
addq $0x1c58, %rsp # imm = 0x1C58
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
jmp 0x28aeca
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::TriangleMIntersectorKMoeller<4, 4, true>>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::avx::MoellerTrumboreIntersectorK<4, 4>&, embree::RayK<4>&, embree::avx::TravRayK<4, false> const&, embree::RayQueryContext*)
|
// Single-ray shadow (occlusion) query for lane k of a K-wide ray packet.
// Extracts one scalar ray from the SOA packet (tray), traverses the BVH with a
// per-node intersection test, and returns true as soon as any primitive in a
// leaf reports occlusion; returns false only after the traversal stack empties.
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
// Re-pack lane k of the K-wide SOA traversal ray into a single-ray (N-wide
// node-test) layout; tnear/tfar are the scalar interval for this lane.
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
// NOTE: the label binds to the loop body; `goto pop` below re-enters here to
// pop the next stack entry after an inner-node test hits no children.
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty -> traversal done, no occluder found
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear; // mask: bit per child hit; tNear: per-child entry distances
STAT3(shadow.trav_nodes, 1, 1, 1);
// Returns false when `cur` is a leaf (cannot be intersected as a node),
// which exits the down-traversal and falls through to leaf handling.
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; } // undo the stat increment above
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
// Any-hit ordering: picks one hit child to continue with and pushes the
// remaining hit children onto [stackPtr, stackEnd).
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num); // num = primitive count in this leaf
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
// Mark lane k occluded: tfar = -inf is the packet-level "shadowed" flag
// read by the caller (matches the vblendvps/neg-inf stores in the asm above).
ray.tfar[k] = neg_inf;
return true;
}
// A primitive intersector may defer a subtree (e.g. an instanced BVH) via
// lazy_node instead of recursing; push it for the outer pop loop.
// NOTE(review): no stackPtr != stackEnd check here — presumably stackSizeSingle
// is sized so this cannot overflow; confirm against the traverser's push bound.
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa88, %rsp # imm = 0xA88
movq %r9, %rbx
movq %rcx, %r14
movq 0xac0(%rsp), %rax
leaq 0x2e8(%rsp), %rdi
movq %rdx, -0x8(%rdi)
vbroadcastss (%rax,%rcx,4), %xmm3
vbroadcastss 0x10(%rax,%rcx,4), %xmm4
vbroadcastss 0x20(%rax,%rcx,4), %xmm5
vbroadcastss 0x60(%rax,%rcx,4), %xmm6
vbroadcastss 0x70(%rax,%rcx,4), %xmm7
vbroadcastss 0x80(%rax,%rcx,4), %xmm8
movslq 0x90(%rax,%rcx,4), %r8
movslq 0xa0(%rax,%rcx,4), %r9
movslq 0xb0(%rax,%rcx,4), %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rdx
xorq $0x10, %rdx
movq %r10, %r12
xorq $0x10, %r12
vbroadcastss 0xc0(%rax,%rcx,4), %xmm9
vbroadcastss 0xd0(%rax,%rcx,4), %xmm10
pushq $0x1
popq %rax
shll %cl, %eax
movq %rdx, %rcx
cltq
shlq $0x4, %rax
addq 0x1ee5865(%rip), %rax # 0x2124730
movq %rax, 0x150(%rsp)
leaq 0x2e0(%rsp), %r15
pushq $0x6
popq %rsi
vmovaps %xmm3, 0xd0(%rsp)
vmovaps %xmm4, 0xc0(%rsp)
vmovaps %xmm5, 0xb0(%rsp)
vmovaps %xmm6, 0xa0(%rsp)
vmovaps %xmm7, 0x90(%rsp)
vmovaps %xmm8, 0x80(%rsp)
movq %r12, 0x58(%rsp)
vmovaps %xmm9, 0x70(%rsp)
vmovaps %xmm10, 0x60(%rsp)
movq %rdi, %rax
cmpq %r15, %rdi
je 0x23f7d9
leaq -0x8(%rax), %rdi
movq %rax, 0x158(%rsp)
movq -0x8(%rax), %rbp
testb $0x8, %bpl
jne 0x23efd0
vmovaps 0x20(%rbp,%r8), %xmm0
vsubps %xmm3, %xmm0, %xmm0
vmulps %xmm0, %xmm6, %xmm0
vmovaps 0x20(%rbp,%r9), %xmm1
vsubps %xmm4, %xmm1, %xmm1
vmulps %xmm1, %xmm7, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbp,%r10), %xmm1
vsubps %xmm5, %xmm1, %xmm1
vmulps %xmm1, %xmm8, %xmm1
vpmaxsd %xmm9, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rbp,%r11), %xmm1
vsubps %xmm3, %xmm1, %xmm1
vmulps %xmm1, %xmm6, %xmm1
vmovaps 0x20(%rbp,%rcx), %xmm2
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm2, %xmm7, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbp,%r12), %xmm2
vsubps %xmm5, %xmm2, %xmm2
vmulps %xmm2, %xmm8, %xmm2
vpminsd %xmm10, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r13d
testb $0x8, %bpl
jne 0x23f007
testq %r13, %r13
je 0x23f00b
andq $-0x10, %rbp
bsfq %r13, %rax
leaq -0x1(%r13), %rdx
movq (%rbp,%rax,8), %rax
prefetcht0 (%rax)
prefetcht0 0x40(%rax)
andq %r13, %rdx
jne 0x23f010
movq %rax, %rbp
xorl %edx, %edx
testl %edx, %edx
je 0x23ef41
jmp 0x23f055
movl %esi, %edx
jmp 0x23effd
pushq $0x4
popq %rdx
jmp 0x23effd
movq %r15, %rsi
movq %rcx, %r15
movq %rax, (%rdi)
addq $0x8, %rdi
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rax
movq (%rbp,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %rdx, %rax
je 0x23f047
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rax, %rcx
leaq -0x1(%rax), %rdx
jmp 0x23f025
movq %rcx, %rbp
movq %r15, %rcx
movq %rsi, %r15
pushq $0x6
popq %rsi
jmp 0x23effb
cmpl $0x6, %edx
jne 0x23f7a1
movl %ebp, %eax
andl $0xf, %eax
xorl %edx, %edx
addq $-0x8, %rax
setne 0xf(%rsp)
je 0x23f7a1
andq $-0x10, %rbp
xorl %r12d, %r12d
imulq $0xb0, %r12, %r15
vmovaps 0x80(%rbp,%r15), %xmm8
vmovaps 0x40(%rbp,%r15), %xmm5
vmulps %xmm5, %xmm8, %xmm0
vmovaps 0x70(%rbp,%r15), %xmm10
vmovaps 0x50(%rbp,%r15), %xmm7
vmulps %xmm7, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x60(%rbp,%r15), %xmm11
vmulps %xmm7, %xmm11, %xmm1
vmovaps (%rbp,%r15), %xmm3
vmovaps 0x10(%rbp,%r15), %xmm13
vmovaps 0x20(%rbp,%r15), %xmm0
vmovaps 0x30(%rbp,%r15), %xmm9
vmulps %xmm9, %xmm8, %xmm2
vsubps %xmm1, %xmm2, %xmm6
vmulps %xmm9, %xmm10, %xmm2
vmulps %xmm5, %xmm11, %xmm12
vsubps %xmm2, %xmm12, %xmm4
vbroadcastss (%rbx,%r14,4), %xmm12
vsubps %xmm12, %xmm3, %xmm2
vbroadcastss 0x10(%rbx,%r14,4), %xmm12
vsubps %xmm12, %xmm13, %xmm3
vbroadcastss 0x20(%rbx,%r14,4), %xmm12
vsubps %xmm12, %xmm0, %xmm1
vbroadcastss 0x50(%rbx,%r14,4), %xmm12
vbroadcastss 0x60(%rbx,%r14,4), %xmm13
vmulps %xmm1, %xmm12, %xmm14
vmulps %xmm3, %xmm13, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x40(%rbx,%r14,4), %xmm15
vmulps %xmm2, %xmm13, %xmm0
vmovaps %xmm1, 0xe0(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm3, 0xf0(%rsp)
vmulps %xmm3, %xmm15, %xmm1
vmovaps %xmm2, 0x100(%rsp)
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm4, 0x30(%rsp)
vmulps %xmm4, %xmm13, %xmm2
vmovaps 0x10(%rsp), %xmm13
vmovaps %xmm6, 0x40(%rsp)
vmulps %xmm6, %xmm12, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm15, %xmm13, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm8, %xmm8
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm10, %xmm8, %xmm8
vmulps %xmm14, %xmm11, %xmm10
vaddps %xmm8, %xmm10, %xmm8
vmulps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm5, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1ce1d05(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm2, %xmm10
vxorps %xmm8, %xmm10, %xmm7
vmulps %xmm14, %xmm9, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm10, %xmm8
vxorps %xmm9, %xmm9, %xmm9
vcmpnltps %xmm9, %xmm7, %xmm0
vcmpnltps %xmm9, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1ce1cd5(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm5
vcmpneqps %xmm2, %xmm9, %xmm1
vandps %xmm1, %xmm0, %xmm9
vaddps %xmm7, %xmm8, %xmm0
vcmpleps %xmm5, %xmm0, %xmm11
vtestps %xmm11, %xmm9
jne 0x23f222
incq %r12
cmpq %rax, %r12
setb 0xf(%rsp)
jne 0x23f07b
jmp 0x23f752
vandps %xmm11, %xmm9, %xmm9
vmovaps 0xe0(%rsp), %xmm0
vmulps 0x30(%rsp), %xmm0, %xmm0
vmovaps 0xf0(%rsp), %xmm1
vmulps 0x40(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x100(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm10, %xmm3
vbroadcastss 0x30(%rbx,%r14,4), %xmm0
vmulps %xmm0, %xmm5, %xmm0
vcmpltps %xmm3, %xmm0, %xmm0
vbroadcastss 0x80(%rbx,%r14,4), %xmm1
vmulps %xmm1, %xmm5, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vandps %xmm1, %xmm0, %xmm4
vtestps %xmm9, %xmm4
je 0x23f20c
addq %rbp, %r15
vandps %xmm4, %xmm9, %xmm0
vmovaps %xmm7, 0x190(%rsp)
vmovaps %xmm8, 0x1a0(%rsp)
vmovaps %xmm3, 0x1b0(%rsp)
vmovaps %xmm5, 0x1c0(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm13, 0x220(%rsp)
vmovaps 0x40(%rsp), %xmm1
vmovaps %xmm1, 0x230(%rsp)
vmovaps 0x30(%rsp), %xmm1
vmovaps %xmm1, 0x240(%rsp)
movq %rax, 0xe0(%rsp)
movq 0xac8(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x100(%rsp)
vrcpps %xmm5, %xmm1
vmulps %xmm1, %xmm5, %xmm2
vbroadcastss 0x1cad405(%rip), %xmm3 # 0x1eec714
vsubps %xmm2, %xmm3, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps 0x1b0(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x210(%rsp)
vmulps 0x190(%rsp), %xmm1, %xmm2
vmovaps %xmm2, 0x1f0(%rsp)
vmulps 0x1a0(%rsp), %xmm1, %xmm1
vmovaps %xmm1, 0x200(%rsp)
vmovmskps %xmm0, %eax
movq %rax, 0x10(%rsp)
movq %rcx, 0x20(%rsp)
movq 0x10(%rsp), %rax
bsfq %rax, %rax
movq %rax, 0x30(%rsp)
movl 0x90(%r15,%rax,4), %eax
movq 0x100(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq %rax, 0xf0(%rsp)
movq (%rcx,%rax,8), %rax
movl 0x90(%rbx,%r14,4), %ecx
movq %rax, 0x40(%rsp)
testl %ecx, 0x34(%rax)
je 0x23f414
movq 0xac8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
vmovaps 0xd0(%rsp), %xmm3
vmovaps 0xc0(%rsp), %xmm4
vmovaps 0xb0(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm6
vmovaps 0x90(%rsp), %xmm7
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x60(%rsp), %xmm10
jne 0x23f491
movq 0x40(%rsp), %rax
cmpq $0x0, 0x48(%rax)
jne 0x23f491
xorl %eax, %eax
movq 0x20(%rsp), %rcx
jmp 0x23f470
movq 0x10(%rsp), %rax
movq 0x30(%rsp), %rcx
btcq %rcx, %rax
movq %rax, 0x10(%rsp)
movb $0x1, %al
vmovaps 0xd0(%rsp), %xmm3
vmovaps 0xc0(%rsp), %xmm4
vmovaps 0xb0(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm6
vmovaps 0x90(%rsp), %xmm7
vmovaps 0x80(%rsp), %xmm8
movq 0x20(%rsp), %rcx
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x60(%rsp), %xmm10
testb %al, %al
je 0x23f7b4
cmpq $0x0, 0x10(%rsp)
movq 0xe0(%rsp), %rax
jne 0x23f35f
jmp 0x23f20c
movq %r11, 0x128(%rsp)
movq %r10, 0x130(%rsp)
movq %r9, 0x138(%rsp)
movq %r8, 0x140(%rsp)
movq %rdi, 0x148(%rsp)
vmovss 0x80(%rbx,%r14,4), %xmm0
vmovss %xmm0, 0x2c(%rsp)
movq 0x30(%rsp), %rax
vmovss 0x210(%rsp,%rax,4), %xmm0
vbroadcastss 0x1f0(%rsp,%rax,4), %xmm1
vbroadcastss 0x200(%rsp,%rax,4), %xmm2
vmovss %xmm0, 0x80(%rbx,%r14,4)
movq 0xac8(%rsp), %rcx
movq 0x8(%rcx), %rcx
vmovss 0xf0(%rsp), %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vbroadcastss 0xa0(%r15,%rax,4), %xmm3
vbroadcastss 0x220(%rsp,%rax,4), %xmm4
vbroadcastss 0x230(%rsp,%rax,4), %xmm5
vbroadcastss 0x240(%rsp,%rax,4), %xmm6
vmovaps %xmm4, 0x250(%rsp)
vmovaps %xmm5, 0x260(%rsp)
vmovaps %xmm6, 0x270(%rsp)
vmovaps %xmm1, 0x280(%rsp)
vmovaps %xmm2, 0x290(%rsp)
vmovaps %xmm3, 0x2a0(%rsp)
vmovaps %xmm0, 0x2b0(%rsp)
leaq 0x2c0(%rsp), %rax
vcmptrueps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rax)
vbroadcastss (%rcx), %xmm0
vmovaps %xmm0, 0x2c0(%rsp)
vbroadcastss 0x4(%rcx), %xmm0
vmovaps %xmm0, 0x2d0(%rsp)
movq 0x150(%rsp), %rax
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x110(%rsp)
leaq 0x110(%rsp), %rax
movq %rax, 0x160(%rsp)
movq 0x40(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0x168(%rsp)
movq %rcx, 0x170(%rsp)
movq %rbx, 0x178(%rsp)
leaq 0x250(%rsp), %rax
movq %rax, 0x180(%rsp)
movl $0x4, 0x188(%rsp)
movq 0x48(%rdx), %rax
testq %rax, %rax
je 0x23f61b
leaq 0x160(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x110(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x23f697
movq 0xac8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x23f65d
testb $0x2, (%rcx)
jne 0x23f650
movq 0x40(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x23f65d
leaq 0x160(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0x110(%rsp), %xmm0, %xmm1
vpxor 0x1cac7ae(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x178(%rsp), %rax
vbroadcastss 0x1cad501(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x23f6a7
vpcmpeqd 0x1cac371(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1cac779(%rip), %xmm0, %xmm0 # 0x1eebe20
vpslld $0x1f, %xmm0, %xmm0
vtestps %xmm0, %xmm0
sete %al
jne 0x23f6d9
vmovss 0x2c(%rsp), %xmm0
vmovss %xmm0, 0x80(%rbx,%r14,4)
movq 0x10(%rsp), %rcx
movq 0x30(%rsp), %rdx
btcq %rdx, %rcx
movq %rcx, 0x10(%rsp)
vmovaps 0xd0(%rsp), %xmm3
vmovaps 0xc0(%rsp), %xmm4
vmovaps 0xb0(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm6
vmovaps 0x90(%rsp), %xmm7
movq 0x148(%rsp), %rdi
vmovaps 0x80(%rsp), %xmm8
movq 0x140(%rsp), %r8
movq 0x138(%rsp), %r9
movq 0x130(%rsp), %r10
movq 0x128(%rsp), %r11
movq 0x20(%rsp), %rcx
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x60(%rsp), %xmm10
pushq $0x6
popq %rsi
xorl %edx, %edx
jmp 0x23f470
vmovaps 0xd0(%rsp), %xmm3
vmovaps 0xc0(%rsp), %xmm4
vmovaps 0xb0(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm6
vmovaps 0x90(%rsp), %xmm7
vmovaps 0x80(%rsp), %xmm8
movq 0x58(%rsp), %r12
vmovaps 0x70(%rsp), %xmm9
vmovaps 0x60(%rsp), %xmm10
leaq 0x2e0(%rsp), %r15
testb $0x3, %dl
movq 0x158(%rsp), %rax
je 0x23ef25
jmp 0x23f7d9
testb $0x1, 0xf(%rsp)
movq 0x58(%rsp), %r12
leaq 0x2e0(%rsp), %r15
je 0x23f7a1
movl $0xff800000, 0x80(%rbx,%r14,4) # imm = 0xFF800000
pushq $0x1
popq %rdx
jmp 0x23f7a1
cmpq %r15, %rax
setne %al
addq $0xa88, %rsp # imm = 0xA88
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::TriangleMIntersectorKMoeller<4, 4, false>>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::avx::MoellerTrumboreIntersectorK<4, 4>&, embree::RayHitK<4>&, embree::avx::TravRayK<4, false> const&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayHitK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x10c8, %rsp # imm = 0x10C8
movq 0x1100(%rsp), %rax
leaq 0x190(%rsp), %rsi
movq %rdx, -0x10(%rsi)
andl $0x0, -0x8(%rsi)
vbroadcastss (%rax,%rcx,4), %xmm5
vbroadcastss 0x10(%rax,%rcx,4), %xmm7
vbroadcastss 0x20(%rax,%rcx,4), %xmm8
vbroadcastss 0x60(%rax,%rcx,4), %xmm9
vbroadcastss 0x70(%rax,%rcx,4), %xmm10
vbroadcastss 0x80(%rax,%rcx,4), %xmm11
movslq 0x90(%rax,%rcx,4), %rdx
movslq 0xa0(%rax,%rcx,4), %rdi
movslq 0xb0(%rax,%rcx,4), %r8
movq %rdx, %r10
xorq $0x10, %r10
movq %rdi, %r11
xorq $0x10, %r11
movq %r8, %rbx
xorq $0x10, %rbx
vbroadcastss 0xd0(%rax,%rcx,4), %xmm12
vbroadcastss 0xc0(%rax,%rcx,4), %xmm13
vmovaps %xmm5, 0x140(%rsp)
vmovaps %xmm7, 0x130(%rsp)
vmovaps %xmm8, 0x120(%rsp)
vmovaps %xmm9, 0x110(%rsp)
vmovaps %xmm10, 0x100(%rsp)
vmovaps %xmm11, 0xf0(%rsp)
vmovaps %xmm13, 0xe0(%rsp)
leaq 0x180(%rsp), %rax
cmpq %rax, %rsi
je 0x240ede
vmovss -0x8(%rsi), %xmm0
addq $-0x10, %rsi
vucomiss 0x80(%r9,%rcx,4), %xmm0
ja 0x2406ed
movq (%rsi), %r13
testb $0x8, %r13b
jne 0x2407ae
vmovaps 0x20(%r13,%rdx), %xmm0
vsubps %xmm5, %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm0
vmovaps 0x20(%r13,%rdi), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r13,%r8), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vpmaxsd %xmm13, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%r13,%r10), %xmm1
vsubps %xmm5, %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vmovaps 0x20(%r13,%r11), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r13,%rbx), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm2
vpminsd %xmm12, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm1
vmovmskps %xmm1, %eax
xorb $0xf, %al
movzbl %al, %r12d
vmovdqa %xmm0, 0x20(%rsp)
testb $0x8, %r13b
jne 0x2407eb
testq %r12, %r12
je 0x2407f2
andq $-0x10, %r13
bsfq %r12, %r14
leaq -0x1(%r12), %rbp
xorl %eax, %eax
movq (%r13,%r14,8), %r15
prefetcht0 (%r15)
prefetcht0 0x40(%r15)
andq %r12, %rbp
jne 0x2407f9
movq %r15, %r13
testl %eax, %eax
je 0x240716
jmp 0x2409d1
movl $0x6, %eax
jmp 0x2407de
movl $0x4, %eax
jmp 0x2407de
movl 0x20(%rsp,%r14,4), %r14d
movq %r14, -0x10(%rsp)
bsfq %rbp, %r14
movq %r14, -0x80(%rsp)
leaq -0x1(%rbp), %r14
movq %r14, (%rsp)
movq -0x80(%rsp), %r14
movq (%r13,%r14,8), %r14
prefetcht0 (%r14)
movq %r14, -0x50(%rsp)
prefetcht0 0x40(%r14)
movq -0x80(%rsp), %r14
movl 0x20(%rsp,%r14,4), %r14d
movq %r14, -0x80(%rsp)
movq (%rsp), %r14
andq %rbp, %r14
jne 0x24087e
leaq 0x10(%rsi), %r14
movq -0x10(%rsp), %r13
movq -0x80(%rsp), %rbp
cmpl %ebp, %r13d
jae 0x24086a
movq -0x50(%rsp), %r13
movq %r13, (%rsi)
movl %ebp, 0x8(%rsi)
movq %r14, %rsi
jmp 0x2407db
movq %r15, (%rsi)
movl %r13d, 0x8(%rsi)
movq %r14, %rsi
movq -0x50(%rsp), %r13
jmp 0x2407de
vmovq %r15, %xmm0
vmovd -0x10(%rsp), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovq -0x50(%rsp), %xmm1
vmovd -0x80(%rsp), %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
movq %r14, %r15
bsfq %r14, %r14
movq %r14, -0x80(%rsp)
leaq -0x1(%r15), %r14
movq -0x80(%rsp), %rbp
movq (%r13,%rbp,8), %rbp
prefetcht0 (%rbp)
prefetcht0 0x40(%rbp)
vmovq %rbp, %xmm2
movq -0x80(%rsp), %rbp
vmovd 0x20(%rsp,%rbp,4), %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
vpcmpgtd %xmm0, %xmm1, %xmm3
andq %r15, %r14
jne 0x24092e
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm4
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm4, %xmm2, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm4, %xmm2, %xmm3
vblendvps %xmm1, %xmm2, %xmm4, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%rsi)
vmovaps %xmm4, 0x10(%rsi)
vmovq %xmm3, %r13
addq $0x20, %rsi
jmp 0x2407de
bsfq %r14, %r14
movq (%r13,%r14,8), %r15
prefetcht0 (%r15)
prefetcht0 0x40(%r15)
vmovq %r15, %xmm4
vmovd 0x20(%rsp,%r14,4), %xmm6
vpunpcklqdq %xmm6, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm6[0]
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm6
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm2, %xmm4, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm2, %xmm4, %xmm3
vblendvps %xmm1, %xmm4, %xmm2, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm6, %xmm3, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm6, %xmm3, %xmm2
vblendvps %xmm1, %xmm3, %xmm6, %xmm1
vpcmpgtd %xmm1, %xmm4, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm4, %xmm6
vblendvps %xmm3, %xmm4, %xmm1, %xmm1
vmovaps %xmm0, (%rsi)
vmovaps %xmm1, 0x10(%rsi)
vmovaps %xmm6, 0x20(%rsi)
vmovq %xmm2, %r13
addq $0x30, %rsi
jmp 0x2407de
cmpl $0x6, %eax
jne 0x2406ed
movl %r13d, %eax
andl $0xf, %eax
addq $-0x8, %rax
je 0x240e90
andq $-0x10, %r13
vbroadcastss (%r9,%rcx,4), %xmm0
vmovaps %xmm0, -0x50(%rsp)
vbroadcastss 0x10(%r9,%rcx,4), %xmm0
vmovaps %xmm0, (%rsp)
vbroadcastss 0x20(%r9,%rcx,4), %xmm0
vmovaps %xmm0, -0x10(%rsp)
vbroadcastss 0x40(%r9,%rcx,4), %xmm0
vmovaps %xmm0, 0x170(%rsp)
vbroadcastss 0x50(%r9,%rcx,4), %xmm0
vmovaps %xmm0, 0x160(%rsp)
vbroadcastss 0x60(%r9,%rcx,4), %xmm0
vmovaps %xmm0, 0x150(%rsp)
xorl %ebp, %ebp
imulq $0xb0, %rbp, %r15
vmovaps 0x80(%r13,%r15), %xmm1
vmovaps 0x40(%r13,%r15), %xmm14
vmulps %xmm1, %xmm14, %xmm3
vmovaps 0x70(%r13,%r15), %xmm2
vmovaps 0x50(%r13,%r15), %xmm6
vmulps %xmm6, %xmm2, %xmm4
vsubps %xmm3, %xmm4, %xmm12
vmovaps 0x60(%r13,%r15), %xmm3
vmulps %xmm3, %xmm6, %xmm8
vmovaps (%r13,%r15), %xmm4
vsubps -0x50(%rsp), %xmm4, %xmm11
vmovaps 0x10(%r13,%r15), %xmm4
vsubps (%rsp), %xmm4, %xmm0
vmovaps 0x20(%r13,%r15), %xmm4
vsubps -0x10(%rsp), %xmm4, %xmm10
vmovaps 0x30(%r13,%r15), %xmm4
vmulps %xmm4, %xmm1, %xmm9
vsubps %xmm8, %xmm9, %xmm5
vmovaps %xmm5, -0x80(%rsp)
vmulps %xmm4, %xmm2, %xmm9
vmulps %xmm3, %xmm14, %xmm5
vsubps %xmm9, %xmm5, %xmm13
vmovaps 0x160(%rsp), %xmm7
vmulps %xmm7, %xmm10, %xmm5
vmovaps 0x150(%rsp), %xmm9
vmulps %xmm0, %xmm9, %xmm15
vsubps %xmm5, %xmm15, %xmm5
vmulps %xmm9, %xmm11, %xmm15
vmovaps 0x170(%rsp), %xmm8
vmovaps %xmm10, -0x40(%rsp)
vmulps %xmm10, %xmm8, %xmm10
vsubps %xmm15, %xmm10, %xmm10
vmovaps %xmm0, -0x30(%rsp)
vmulps %xmm0, %xmm8, %xmm15
vmovaps %xmm11, -0x20(%rsp)
vmulps %xmm7, %xmm11, %xmm11
vsubps %xmm15, %xmm11, %xmm11
vmovaps %xmm13, -0x60(%rsp)
vmulps %xmm9, %xmm13, %xmm15
vmovaps -0x80(%rsp), %xmm9
vmulps %xmm7, %xmm9, %xmm7
vaddps %xmm7, %xmm15, %xmm7
vmulps %xmm8, %xmm12, %xmm15
vaddps %xmm7, %xmm15, %xmm7
vmulps %xmm1, %xmm11, %xmm1
vmulps %xmm2, %xmm10, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vmulps %xmm5, %xmm3, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm6, %xmm11, %xmm0
vmulps %xmm10, %xmm14, %xmm2
vaddps %xmm2, %xmm0, %xmm3
vbroadcastss 0x1ce0354(%rip), %xmm0 # 0x1f20ec0
vandps %xmm0, %xmm7, %xmm2
vxorps %xmm1, %xmm2, %xmm0
vmulps %xmm5, %xmm4, %xmm1
vaddps %xmm3, %xmm1, %xmm1
vxorps %xmm1, %xmm2, %xmm1
vxorps %xmm5, %xmm5, %xmm5
vcmpnltps %xmm5, %xmm0, %xmm3
vcmpnltps %xmm5, %xmm1, %xmm4
vandps %xmm4, %xmm3, %xmm3
vbroadcastss 0x1ce0329(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm7, %xmm14
vcmpneqps %xmm5, %xmm7, %xmm4
vandps %xmm4, %xmm3, %xmm3
vaddps %xmm1, %xmm0, %xmm4
vcmpleps %xmm14, %xmm4, %xmm4
vtestps %xmm4, %xmm3
jne 0x240bca
incq %rbp
cmpq %rax, %rbp
jne 0x240a45
jmp 0x240e90
vandps %xmm4, %xmm3, %xmm3
vmovaps -0x40(%rsp), %xmm4
vmulps -0x60(%rsp), %xmm4, %xmm4
vmulps -0x30(%rsp), %xmm9, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmulps -0x20(%rsp), %xmm12, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vxorps %xmm4, %xmm2, %xmm2
vbroadcastss 0x30(%r9,%rcx,4), %xmm4
vmulps %xmm4, %xmm14, %xmm4
vcmpltps %xmm2, %xmm4, %xmm4
vbroadcastss 0x80(%r9,%rcx,4), %xmm5
vmulps %xmm5, %xmm14, %xmm5
vcmpleps %xmm5, %xmm2, %xmm5
vandps %xmm5, %xmm4, %xmm4
vtestps %xmm3, %xmm4
je 0x240bb9
movq %rsi, -0x40(%rsp)
vandps %xmm3, %xmm4, %xmm6
vmovaps %xmm0, 0x20(%rsp)
vmovaps %xmm1, 0x30(%rsp)
vmovaps %xmm2, 0x40(%rsp)
vmovaps %xmm14, 0x50(%rsp)
vmovaps %xmm6, 0x70(%rsp)
vmovaps %xmm12, 0xb0(%rsp)
vmovaps %xmm9, 0xc0(%rsp)
vmovaps -0x60(%rsp), %xmm0
vmovaps %xmm0, 0xd0(%rsp)
movq 0x1108(%rsp), %r14
movq (%r14), %rsi
vmovaps %xmm6, 0x10(%rsp)
vrcpps %xmm14, %xmm0
vmulps %xmm0, %xmm14, %xmm1
vbroadcastss 0x1caba89(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x40(%rsp), %xmm0, %xmm7
vmovaps %xmm7, 0xa0(%rsp)
vmulps 0x20(%rsp), %xmm0, %xmm1
vmovaps %xmm1, 0x80(%rsp)
vmulps 0x30(%rsp), %xmm0, %xmm0
vbroadcastss 0x1caad5c(%rip), %xmm4 # 0x1eeba20
vblendvps %xmm6, %xmm7, %xmm4, %xmm1
vmovaps %xmm0, 0x90(%rsp)
vshufps $0xb1, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[1,0,3,2]
vminps %xmm1, %xmm0, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,0]
vminps %xmm0, %xmm2, %xmm0
vcmpeqps %xmm0, %xmm1, %xmm1
vtestps %xmm6, %xmm1
vpcmpeqd %xmm0, %xmm0, %xmm0
je 0x240cf9
vmovaps %xmm1, %xmm0
addq %r13, %r15
vandps %xmm0, %xmm6, %xmm0
vmovmskps %xmm0, %r14d
bsfq %r14, %r14
movq 0x1e8(%rsi), %rsi
movq %rsi, -0x60(%rsp)
movq %r9, -0x20(%rsp)
movq %rcx, -0x30(%rsp)
movl 0x90(%r9,%rcx,4), %esi
movq %r14, -0x80(%rsp)
movl 0x90(%r15,%r14,4), %ecx
movq -0x60(%rsp), %r9
movq (%r9,%rcx,8), %r14
movl %esi, -0x64(%rsp)
testl %esi, 0x34(%r14)
jne 0x240dbd
movq -0x80(%rsp), %r14
andl $0x0, 0x10(%rsp,%r14,4)
vmovaps 0x10(%rsp), %xmm0
vtestps %xmm0, %xmm0
movq -0x20(%rsp), %r9
movq -0x30(%rsp), %rcx
movq -0x40(%rsp), %rsi
je 0x240bb9
vblendvps %xmm0, %xmm7, %xmm4, %xmm1
vshufps $0xb1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0,3,2]
vminps %xmm1, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vminps %xmm2, %xmm3, %xmm2
vcmpeqps %xmm2, %xmm1, %xmm1
vtestps %xmm0, %xmm1
je 0x240d99
vandps %xmm0, %xmm1, %xmm0
vmovmskps %xmm0, %r14d
bsfq %r14, %r14
movq %r14, -0x80(%rsp)
movl 0x90(%r15,%r14,4), %ecx
movq -0x60(%rsp), %r14
movq (%r14,%rcx,8), %r14
movl -0x64(%rsp), %esi
jmp 0x240d40
movq -0x80(%rsp), %r14
vmovss 0x80(%rsp,%r14,4), %xmm0
vmovss 0x90(%rsp,%r14,4), %xmm1
vmovss 0xa0(%rsp,%r14,4), %xmm2
movq -0x20(%rsp), %r9
movq %rcx, %rsi
movq -0x30(%rsp), %rcx
vmovss %xmm2, 0x80(%r9,%rcx,4)
vmovss 0xb0(%rsp,%r14,4), %xmm2
vmovss %xmm2, 0xc0(%r9,%rcx,4)
vmovss 0xc0(%rsp,%r14,4), %xmm2
vmovss %xmm2, 0xd0(%r9,%rcx,4)
vmovss 0xd0(%rsp,%r14,4), %xmm2
vmovss %xmm2, 0xe0(%r9,%rcx,4)
vmovss %xmm0, 0xf0(%r9,%rcx,4)
vmovss %xmm1, 0x100(%r9,%rcx,4)
movl 0xa0(%r15,%r14,4), %r14d
movl %r14d, 0x110(%r9,%rcx,4)
movl %esi, 0x120(%r9,%rcx,4)
movq 0x1108(%rsp), %r15
movq 0x8(%r15), %r14
movl (%r14), %r14d
movl %r14d, 0x130(%r9,%rcx,4)
movq 0x8(%r15), %r14
movl 0x4(%r14), %r14d
movl %r14d, 0x140(%r9,%rcx,4)
movq -0x40(%rsp), %rsi
jmp 0x240bb9
vbroadcastss 0x80(%r9,%rcx,4), %xmm12
vmovaps 0x140(%rsp), %xmm5
vmovaps 0x130(%rsp), %xmm7
vmovaps 0x120(%rsp), %xmm8
vmovaps 0x110(%rsp), %xmm9
vmovaps 0x100(%rsp), %xmm10
vmovaps 0xf0(%rsp), %xmm11
vmovaps 0xe0(%rsp), %xmm13
jmp 0x2406ed
addq $0x10c8, %rsp # imm = 0x10C8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::TriangleMIntersectorKMoeller<4, 4, false>>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::avx::MoellerTrumboreIntersectorK<4, 4>&, embree::RayK<4>&, embree::avx::TravRayK<4, false> const&, embree::RayQueryContext*)
|
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
const BVH* bvh,
NodeRef root,
size_t k,
Precalculations& pre,
RayK<K>& ray,
const TravRayK<K, robust>& tray,
RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x898, %rsp # imm = 0x898
movq %r9, -0x78(%rsp)
movq 0x8d0(%rsp), %rax
leaq 0xf8(%rsp), %r14
movq %rdx, -0x8(%r14)
vbroadcastss (%rax,%rcx,4), %xmm0
vbroadcastss 0x10(%rax,%rcx,4), %xmm1
vbroadcastss 0x20(%rax,%rcx,4), %xmm2
vbroadcastss 0x60(%rax,%rcx,4), %xmm3
vbroadcastss 0x70(%rax,%rcx,4), %xmm4
vbroadcastss 0x80(%rax,%rcx,4), %xmm5
movslq 0x90(%rax,%rcx,4), %r9
movslq 0xa0(%rax,%rcx,4), %rdi
movslq 0xb0(%rax,%rcx,4), %r8
movq %r9, %r10
xorq $0x10, %r10
movq %rdi, %r11
xorq $0x10, %r11
movq %r8, %rbx
xorq $0x10, %rbx
vbroadcastss 0xc0(%rax,%rcx,4), %xmm14
vbroadcastss 0xd0(%rax,%rcx,4), %xmm7
movq %r14, %rdx
leaq 0xf0(%rsp), %rax
cmpq %rax, %r14
je 0x2420aa
leaq -0x8(%rdx), %r14
movq %rdx, -0x70(%rsp)
movq -0x8(%rdx), %r15
testb $0x8, %r15b
jne 0x241c51
vmovaps 0x20(%r15,%r9), %xmm8
vsubps %xmm0, %xmm8, %xmm8
vmulps %xmm3, %xmm8, %xmm8
vmovaps 0x20(%r15,%rdi), %xmm9
vsubps %xmm1, %xmm9, %xmm9
vmulps %xmm4, %xmm9, %xmm9
vpmaxsd %xmm9, %xmm8, %xmm8
vmovaps 0x20(%r15,%r8), %xmm9
vsubps %xmm2, %xmm9, %xmm9
vmulps %xmm5, %xmm9, %xmm9
vpmaxsd %xmm14, %xmm9, %xmm9
vpmaxsd %xmm9, %xmm8, %xmm8
vmovaps 0x20(%r15,%r10), %xmm9
vsubps %xmm0, %xmm9, %xmm9
vmulps %xmm3, %xmm9, %xmm9
vmovaps 0x20(%r15,%r11), %xmm10
vsubps %xmm1, %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm10
vpminsd %xmm10, %xmm9, %xmm9
vmovaps 0x20(%r15,%rbx), %xmm10
vsubps %xmm2, %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm10
vpminsd %xmm7, %xmm10, %xmm10
vpminsd %xmm10, %xmm9, %xmm9
vpcmpgtd %xmm9, %xmm8, %xmm8
vmovmskps %xmm8, %eax
xorb $0xf, %al
movzbl %al, %ebp
testb $0x8, %r15b
jne 0x241c87
testq %rbp, %rbp
je 0x241c8e
andq $-0x10, %r15
bsfq %rbp, %rsi
leaq -0x1(%rbp), %r12
xorl %eax, %eax
movq (%r15,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
andq %rbp, %r12
jne 0x241c95
movq %rsi, %r15
testl %eax, %eax
je 0x241bc0
jmp 0x241cd3
movl $0x6, %eax
jmp 0x241c7d
movl $0x4, %eax
jmp 0x241c7d
movq %rsi, (%r14)
addq $0x8, %r14
movq %r14, %rdx
bsfq %r12, %r14
leaq -0x1(%r12), %rsi
movq (%r15,%r14,8), %r14
prefetcht0 (%r14)
prefetcht0 0x40(%r14)
andq %r12, %rsi
je 0x241ccb
movq %r14, (%rdx)
addq $0x8, %rdx
bsfq %rsi, %r14
leaq -0x1(%rsi), %r12
jmp 0x241ca8
movq %r14, %r15
movq %rdx, %r14
jmp 0x241c7d
cmpl $0x6, %eax
jne 0x24209d
movl %r15d, %esi
andl $0xf, %esi
addq $-0x8, %rsi
setne %dl
je 0x242081
andq $-0x10, %r15
movq -0x78(%rsp), %rax
vbroadcastss (%rax,%rcx,4), %xmm6
vmovaps %xmm6, 0xc0(%rsp)
vbroadcastss 0x10(%rax,%rcx,4), %xmm9
vbroadcastss 0x20(%rax,%rcx,4), %xmm12
vbroadcastss 0x40(%rax,%rcx,4), %xmm15
vbroadcastss 0x50(%rax,%rcx,4), %xmm11
vbroadcastss 0x60(%rax,%rcx,4), %xmm6
xorl %r12d, %r12d
movq %r14, -0x68(%rsp)
movq %rsi, -0x20(%rsp)
vmovaps %xmm9, -0x40(%rsp)
vmovaps %xmm12, -0x50(%rsp)
vmovaps %xmm15, 0x60(%rsp)
vmovaps %xmm11, 0x50(%rsp)
vmovaps %xmm6, 0xb0(%rsp)
imulq $0xb0, %r12, %r14
vmovaps 0x80(%r15,%r14), %xmm6
vmovaps %xmm6, 0x10(%rsp)
vmovaps 0x40(%r15,%r14), %xmm13
vmovaps %xmm13, 0x30(%rsp)
vmulps %xmm6, %xmm13, %xmm10
vmovaps 0x70(%r15,%r14), %xmm8
vmovaps %xmm8, 0x20(%rsp)
vmovaps 0x50(%r15,%r14), %xmm6
vmovaps %xmm6, 0xe0(%rsp)
vmovaps %xmm12, %xmm13
vmulps %xmm6, %xmm8, %xmm12
vsubps %xmm10, %xmm12, %xmm8
vmovaps %xmm8, -0x10(%rsp)
vmovaps 0x60(%r15,%r14), %xmm8
vmovaps %xmm8, (%rsp)
vmulps %xmm6, %xmm8, %xmm6
vmovaps %xmm6, -0x60(%rsp)
vmovaps (%r15,%r14), %xmm10
vsubps 0xc0(%rsp), %xmm10, %xmm12
vmovaps 0x10(%r15,%r14), %xmm10
vmovaps %xmm15, %xmm6
vsubps %xmm9, %xmm10, %xmm9
vmovaps 0x20(%r15,%r14), %xmm10
vsubps %xmm13, %xmm10, %xmm8
vmovaps 0x30(%r15,%r14), %xmm10
vmulps 0x10(%rsp), %xmm10, %xmm13
vmovaps %xmm10, 0xd0(%rsp)
vsubps -0x60(%rsp), %xmm13, %xmm13
vmovaps %xmm13, -0x60(%rsp)
vmulps 0x20(%rsp), %xmm10, %xmm15
vmovaps (%rsp), %xmm10
vmulps 0x30(%rsp), %xmm10, %xmm13
vsubps %xmm15, %xmm13, %xmm10
vmulps %xmm8, %xmm11, %xmm13
vmovaps %xmm13, -0x30(%rsp)
vmovaps 0xb0(%rsp), %xmm15
vmulps %xmm9, %xmm15, %xmm13
vsubps -0x30(%rsp), %xmm13, %xmm13
vmovaps %xmm13, -0x30(%rsp)
vmulps %xmm15, %xmm12, %xmm13
vmovaps %xmm8, 0x80(%rsp)
vmulps %xmm6, %xmm8, %xmm8
vsubps %xmm13, %xmm8, %xmm8
vmovaps %xmm9, 0x90(%rsp)
vmulps %xmm6, %xmm9, %xmm13
vmovaps %xmm12, 0xa0(%rsp)
vmulps %xmm12, %xmm11, %xmm12
vsubps %xmm13, %xmm12, %xmm12
vmovaps %xmm10, 0x70(%rsp)
vmulps %xmm15, %xmm10, %xmm13
vmulps -0x60(%rsp), %xmm11, %xmm9
vaddps %xmm13, %xmm9, %xmm9
vmulps -0x10(%rsp), %xmm6, %xmm13
vaddps %xmm9, %xmm13, %xmm13
vmulps 0x10(%rsp), %xmm12, %xmm9
vmulps 0x20(%rsp), %xmm8, %xmm11
vaddps %xmm11, %xmm9, %xmm9
vmovaps -0x30(%rsp), %xmm6
vmulps (%rsp), %xmm6, %xmm11
vaddps %xmm9, %xmm11, %xmm9
vmulps 0xe0(%rsp), %xmm12, %xmm11
vmulps 0x30(%rsp), %xmm8, %xmm8
vaddps %xmm8, %xmm11, %xmm11
vbroadcastss 0x1cdefdb(%rip), %xmm8 # 0x1f20ec0
vandps %xmm8, %xmm13, %xmm8
vxorps %xmm9, %xmm8, %xmm12
vmulps 0xd0(%rsp), %xmm6, %xmm9
vaddps %xmm11, %xmm9, %xmm9
vxorps %xmm9, %xmm8, %xmm10
vxorps %xmm6, %xmm6, %xmm6
vcmpnltps %xmm6, %xmm12, %xmm9
vcmpnltps %xmm6, %xmm10, %xmm11
vandps %xmm11, %xmm9, %xmm9
vcmpneqps %xmm6, %xmm13, %xmm11
vandps %xmm11, %xmm9, %xmm9
vaddps %xmm10, %xmm12, %xmm11
vbroadcastss 0x1cdef97(%rip), %xmm10 # 0x1f20ec4
vandps %xmm10, %xmm13, %xmm10
vcmpleps %xmm10, %xmm11, %xmm11
vtestps %xmm11, %xmm9
movb %dl, -0x79(%rsp)
movq %r12, -0x18(%rsp)
jne 0x241fe7
xorl %esi, %esi
vmovdqa -0x40(%rsp), %xmm9
vmovaps -0x50(%rsp), %xmm12
testb %sil, %sil
je 0x241fb8
addq %r15, %r14
movq 0x8d8(%rsp), %rax
movq (%rax), %r12
vmovaps 0x40(%rsp), %xmm8
vmovmskps %xmm8, %esi
vmovaps -0x50(%rsp), %xmm12
bsfq %rsi, %r13
movl 0x90(%r14,%r13,4), %eax
movq 0x1e8(%r12), %rdx
movq (%rdx,%rax,8), %rdx
movq -0x78(%rsp), %rax
movl 0x90(%rax,%rcx,4), %eax
andl 0x34(%rdx), %eax
jne 0x241fab
btcq %r13, %rsi
testl %eax, %eax
jne 0x242078
testq %rsi, %rsi
jne 0x241f7e
movq -0x18(%rsp), %r12
incq %r12
movq -0x20(%rsp), %rsi
cmpq %rsi, %r12
setb %dl
movq -0x68(%rsp), %r14
vmovaps 0x60(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm11
jne 0x241d58
jmp 0x242081
vandps %xmm11, %xmm9, %xmm9
vmovaps 0x70(%rsp), %xmm11
vmulps 0x80(%rsp), %xmm11, %xmm11
vmovaps -0x60(%rsp), %xmm12
vmulps 0x90(%rsp), %xmm12, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vmovaps -0x10(%rsp), %xmm6
vmulps 0xa0(%rsp), %xmm6, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vxorps %xmm11, %xmm8, %xmm8
movq -0x78(%rsp), %rax
vbroadcastss 0x30(%rax,%rcx,4), %xmm11
vmulps %xmm11, %xmm10, %xmm11
vcmpltps %xmm8, %xmm11, %xmm11
vbroadcastss 0x80(%rax,%rcx,4), %xmm12
vmulps %xmm12, %xmm10, %xmm10
vcmpleps %xmm10, %xmm8, %xmm8
vandps %xmm8, %xmm11, %xmm8
vtestps %xmm9, %xmm8
setne %sil
je 0x241f4e
vandps %xmm9, %xmm8, %xmm8
vmovaps %xmm8, 0x40(%rsp)
jmp 0x241f4e
movq -0x68(%rsp), %r14
movb -0x79(%rsp), %dl
xorl %eax, %eax
testb $0x1, %dl
je 0x24209d
movq -0x78(%rsp), %rax
movl $0xff800000, 0x80(%rax,%rcx,4) # imm = 0xFF800000
movl $0x1, %eax
movq -0x70(%rsp), %rdx
testb $0x3, %al
je 0x241b9f
leaq 0xf0(%rsp), %rax
cmpq %rax, %rdx
setne %al
addq $0x898, %rsp # imm = 0x898
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::TriangleMiIntersectorKMoeller<4, 4, true>>, true>::intersect1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::avx::MoellerTrumboreIntersectorK<4, 4>&, embree::RayHitK<4>&, embree::avx::TravRayK<4, false> const&, embree::RayQueryContext*)
|
/* Scalar (single-ray) closest-hit traversal for lane k of a K-wide ray packet.
 * Walks the BVH depth-first with a distance-sorted stack and writes the
 * nearest hit into ray (RayHitK<K>). NOTE(review): presumably called by the
 * hybrid packet intersector when few packet lanes remain active — confirm
 * against the caller. */
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect1(Accel::Intersectors* This,
                                                                                            const BVH* bvh,
                                                                                            NodeRef root,
                                                                                            size_t k,
                                                                                            Precalculations& pre,
                                                                                            RayHitK<K>& ray,
                                                                                            const TravRayK<K, robust>& tray,
                                                                                            RayQueryContext* context)
{
/* stack state */
StackItemT<NodeRef> stack[stackSizeSingle]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack + 1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack + stackSizeSingle;
stack[0].ptr = root;
stack[0].dist = neg_inf; // -inf guarantees the root entry is never distance-culled below
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
// extract lane k of the packet traversal ray into a per-node-SIMD scalar ray
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop: // 'pop' label: re-entered via goto when no child of a node is hit
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty -> traversal done
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
// entry distance beyond current closest hit (ray.tfar[k]) cannot improve the result
if (unlikely(*(float*)&stackPtr->dist > ray.tfar[k]))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes, 1, 1, 1);
// returns false for leaf nodes (ends the down-traversal), true for inner nodes;
// mask holds the bitset of intersected children, tNear their entry distances
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
// continues with the nearest hit child in 'cur'; pushes the rest distance-sorted
BVHNNodeTraverser1Hit<N, types>::traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(This, pre, ray, k, context, prim, num, tray1, lazy_node);
// shrink the traversal ray to the (possibly reduced) hit distance so later
// stack entries and nodes are culled against the updated tfar
tray1.tfar = ray.tfar[k];
if (unlikely(lazy_node)) {
// NOTE(review): lazy_node appears to be a lazily-built subtree handed back by
// the primitive intersector; pushed with dist = -inf so it is never culled —
// confirm semantics against PrimitiveIntersectorK implementations
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x1208, %rsp # imm = 0x1208
movq %r9, %rbx
movq %rcx, %r14
movq 0x1248(%rsp), %r10
movq 0x1240(%rsp), %rax
leaq 0x2d0(%rsp), %r11
movq %rdx, -0x10(%r11)
andl $0x0, -0x8(%r11)
vbroadcastss (%rax,%rcx,4), %xmm7
vbroadcastss 0x10(%rax,%rcx,4), %xmm8
vbroadcastss 0x20(%rax,%rcx,4), %xmm9
vbroadcastss 0x60(%rax,%rcx,4), %xmm10
vbroadcastss 0x70(%rax,%rcx,4), %xmm11
vbroadcastss 0x80(%rax,%rcx,4), %xmm12
movslq 0x90(%rax,%rcx,4), %r9
movslq 0xa0(%rax,%rcx,4), %r12
movslq 0xb0(%rax,%rcx,4), %rsi
movq %r9, %rdi
xorq $0x10, %rdi
movq %r12, %r15
xorq $0x10, %r15
movq %rsi, %rcx
xorq $0x10, %rcx
movq %rcx, 0xa8(%rsp)
vbroadcastss 0xd0(%rax,%r14,4), %xmm0
vbroadcastss 0xc0(%rax,%r14,4), %xmm13
pushq $0x1
popq %rax
movl %r14d, %ecx
shll %cl, %eax
cltq
shlq $0x4, %rax
addq 0x1ee143a(%rip), %rax # 0x2124730
movq %rax, 0x98(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
movq %r15, (%rsp)
vmovaps %xmm7, 0x210(%rsp)
vmovaps %xmm8, 0x200(%rsp)
vmovaps %xmm9, 0x1f0(%rsp)
vmovaps %xmm10, 0x1e0(%rsp)
vmovaps %xmm11, 0x1d0(%rsp)
vmovaps %xmm12, 0x1c0(%rsp)
movq %r12, 0xa0(%rsp)
vmovaps %xmm13, 0x1b0(%rsp)
leaq 0x2c0(%rsp), %rax
cmpq %rax, %r11
je 0x243e72
vmovss -0x8(%r11), %xmm1
addq $-0x10, %r11
vucomiss 0x80(%rbx,%r14,4), %xmm1
ja 0x243353
movq (%r11), %rbp
testb $0x8, %bpl
jne 0x24341f
vmovaps 0x20(%rbp,%r9), %xmm1
vsubps %xmm7, %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vmovaps 0x20(%rbp,%r12), %xmm2
vsubps %xmm8, %xmm2, %xmm2
vmulps %xmm2, %xmm11, %xmm2
vpmaxsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbp,%rsi), %xmm2
vsubps %xmm9, %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vpmaxsd %xmm13, %xmm2, %xmm2
vpmaxsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%rbp,%rdi), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmovaps 0x20(%rbp,%r15), %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
movq 0xa8(%rsp), %rax
vmovaps 0x20(%rbp,%rax), %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm3, %xmm12, %xmm3
vpminsd %xmm0, %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vpcmpgtd %xmm2, %xmm1, %xmm2
vmovmskps %xmm2, %eax
xorb $0xf, %al
movzbl %al, %r13d
vmovdqa %xmm1, 0xf0(%rsp)
testb $0x8, %bpl
jne 0x243459
testq %r13, %r13
je 0x24345d
andq $-0x10, %rbp
bsfq %r13, %rdx
leaq -0x1(%r13), %r8
xorl %eax, %eax
movq (%rbp,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %r13, %r8
jne 0x243462
movq %rcx, %rbp
testl %eax, %eax
je 0x24337d
jmp 0x243609
pushq $0x6
jmp 0x24345f
pushq $0x4
popq %rax
jmp 0x24344c
movq %r9, %r15
movl 0xf0(%rsp,%rdx,4), %esi
bsfq %r8, %r9
leaq -0x1(%r8), %rdx
movq (%rbp,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
movl 0xf0(%rsp,%r9,4), %r9d
andq %r8, %rdx
jne 0x2434c5
leaq 0x10(%r11), %rdx
cmpl %r9d, %esi
jae 0x2434a5
movq %rdi, (%r11)
movl %r9d, 0x8(%r11)
movq %rdx, %r11
movq %rcx, %rbp
jmp 0x2434b2
movq %rcx, (%r11)
movl %esi, 0x8(%r11)
movq %rdx, %r11
movq %rdi, %rbp
movq %r15, %r9
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
movq (%rsp), %r15
jmp 0x24344c
vmovq %rcx, %xmm1
vmovd %esi, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
vmovq %rdi, %xmm2
vmovd %r9d, %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
bsfq %rdx, %rsi
leaq -0x1(%rdx), %rcx
movq (%rbp,%rsi,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
vmovq %rdi, %xmm3
vmovd 0xf0(%rsp,%rsi,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
vpcmpgtd %xmm1, %xmm2, %xmm4
andq %rdx, %rcx
jne 0x243563
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm2, %xmm5
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm5, %xmm3, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm5, %xmm3, %xmm4
vblendvps %xmm2, %xmm3, %xmm5, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm5
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, (%r11)
vmovaps %xmm5, 0x10(%r11)
vmovq %xmm4, %rbp
addq $0x20, %r11
jmp 0x2434b2
bsfq %rcx, %rcx
movq (%rbp,%rcx,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
vmovq %rdx, %xmm5
vmovd 0xf0(%rsp,%rcx,4), %xmm6
vpunpcklqdq %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0]
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm2, %xmm6
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm3, %xmm5, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm3, %xmm5, %xmm4
vblendvps %xmm2, %xmm5, %xmm3, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm5
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm6, %xmm4, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm6, %xmm4, %xmm3
vblendvps %xmm2, %xmm4, %xmm6, %xmm2
vpcmpgtd %xmm2, %xmm5, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm2, %xmm5, %xmm6
vblendvps %xmm4, %xmm5, %xmm2, %xmm2
vmovaps %xmm1, (%r11)
vmovaps %xmm2, 0x10(%r11)
vmovaps %xmm6, 0x20(%r11)
vmovq %xmm3, %rbp
addq $0x30, %r11
jmp 0x2434b2
cmpl $0x6, %eax
jne 0x243353
movl %ebp, %r8d
andl $0xf, %r8d
addq $-0x8, %r8
je 0x243e0e
andq $-0x10, %rbp
xorl %r12d, %r12d
imulq $0x50, %r12, %rax
movq (%r10), %r15
movl 0x30(%rbp,%rax), %edx
movq 0x228(%r15), %rcx
movq (%rcx,%rdx,8), %rdx
movl (%rbp,%rax), %esi
movl 0x4(%rbp,%rax), %edi
vmovups (%rdx,%rsi,4), %xmm2
movl 0x10(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm1
movl 0x20(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm0
movl 0x34(%rbp,%rax), %edx
movq (%rcx,%rdx,8), %rdx
vmovups (%rdx,%rdi,4), %xmm4
movl 0x14(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm6
movl 0x24(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm3
movl 0x38(%rbp,%rax), %edx
movq (%rcx,%rdx,8), %rdx
movl 0x8(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm5
movl 0x18(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm8
movl 0x28(%rbp,%rax), %esi
vmovups (%rdx,%rsi,4), %xmm9
movl 0x3c(%rbp,%rax), %edx
movq (%rcx,%rdx,8), %rcx
movl 0xc(%rbp,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm7
movl 0x1c(%rbp,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm10
movl 0x2c(%rbp,%rax), %edx
vmovups (%rcx,%rdx,4), %xmm11
vunpcklps %xmm5, %xmm2, %xmm12 # xmm12 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vunpckhps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm5[2],xmm2[3],xmm5[3]
vunpcklps %xmm7, %xmm4, %xmm5 # xmm5 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vunpckhps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
vunpcklps %xmm4, %xmm2, %xmm13 # xmm13 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
vunpcklps %xmm5, %xmm12, %xmm4 # xmm4 = xmm12[0],xmm5[0],xmm12[1],xmm5[1]
vunpckhps %xmm5, %xmm12, %xmm7 # xmm7 = xmm12[2],xmm5[2],xmm12[3],xmm5[3]
vunpcklps %xmm8, %xmm1, %xmm2 # xmm2 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
vunpckhps %xmm8, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm8[2],xmm1[3],xmm8[3]
vunpcklps %xmm10, %xmm6, %xmm8 # xmm8 = xmm6[0],xmm10[0],xmm6[1],xmm10[1]
vunpckhps %xmm10, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm10[2],xmm6[3],xmm10[3]
vunpcklps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpcklps %xmm8, %xmm2, %xmm6 # xmm6 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
vunpckhps %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
vunpcklps %xmm9, %xmm0, %xmm8 # xmm8 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
vunpckhps %xmm9, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm9[2],xmm0[3],xmm9[3]
vunpcklps %xmm11, %xmm3, %xmm9 # xmm9 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vunpcklps %xmm3, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpcklps %xmm9, %xmm8, %xmm10 # xmm10 = xmm8[0],xmm9[0],xmm8[1],xmm9[1]
vunpckhps %xmm9, %xmm8, %xmm11 # xmm11 = xmm8[2],xmm9[2],xmm8[3],xmm9[3]
vmovaps 0x30(%rbp,%rax), %xmm3
vmovaps %xmm3, 0x220(%rsp)
vmovaps 0x40(%rbp,%rax), %xmm3
vmovaps %xmm3, 0xe0(%rsp)
vsubps %xmm6, %xmm4, %xmm3
vmovaps %xmm3, 0x60(%rsp)
vsubps %xmm2, %xmm7, %xmm6
vmovaps %xmm13, 0x20(%rsp)
vsubps %xmm1, %xmm13, %xmm8
vsubps %xmm4, %xmm10, %xmm9
vsubps %xmm7, %xmm11, %xmm10
vsubps %xmm13, %xmm0, %xmm11
vmulps %xmm6, %xmm11, %xmm0
vmulps %xmm10, %xmm8, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmulps %xmm9, %xmm8, %xmm1
vmulps %xmm3, %xmm11, %xmm2
vsubps %xmm1, %xmm2, %xmm5
vmulps %xmm3, %xmm10, %xmm2
vmulps %xmm6, %xmm9, %xmm12
vsubps %xmm2, %xmm12, %xmm3
vbroadcastss (%rbx,%r14,4), %xmm12
vbroadcastss 0x10(%rbx,%r14,4), %xmm13
vbroadcastss 0x20(%rbx,%r14,4), %xmm14
vbroadcastss 0x50(%rbx,%r14,4), %xmm15
vsubps %xmm12, %xmm4, %xmm2
vbroadcastss 0x60(%rbx,%r14,4), %xmm12
vsubps %xmm13, %xmm7, %xmm4
vmovaps 0x20(%rsp), %xmm0
vsubps %xmm14, %xmm0, %xmm7
vmulps %xmm7, %xmm15, %xmm13
vmulps %xmm4, %xmm12, %xmm14
vsubps %xmm13, %xmm14, %xmm13
vbroadcastss 0x40(%rbx,%r14,4), %xmm14
vmulps %xmm2, %xmm12, %xmm0
vmulps %xmm7, %xmm14, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmulps %xmm4, %xmm14, %xmm1
vmovaps %xmm2, 0x40(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm3, 0x20(%rsp)
vmulps %xmm3, %xmm12, %xmm2
vmulps %xmm5, %xmm15, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmovaps 0x50(%rsp), %xmm3
vmulps %xmm3, %xmm14, %xmm12
vmovaps %xmm3, %xmm14
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm11, %xmm11
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm10, %xmm11, %xmm10
vmulps %xmm13, %xmm9, %xmm9
vaddps %xmm10, %xmm9, %xmm10
vmulps %xmm1, %xmm8, %xmm1
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vbroadcastss 0x1cdd674(%rip), %xmm1 # 0x1f20ec0
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm6
vmulps 0x60(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm6, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0x1cdd643(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm3
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm10
vaddps %xmm6, %xmm8, %xmm0
vcmpleps %xmm3, %xmm0, %xmm11
vtestps %xmm11, %xmm10
jne 0x2438af
incq %r12
cmpq %r8, %r12
jne 0x24362a
jmp 0x243e0e
vmovaps %xmm5, %xmm15
vandps %xmm11, %xmm10, %xmm10
vmulps 0x20(%rsp), %xmm7, %xmm0
vmulps %xmm4, %xmm5, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0x40(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm5
vbroadcastss 0x30(%rbx,%r14,4), %xmm0
vmulps %xmm0, %xmm3, %xmm0
vcmpltps %xmm5, %xmm0, %xmm0
vbroadcastss 0x80(%rbx,%r14,4), %xmm1
vmulps %xmm1, %xmm3, %xmm1
vcmpleps %xmm1, %xmm5, %xmm1
vandps %xmm1, %xmm0, %xmm4
vtestps %xmm10, %xmm4
je 0x24389e
vandps %xmm4, %xmm10, %xmm4
vmovaps %xmm6, 0xf0(%rsp)
vmovaps %xmm8, 0x100(%rsp)
vmovaps %xmm5, 0x110(%rsp)
vmovaps %xmm3, 0x120(%rsp)
vmovaps %xmm4, 0x140(%rsp)
vmovaps %xmm14, 0x180(%rsp)
vmovaps %xmm15, 0x190(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vmovaps %xmm4, 0x30(%rsp)
vrcpps %xmm3, %xmm0
vmulps %xmm0, %xmm3, %xmm1
vbroadcastss 0x1ca8da9(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x110(%rsp), %xmm0, %xmm5
vmovaps %xmm5, 0x170(%rsp)
vmulps 0xf0(%rsp), %xmm0, %xmm1
vmovaps %xmm1, 0x150(%rsp)
vmulps 0x100(%rsp), %xmm0, %xmm0
vbroadcastss 0x1ca8073(%rip), %xmm1 # 0x1eeba20
vblendvps %xmm4, %xmm5, %xmm1, %xmm1
vmovaps %xmm0, 0x160(%rsp)
vshufps $0xb1, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[1,0,3,2]
vminps %xmm1, %xmm0, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,0]
vminps %xmm0, %xmm2, %xmm0
vcmpeqps %xmm0, %xmm1, %xmm0
vtestps %xmm4, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
je 0x2439e2
vmovaps %xmm0, %xmm1
vandps %xmm1, %xmm4, %xmm0
vmovmskps %xmm0, %eax
bsfq %rax, %rdx
movl 0x220(%rsp,%rdx,4), %eax
movq 0x1e8(%r15), %rcx
movq (%rcx,%rax,8), %rsi
movl 0x90(%rbx,%r14,4), %ecx
testl %ecx, 0x34(%rsi)
je 0x243c96
movq 0x10(%r10), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x243a27
cmpq $0x0, 0x40(%rsi)
je 0x243d63
vmovaps %xmm5, 0x40(%rsp)
movq %r8, 0x20(%rsp)
movq %r9, 0x50(%rsp)
movq %r11, 0x60(%rsp)
vbroadcastss 0x150(%rsp,%rdx,4), %xmm0
vbroadcastss 0x160(%rsp,%rdx,4), %xmm1
movq 0x8(%r10), %rcx
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vbroadcastss 0xe0(%rsp,%rdx,4), %xmm3
vbroadcastss 0x180(%rsp,%rdx,4), %xmm4
vbroadcastss 0x190(%rsp,%rdx,4), %xmm5
vbroadcastss 0x1a0(%rsp,%rdx,4), %xmm6
vmovaps %xmm4, 0x230(%rsp)
vmovaps %xmm5, 0x240(%rsp)
vmovaps %xmm6, 0x250(%rsp)
vmovaps %xmm0, 0x260(%rsp)
vmovaps %xmm1, 0x270(%rsp)
vmovaps %xmm3, 0x280(%rsp)
vmovdqa %xmm2, 0x290(%rsp)
leaq 0x2a0(%rsp), %rax
vcmptrueps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rax)
vbroadcastss (%rcx), %xmm0
vmovaps %xmm0, 0x2a0(%rsp)
vbroadcastss 0x4(%rcx), %xmm0
vmovaps %xmm0, 0x2b0(%rsp)
vmovss 0x80(%rbx,%r14,4), %xmm0
vmovss %xmm0, 0x1c(%rsp)
movq %rdx, 0x90(%rsp)
vmovss 0x170(%rsp,%rdx,4), %xmm0
vmovss %xmm0, 0x80(%rbx,%r14,4)
movq 0x98(%rsp), %rax
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x70(%rsp)
leaq 0x70(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%rsi), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %rbx, 0xc8(%rsp)
leaq 0x230(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq %rsi, 0x88(%rsp)
movq 0x40(%rsi), %rax
testq %rax, %rax
je 0x243b95
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x70(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x243cea
movq 0x1248(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x243bdb
testb $0x2, (%rcx)
jne 0x243bce
movq 0x88(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x243bdb
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0x70(%rsp), %xmm2
vpcmpeqd 0x1ca7e27(%rip), %xmm2, %xmm1 # 0x1eeba10
vpxor 0x1ca822f(%rip), %xmm1, %xmm0 # 0x1eebe20
vptest %xmm2, %xmm2
je 0x243cfa
vpxor 0x1ca821c(%rip), %xmm1, %xmm1 # 0x1eebe20
movq 0xc8(%rsp), %rax
movq 0xd0(%rsp), %rcx
vmovaps (%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0xc0(%rax)
vmovaps 0x10(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0xd0(%rax)
vmovaps 0x20(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0xe0(%rax)
vmovaps 0x30(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0xf0(%rax)
vmovaps 0x40(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0x100(%rax)
vmovaps 0x50(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0x110(%rax)
vmovaps 0x60(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0x120(%rax)
vmovaps 0x70(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0x130(%rax)
vmovaps 0x80(%rcx), %xmm2
vmaskmovps %xmm2, %xmm1, 0x140(%rax)
jmp 0x243cfa
andl $0x0, 0x30(%rsp,%rdx,4)
vmovaps 0x30(%rsp), %xmm0
vtestps %xmm0, %xmm0
je 0x24389e
vbroadcastss 0x1ca7d6b(%rip), %xmm1 # 0x1eeba20
vblendvps %xmm0, %xmm5, %xmm1, %xmm1
vshufps $0xb1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0,3,2]
vminps %xmm1, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vminps %xmm2, %xmm3, %xmm2
vcmpeqps %xmm2, %xmm1, %xmm1
vtestps %xmm0, %xmm1
je 0x243cdd
vandps %xmm0, %xmm1, %xmm0
vmovmskps %xmm0, %eax
bsfq %rax, %rdx
jmp 0x2439ee
vpcmpeqd 0x1ca7d1e(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1ca8126(%rip), %xmm0, %xmm0 # 0x1eebe20
vmovddup 0x1cdd1e6(%rip), %xmm1 # xmm1 = mem[0,0]
vptest %xmm1, %xmm0
jne 0x243d19
vmovd 0x1c(%rsp), %xmm0
vmovd %xmm0, 0x80(%rbx,%r14,4)
movq 0x90(%rsp), %rax
andl $0x0, 0x30(%rsp,%rax,4)
vbroadcastss 0x80(%rbx,%r14,4), %xmm0
vmovaps 0x40(%rsp), %xmm5
vcmpleps %xmm0, %xmm5, %xmm0
vandps 0x30(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rsp)
movq 0x1248(%rsp), %r10
movq 0x60(%rsp), %r11
movq 0x50(%rsp), %r9
movq 0x20(%rsp), %r8
jmp 0x243c9b
vmovss 0x150(%rsp,%rdx,4), %xmm0
vmovss 0x160(%rsp,%rdx,4), %xmm1
vmovss 0x170(%rsp,%rdx,4), %xmm2
vmovss %xmm2, 0x80(%rbx,%r14,4)
vmovss 0x180(%rsp,%rdx,4), %xmm2
vmovss %xmm2, 0xc0(%rbx,%r14,4)
vmovss 0x190(%rsp,%rdx,4), %xmm2
vmovss %xmm2, 0xd0(%rbx,%r14,4)
vmovss 0x1a0(%rsp,%rdx,4), %xmm2
vmovss %xmm2, 0xe0(%rbx,%r14,4)
vmovss %xmm0, 0xf0(%rbx,%r14,4)
vmovss %xmm1, 0x100(%rbx,%r14,4)
movl 0xe0(%rsp,%rdx,4), %ecx
movl %ecx, 0x110(%rbx,%r14,4)
movl %eax, 0x120(%rbx,%r14,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%rbx,%r14,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rbx,%r14,4)
jmp 0x24389e
vbroadcastss 0x80(%rbx,%r14,4), %xmm0
vmovaps 0x210(%rsp), %xmm7
vmovaps 0x200(%rsp), %xmm8
vmovaps 0x1f0(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %xmm10
vmovaps 0x1d0(%rsp), %xmm11
vmovaps 0x1c0(%rsp), %xmm12
movq 0xa0(%rsp), %r12
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
movq (%rsp), %r15
vmovaps 0x1b0(%rsp), %xmm13
jmp 0x243353
addq $0x1208, %rsp # imm = 0x1208
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<4, 4, 1, false, embree::avx::SubGridIntersectorKMoeller<4, 4, true>, true>::occluded1(embree::Accel::Intersectors*, embree::BVHN<4> const*, embree::NodeRefPtr<4>, unsigned long, embree::avx::SubGridQuadMIntersectorKMoellerTrumbore<4, 4, true>&, embree::RayK<4>&, embree::avx::TravRayK<4, false> const&, embree::RayQueryContext*)
|
/* Scalar (single-ray) any-hit / occlusion traversal for lane k of a K-wide
 * ray packet. Returns true as soon as any occluding primitive is found
 * (marking ray.tfar[k] = -inf as the "occluded" flag), false if the ray
 * reaches the end of traversal unblocked. Unlike intersect1, the stack holds
 * bare NodeRefs without distances: any hit terminates, so no distance
 * sorting or culling of stack entries is needed. */
bool BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::occluded1(Accel::Intersectors* This,
                                                                                           const BVH* bvh,
                                                                                           NodeRef root,
                                                                                           size_t k,
                                                                                           Precalculations& pre,
                                                                                           RayK<K>& ray,
                                                                                           const TravRayK<K, robust>& tray,
                                                                                           RayQueryContext* context)
{
/* stack state */
NodeRef stack[stackSizeSingle]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSizeSingle;
stack[0] = root;
/* load the ray into SIMD registers */
TravRay<N,robust> tray1;
// extract lane k of the packet traversal ray into a per-node-SIMD scalar ray
tray1.template init<K>(k, tray.org, tray.dir, tray.rdir, tray.nearXYZ, tray.tnear[k], tray.tfar[k]);
/* pop loop */
while (true) pop: // 'pop' label: re-entered via goto when no child of a node is hit
{
/* pop next node */
if (unlikely(stackPtr == stack)) break; // stack empty -> nothing blocked the ray
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes, 1, 1, 1);
// returns false for leaf nodes (ends the down-traversal), true for inner nodes;
// mask holds the bitset of intersected children
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray1, ray.time()[k], tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
// any-hit order: picks one hit child to continue with, pushes the rest unsorted
BVHNNodeTraverser1Hit<N, types>::traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves, 1, 1, 1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersectorK::occluded(This, pre, ray, k, context, prim, num, tray1, lazy_node)) {
// early out on the first occluder: -inf in tfar flags lane k as occluded
ray.tfar[k] = neg_inf;
return true;
}
if (unlikely(lazy_node)) {
// NOTE(review): lazy_node appears to be a lazily-built subtree handed back by
// the primitive intersector — confirm semantics against PrimitiveIntersectorK
*stackPtr = lazy_node;
stackPtr++;
}
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xc40, %rsp # imm = 0xC40
movq %r9, 0x10(%rsp)
movq 0x10(%rbp), %rax
leaq 0x488(%rsp), %rdi
movq %rdx, -0x8(%rdi)
vbroadcastss (%rax,%rcx,4), %xmm7
vbroadcastss 0x10(%rax,%rcx,4), %xmm8
vbroadcastss 0x20(%rax,%rcx,4), %xmm9
vbroadcastss 0x60(%rax,%rcx,4), %xmm10
vbroadcastss 0x70(%rax,%rcx,4), %xmm11
vbroadcastss 0x80(%rax,%rcx,4), %xmm12
movslq 0x90(%rax,%rcx,4), %r8
movslq 0xa0(%rax,%rcx,4), %r9
movslq 0xb0(%rax,%rcx,4), %r10
movq %r8, %r11
xorq $0x10, %r11
movq %r9, %rbx
xorq $0x10, %rbx
movq %r10, %r14
xorq $0x10, %r14
vbroadcastss 0xc0(%rax,%rcx,4), %xmm13
vbroadcastss 0xd0(%rax,%rcx,4), %xmm14
movq %r8, %rax
shrq $0x2, %rax
movq %rax, 0x148(%rsp)
leaq 0x1ecc1ba(%rip), %rax # 0x214ff80
vmovups (%rax), %ymm0
vinsertf128 $0x1, 0xf0(%rax), %ymm0, %ymm1
pushq $0x1
popq %rdx
movq %rcx, 0x8(%rsp)
shll %cl, %edx
movslq %edx, %rcx
shlq $0x4, %rcx
addq %rax, %rcx
movq %rcx, 0xd0(%rsp)
movq %r11, %rax
shrq $0x2, %rax
movq %rax, 0x140(%rsp)
movq %r9, %rax
shrq $0x2, %rax
movq %rax, 0x138(%rsp)
movq %rbx, %rax
shrq $0x2, %rax
movq %rax, 0x130(%rsp)
movq %r10, %rax
shrq $0x2, %rax
movq %rax, 0x128(%rsp)
movq %r14, %rax
shrq $0x2, %rax
movq %rax, 0x120(%rsp)
vbroadcastss 0x1c6cb88(%rip), %ymm0 # 0x1ef09cc
vbroadcastss 0x1c688c7(%rip), %ymm2 # 0x1eec714
vmovaps %ymm1, 0x460(%rsp)
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x440(%rsp)
leaq 0x480(%rsp), %r15
vmovaps %xmm7, 0xb0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x90(%rsp)
vmovaps %xmm10, 0x80(%rsp)
vmovaps %xmm11, 0x70(%rsp)
vmovaps %xmm12, 0x60(%rsp)
movq %r8, 0x100(%rsp)
movq %r9, 0xf8(%rsp)
movq %r10, 0xf0(%rsp)
movq %r11, 0xe8(%rsp)
movq %rbx, 0xe0(%rsp)
movq %r14, 0xd8(%rsp)
vmovaps %xmm13, 0x50(%rsp)
vmovaps %xmm14, 0x40(%rsp)
movq %rdi, %r12
cmpq %r15, %rdi
je 0x284b88
leaq -0x8(%r12), %rdi
movq -0x8(%r12), %rdx
testb $0x8, %dl
jne 0x283f81
vmovaps 0x20(%rdx,%r8), %xmm0
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vmovaps 0x20(%rdx,%r9), %xmm1
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vmovaps 0x20(%rdx,%r10), %xmm1
vsubps %xmm9, %xmm1, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vmovaps 0x20(%rdx,%r11), %xmm2
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vmovaps 0x20(%rdx,%rbx), %xmm3
vsubps %xmm8, %xmm3, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vmovaps 0x20(%rdx,%r14), %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm3, %xmm12, %xmm3
vpmaxsd %xmm13, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm0, %xmm0
vpminsd %xmm14, %xmm3, %xmm1
vpminsd %xmm1, %xmm2, %xmm1
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovmskps %xmm0, %eax
xorb $0xf, %al
movzbl %al, %r13d
testb $0x8, %dl
jne 0x283fb9
testq %r13, %r13
je 0x283fbd
andq $-0x10, %rdx
bsfq %r13, %rcx
leaq -0x1(%r13), %rsi
xorl %eax, %eax
movq (%rdx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq %r13, %rsi
jne 0x283fc2
movq %rcx, %rdx
testl %eax, %eax
je 0x283eef
jmp 0x284039
pushq $0x6
jmp 0x283fbf
pushq $0x4
popq %rax
jmp 0x283fac
movq %rcx, (%rdi)
addq $0x8, %rdi
movq %rdx, 0x18(%rsp)
bsfq %rsi, %rcx
movq %rcx, 0x20(%rsp)
leaq -0x1(%rsi), %rcx
movq %rcx, 0x180(%rsp)
movq 0x20(%rsp), %rcx
movq 0x18(%rsp), %rdx
movq (%rdx,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
movq 0x180(%rsp), %rdx
andq %rsi, %rdx
je 0x283fa9
movq %rdx, %rsi
movq %rcx, (%rdi)
addq $0x8, %rdi
bsfq %rsi, %rdx
leaq -0x1(%rsi), %rcx
movq %rcx, 0x20(%rsp)
movq 0x18(%rsp), %rcx
movq (%rcx,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
andq 0x20(%rsp), %rsi
je 0x283fa9
jmp 0x284008
cmpl $0x6, %eax
jne 0x284b39
movq %r13, 0x108(%rsp)
movq %r12, 0x110(%rsp)
movq %rdi, 0x118(%rsp)
movl %edx, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x150(%rsp)
setne %cl
je 0x284aca
andq $-0x10, %rdx
xorl %eax, %eax
movq %rdx, 0x18(%rsp)
movb %cl, 0x7(%rsp)
movq %rax, 0x158(%rsp)
imulq $0x58, %rax, %rcx
leaq (%rdx,%rcx), %rdi
vmovq 0x20(%rdx,%rcx), %xmm0
vmovq 0x24(%rdx,%rcx), %xmm1
vpminub %xmm1, %xmm0, %xmm1
vpcmpeqb %xmm1, %xmm0, %xmm0
vpcmpeqd %xmm1, %xmm1, %xmm1
vpxor %xmm1, %xmm0, %xmm0
vpmovzxbd %xmm0, %xmm0 # xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vpxor %xmm1, %xmm0, %xmm0
vpslld $0x1f, %xmm0, %xmm0
vmovmskps %xmm0, %eax
vbroadcastss 0x38(%rdx,%rcx), %xmm0
vbroadcastss 0x44(%rdx,%rcx), %xmm1
movq %rdx, %rsi
movq 0x148(%rsp), %rdx
vmovq 0x20(%rdx,%rdi), %xmm2
vpmovzxbd %xmm2, %xmm2 # xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vcvtdq2ps %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm2
vaddps %xmm2, %xmm0, %xmm2
movq 0x140(%rsp), %rdx
vmovq 0x20(%rdx,%rdi), %xmm3
vpmovzxbd %xmm3, %xmm3 # xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vcvtdq2ps %xmm3, %xmm3
vmulps %xmm3, %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x3c(%rsi,%rcx), %xmm1
vbroadcastss 0x48(%rsi,%rcx), %xmm3
movq 0x138(%rsp), %rdx
vmovq 0x20(%rdx,%rdi), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vcvtdq2ps %xmm4, %xmm4
vmulps %xmm4, %xmm3, %xmm4
vaddps %xmm4, %xmm1, %xmm4
movq 0x130(%rsp), %rdx
vmovq 0x20(%rdx,%rdi), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm3, %xmm1, %xmm1
vbroadcastss 0x4c(%rsi,%rcx), %xmm3
movq 0x128(%rsp), %rdx
vmovq 0x20(%rdx,%rdi), %xmm5
vpmovzxbd %xmm5, %xmm5 # xmm5 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vcvtdq2ps %xmm5, %xmm5
vmulps %xmm5, %xmm3, %xmm5
movq 0x120(%rsp), %rdx
movq %rdi, 0x178(%rsp)
vmovq 0x20(%rdx,%rdi), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vcvtdq2ps %xmm6, %xmm6
vmulps %xmm6, %xmm3, %xmm3
vbroadcastss 0x40(%rsi,%rcx), %xmm6
vaddps %xmm5, %xmm6, %xmm5
vaddps %xmm3, %xmm6, %xmm3
vsubps %xmm7, %xmm2, %xmm2
vmulps %xmm2, %xmm10, %xmm2
vsubps %xmm8, %xmm4, %xmm4
vmulps %xmm4, %xmm11, %xmm4
vpmaxsd %xmm4, %xmm2, %xmm2
vsubps %xmm7, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vsubps %xmm8, %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vpminsd %xmm1, %xmm0, %xmm0
vsubps %xmm9, %xmm5, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vpmaxsd %xmm13, %xmm1, %xmm1
vpmaxsd %xmm1, %xmm2, %xmm1
vsubps %xmm9, %xmm3, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vpminsd %xmm14, %xmm2, %xmm2
vpminsd %xmm2, %xmm0, %xmm0
vpcmpgtd %xmm0, %xmm1, %xmm0
vmovmskps %xmm0, %ecx
notb %cl
andb %al, %cl
je 0x284a6d
movzbl %cl, %r15d
bsfq %r15, %rax
movq 0x178(%rsp), %rcx
movzwl (%rcx,%rax,8), %r10d
movzwl 0x2(%rcx,%rax,8), %r9d
movl 0x50(%rcx), %edx
movl 0x4(%rcx,%rax,8), %ecx
movq 0x18(%rbp), %rax
movq (%rax), %rax
movq 0x1e8(%rax), %rax
movq %rdx, 0x168(%rsp)
movq (%rax,%rdx,8), %r12
movq 0x58(%r12), %rdx
movq 0x68(%r12), %rsi
movq %rcx, 0x160(%rsp)
imulq %rcx, %rsi
movl %r10d, %edi
movl $0x7fff, %ecx # imm = 0x7FFF
andl %ecx, %edi
movl (%rdx,%rsi), %eax
addl %edi, %eax
movl %r9d, %r8d
andl %ecx, %r8d
movq %rdx, 0x170(%rsp)
movl 0x4(%rdx,%rsi), %edx
movl %edx, %ecx
imull %r8d, %ecx
addl %eax, %ecx
movq 0xa0(%r12), %r11
movq %r11, %rbx
imulq %rcx, %rbx
movq 0x90(%r12), %rax
vmovups (%rax,%rbx), %xmm3
leaq 0x1(%rcx), %rbx
imulq %r11, %rbx
vmovups (%rax,%rbx), %xmm2
leaq (%rcx,%rdx), %rbx
movq %rbx, %r14
imulq %r11, %r14
vmovups (%rax,%r14), %xmm0
leaq (%rcx,%rdx), %r14
incq %r14
movq %r14, %r13
imulq %r11, %r13
vmovups (%rax,%r13), %xmm1
xorl %r13d, %r13d
testw %r10w, %r10w
setns %r13b
addq %r13, %rcx
incq %rcx
imulq %r11, %rcx
vmovups (%rax,%rcx), %xmm5
addq %r14, %r13
movq %r13, %rcx
imulq %r11, %rcx
vmovups (%rax,%rcx), %xmm4
testw %r9w, %r9w
movl $0x0, %ecx
cmovnsq %rdx, %rcx
addq %rcx, %rbx
imulq %r11, %rbx
vmovups (%rax,%rbx), %xmm6
addq %rcx, %r14
imulq %r11, %r14
vmovups (%rax,%r14), %xmm7
addq %r13, %rcx
imulq %r11, %rcx
vmovups (%rax,%rcx), %xmm8
vunpcklps %xmm1, %xmm3, %xmm9 # xmm9 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
vunpckhps %xmm1, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
vunpcklps %xmm0, %xmm2, %xmm10 # xmm10 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm11 # xmm11 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vunpcklps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpcklps %xmm10, %xmm9, %xmm11 # xmm11 = xmm9[0],xmm10[0],xmm9[1],xmm10[1]
vunpckhps %xmm10, %xmm9, %xmm9 # xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
vunpcklps %xmm4, %xmm2, %xmm10 # xmm10 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
vunpckhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm4[2],xmm2[3],xmm4[3]
vunpcklps %xmm1, %xmm5, %xmm12 # xmm12 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
vunpckhps %xmm1, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
vunpcklps %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
vunpcklps %xmm12, %xmm10, %xmm13 # xmm13 = xmm10[0],xmm12[0],xmm10[1],xmm12[1]
vunpckhps %xmm12, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm12[2],xmm10[3],xmm12[3]
vunpcklps %xmm8, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
vunpckhps %xmm8, %xmm1, %xmm8 # xmm8 = xmm1[2],xmm8[2],xmm1[3],xmm8[3]
vunpcklps %xmm7, %xmm4, %xmm12 # xmm12 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
vunpckhps %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
vunpcklps %xmm4, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm4[0],xmm8[1],xmm4[1]
vunpcklps %xmm12, %xmm5, %xmm4 # xmm4 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
vunpckhps %xmm12, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
vunpcklps %xmm7, %xmm0, %xmm12 # xmm12 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
vunpckhps %xmm7, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
vunpcklps %xmm6, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpckhps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
vunpcklps %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
vunpcklps %xmm7, %xmm12, %xmm1 # xmm1 = xmm12[0],xmm7[0],xmm12[1],xmm7[1]
vunpckhps %xmm7, %xmm12, %xmm6 # xmm6 = xmm12[2],xmm7[2],xmm12[3],xmm7[3]
vinsertf128 $0x1, %xmm4, %ymm11, %ymm4
vinsertf128 $0x1, %xmm5, %ymm9, %ymm5
vinsertf128 $0x1, %xmm8, %ymm3, %ymm7
vinsertf128 $0x1, %xmm13, %ymm13, %ymm3
vinsertf128 $0x1, %xmm10, %ymm10, %ymm8
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vinsertf128 $0x1, %xmm6, %ymm6, %ymm10
vinsertf128 $0x1, %xmm0, %ymm0, %ymm11
vsubps %ymm3, %ymm4, %ymm0
vmovaps %ymm0, 0x20(%rsp)
vsubps %ymm8, %ymm5, %ymm6
vsubps %ymm2, %ymm7, %ymm8
vsubps %ymm4, %ymm1, %ymm9
vsubps %ymm5, %ymm10, %ymm10
vsubps %ymm7, %ymm11, %ymm11
vmulps %ymm6, %ymm11, %ymm1
vmulps %ymm8, %ymm10, %ymm2
vsubps %ymm1, %ymm2, %ymm1
vmovaps %ymm1, 0x180(%rsp)
vmulps %ymm8, %ymm9, %ymm2
vmulps %ymm0, %ymm11, %ymm3
vsubps %ymm2, %ymm3, %ymm1
vmovaps %ymm1, 0x200(%rsp)
vmulps %ymm0, %ymm10, %ymm3
vmulps %ymm6, %ymm9, %ymm12
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vbroadcastss (%rax,%rcx,4), %ymm13
vbroadcastss 0x10(%rax,%rcx,4), %ymm14
vbroadcastss 0x20(%rax,%rcx,4), %ymm15
vbroadcastss 0x50(%rax,%rcx,4), %ymm1
vsubps %ymm3, %ymm12, %ymm0
vsubps %ymm13, %ymm4, %ymm4
vbroadcastss 0x60(%rax,%rcx,4), %ymm12
vsubps %ymm14, %ymm5, %ymm5
vsubps %ymm15, %ymm7, %ymm7
vmulps %ymm1, %ymm7, %ymm13
vmulps %ymm5, %ymm12, %ymm14
vsubps %ymm13, %ymm14, %ymm13
vbroadcastss 0x40(%rax,%rcx,4), %ymm14
vmulps %ymm4, %ymm12, %ymm15
vmulps %ymm7, %ymm14, %ymm2
vsubps %ymm15, %ymm2, %ymm2
vmulps %ymm5, %ymm14, %ymm15
vmulps %ymm1, %ymm4, %ymm3
vsubps %ymm15, %ymm3, %ymm3
vmovaps 0x200(%rsp), %ymm15
vmovaps %ymm0, 0x1e0(%rsp)
vmulps %ymm0, %ymm12, %ymm12
vmulps %ymm1, %ymm15, %ymm1
vaddps %ymm1, %ymm12, %ymm1
vmovaps 0x180(%rsp), %ymm0
vmulps %ymm0, %ymm14, %ymm12
vmovaps %ymm0, %ymm14
vaddps %ymm1, %ymm12, %ymm1
vmulps %ymm3, %ymm11, %ymm11
vmulps %ymm2, %ymm10, %ymm10
vaddps %ymm10, %ymm11, %ymm10
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm10
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm2, %ymm6, %ymm2
vaddps %ymm2, %ymm3, %ymm2
vbroadcastss 0x1c9c9ba(%rip), %ymm3 # 0x1f20ec0
vandps %ymm3, %ymm1, %ymm9
vxorps %ymm10, %ymm9, %ymm6
vmulps 0x20(%rsp), %ymm13, %ymm0
vaddps %ymm2, %ymm0, %ymm0
vxorps %ymm0, %ymm9, %ymm8
vxorps %xmm3, %xmm3, %xmm3
vcmpnltps %ymm3, %ymm6, %ymm0
vcmpnltps %ymm3, %ymm8, %ymm2
vandps %ymm2, %ymm0, %ymm2
vbroadcastss 0x1c9c98c(%rip), %ymm0 # 0x1f20ec4
vandps %ymm0, %ymm1, %ymm0
vcmpneqps %ymm3, %ymm1, %ymm1
vandps %ymm1, %ymm2, %ymm10
vaddps %ymm6, %ymm8, %ymm1
vcmpleps %ymm0, %ymm1, %ymm11
vtestps %ymm11, %ymm10
jne 0x284567
leaq -0x1(%r15), %rax
andq %rax, %r15
jne 0x284218
jmp 0x284a6d
vandps %ymm11, %ymm10, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmulps 0x1e0(%rsp), %ymm7, %ymm2
vmulps %ymm5, %ymm15, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vmulps %ymm4, %ymm14, %ymm3
vaddps %ymm2, %ymm3, %ymm2
vxorps %ymm2, %ymm9, %ymm4
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vbroadcastss 0x30(%rax,%rcx,4), %ymm2
vmulps %ymm2, %ymm0, %ymm2
vcmpltps %ymm4, %ymm2, %ymm2
vbroadcastss 0x80(%rax,%rcx,4), %ymm3
vmulps %ymm3, %ymm0, %ymm3
vcmpleps %ymm3, %ymm4, %ymm3
vandps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vpackssdw %xmm3, %xmm2, %xmm2
vpand %xmm1, %xmm2, %xmm1
vpmovzxwd %xmm1, %xmm2 # xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
vpslld $0x1f, %xmm2, %xmm2
vpsrad $0x1f, %xmm2, %xmm2
vpunpckhwd %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[4,4,5,5,6,6,7,7]
vpslld $0x1f, %xmm1, %xmm1
vpsrad $0x1f, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm2, %ymm5
vtestps %ymm5, %ymm5
je 0x284555
vmovaps %ymm6, 0x2c0(%rsp)
vmovaps %ymm8, 0x2e0(%rsp)
vmovaps %ymm4, 0x300(%rsp)
vmovaps %ymm0, 0x320(%rsp)
vmovaps %ymm5, 0x360(%rsp)
vmovaps 0x2e0(%rsp), %ymm1
vsubps %ymm1, %ymm0, %ymm2
vmovaps 0x2c0(%rsp), %ymm3
vmovaps 0x460(%rsp), %ymm4
vblendvps %ymm4, %ymm2, %ymm3, %ymm5
vsubps %ymm3, %ymm0, %ymm2
vblendvps %ymm4, %ymm2, %ymm1, %ymm4
vmovaps %ymm5, 0x2c0(%rsp)
vmovaps %ymm4, 0x2e0(%rsp)
vmovaps 0x440(%rsp), %ymm2
vmulps %ymm2, %ymm14, %ymm1
vmovaps %ymm1, 0x3e0(%rsp)
vmulps %ymm2, %ymm15, %ymm1
vmovaps %ymm1, 0x400(%rsp)
vmulps 0x1e0(%rsp), %ymm2, %ymm1
vmovaps %ymm1, 0x420(%rsp)
vmovd %edi, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vpaddd 0x1c982dc(%rip), %xmm1, %xmm1 # 0x1f1c990
vmovd %r8d, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vpaddd 0x1c982da(%rip), %xmm2, %xmm2 # 0x1f1c9a0
movq 0x170(%rsp), %rcx
movzwl 0x8(%rcx,%rsi), %eax
decl %eax
vcvtsi2ss %eax, %xmm7, %xmm3
vrcpss %xmm3, %xmm3, %xmm6
vmulss %xmm3, %xmm6, %xmm3
vmovss 0x1c6c90f(%rip), %xmm8 # 0x1ef0ff8
vsubss %xmm3, %xmm8, %xmm3
movzwl 0xa(%rcx,%rsi), %eax
decl %eax
vcvtsi2ss %eax, %xmm7, %xmm7
vmulss %xmm3, %xmm6, %xmm3
vrcpss %xmm7, %xmm7, %xmm6
vmulss %xmm7, %xmm6, %xmm7
vsubss %xmm7, %xmm8, %xmm7
vmulss %xmm7, %xmm6, %xmm6
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vcvtdq2ps %ymm1, %ymm1
vmulps %ymm1, %ymm0, %ymm1
vaddps %ymm5, %ymm1, %ymm1
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vmovaps %ymm1, 0x2c0(%rsp)
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm2, %ymm0, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vshufps $0x0, %xmm6, %xmm6, %xmm3 # xmm3 = xmm6[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vmulps %ymm3, %ymm2, %ymm2
vmovaps %ymm2, 0x2e0(%rsp)
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
movl 0x90(%rax,%rcx,4), %eax
testl %eax, 0x34(%r12)
je 0x284555
movq 0x160(%rsp), %rdx
movq 0x168(%rsp), %rcx
movq 0x18(%rbp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x2847a7
cmpq $0x0, 0x48(%r12)
je 0x284b43
vrcpps %ymm0, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vbroadcastss 0x1c67f5c(%rip), %ymm4 # 0x1eec714
vsubps %ymm0, %ymm4, %ymm0
vmulps %ymm0, %ymm3, %ymm0
vaddps %ymm0, %ymm3, %ymm0
vmulps 0x300(%rsp), %ymm0, %ymm3
vmovaps %ymm3, 0x3c0(%rsp)
vmulps %ymm0, %ymm1, %ymm1
vmovaps %ymm1, 0x380(%rsp)
vmulps %ymm0, %ymm2, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x360(%rsp), %ymm0
vmovmskps %ymm0, %ebx
bsfq %rbx, %r14
testl %ebx, %ebx
setne %r13b
je 0x284a5e
vmovd %ecx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovdqa %xmm0, 0x180(%rsp)
vmovd %edx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovdqa %xmm0, 0x200(%rsp)
movq 0xd0(%rsp), %rax
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
movq 0x10(%rsp), %rdx
movq 0x8(%rsp), %rax
vmovss 0x80(%rdx,%rax,4), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x3c0(%rsp,%r14,4), %xmm0
vbroadcastss 0x380(%rsp,%r14,4), %xmm1
vbroadcastss 0x3a0(%rsp,%r14,4), %xmm2
vmovss %xmm0, 0x80(%rdx,%rax,4)
movq 0x18(%rbp), %rax
movq 0x8(%rax), %rax
vbroadcastss 0x3e0(%rsp,%r14,4), %xmm0
vbroadcastss 0x400(%rsp,%r14,4), %xmm3
vbroadcastss 0x420(%rsp,%r14,4), %xmm4
vmovaps %xmm0, 0x230(%rsp)
vmovaps %xmm3, 0x240(%rsp)
vmovaps %xmm4, 0x250(%rsp)
vmovaps %xmm1, 0x260(%rsp)
vmovaps %xmm2, 0x270(%rsp)
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x280(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmovaps %xmm0, 0x290(%rsp)
leaq 0x2a0(%rsp), %rcx
vcmptrueps %ymm0, %ymm0, %ymm0
vmovups %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x2a0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x2b0(%rsp)
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm0, 0xc0(%rsp)
leaq 0xc0(%rsp), %rcx
movq %rcx, 0x1b0(%rsp)
movq 0x18(%r12), %rcx
movq %rcx, 0x1b8(%rsp)
movq %rax, 0x1c0(%rsp)
movq %rdx, 0x1c8(%rsp)
leaq 0x230(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x4, 0x1d8(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
je 0x28499c
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovdqa 0xc0(%rsp), %xmm0
vptest %xmm0, %xmm0
je 0x284a11
movq 0x18(%rbp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x2849d7
testb $0x2, (%rcx)
jne 0x2849ca
testb $0x40, 0x3e(%r12)
je 0x2849d7
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vpxor %xmm0, %xmm0, %xmm0
vpcmpeqd 0xc0(%rsp), %xmm0, %xmm1
vpxor 0x1c67434(%rip), %xmm1, %xmm0 # 0x1eebe20
movq 0x1c8(%rsp), %rax
vbroadcastss 0x1c68187(%rip), %xmm2 # 0x1eecb84
vblendvps %xmm1, 0x80(%rax), %xmm2, %xmm1
vmovaps %xmm1, 0x80(%rax)
jmp 0x284a21
vpcmpeqd 0x1c66ff7(%rip), %xmm0, %xmm0 # 0x1eeba10
vpxor 0x1c673ff(%rip), %xmm0, %xmm0 # 0x1eebe20
vmovddup 0x1c9c4bf(%rip), %xmm1 # xmm1 = mem[0,0]
vptest %xmm1, %xmm0
jne 0x284a5e
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x80(%rax,%rcx,4)
btcq %r14, %rbx
bsfq %rbx, %r14
testq %rbx, %rbx
setne %r13b
jne 0x284846
testb $0x1, %r13b
je 0x284555
jmp 0x284b43
movq 0x158(%rsp), %rax
incq %rax
cmpq 0x150(%rsp), %rax
setb %cl
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm14
movq 0x18(%rsp), %rdx
jne 0x28407f
xorl %eax, %eax
testb $0x1, %cl
movq 0x118(%rsp), %rdi
movq 0x100(%rsp), %r8
movq 0xf8(%rsp), %r9
movq 0xf0(%rsp), %r10
movq 0xe8(%rsp), %r11
movq 0xe0(%rsp), %rbx
movq 0xd8(%rsp), %r14
leaq 0x480(%rsp), %r15
movq 0x110(%rsp), %r12
movq 0x108(%rsp), %r13
je 0x284b39
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
movl $0xff800000, 0x80(%rax,%rcx,4) # imm = 0xFF800000
pushq $0x1
popq %rax
testb $0x3, %al
je 0x283ed9
jmp 0x284b88
vmovaps 0xb0(%rsp), %xmm7
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x70(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm14
movb 0x7(%rsp), %cl
jmp 0x284aca
cmpq %r15, %r12
setne %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::BVHNIntersectorKHybrid<8, 4, 1, false, embree::avx::ArrayIntersectorK_1<4, embree::avx::ObjectIntersectorK<4, false>>, false>::intersectCoherent(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayHitK<4>&, embree::RayQueryContext*)
|
__forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
|
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vmovmskps %xmm1, %eax
testl %eax, %eax
je 0x3695fa
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x26a0, %rsp # imm = 0x26A0
movq (%rsi), %rsi
movq %rsi, 0x28(%rsp)
movzbl %al, %eax
vmovaps (%rdx), %xmm10
vmovaps 0x10(%rdx), %xmm11
vmovaps 0x20(%rdx), %xmm12
vmovaps 0x30(%rdx), %xmm3
vmovaps 0x40(%rdx), %xmm2
vbroadcastss 0x1bb8322(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm2, %xmm5
vbroadcastss 0x1b88439(%rip), %xmm6 # 0x1ef0fe8
vcmpltps %xmm6, %xmm5, %xmm5
vblendvps %xmm5, %xmm6, %xmm2, %xmm5
vmovaps 0x50(%rdx), %xmm7
vandps %xmm4, %xmm7, %xmm8
vcmpltps %xmm6, %xmm8, %xmm8
vblendvps %xmm8, %xmm6, %xmm7, %xmm7
vmovaps 0x60(%rdx), %xmm8
vandps %xmm4, %xmm8, %xmm4
vcmpltps %xmm6, %xmm4, %xmm4
vblendvps %xmm4, %xmm6, %xmm8, %xmm4
vrcpps %xmm5, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vbroadcastss 0x1b83b21(%rip), %xmm8 # 0x1eec714
vsubps %xmm5, %xmm8, %xmm5
vmulps %xmm5, %xmm6, %xmm5
vaddps %xmm5, %xmm6, %xmm13
vrcpps %xmm7, %xmm5
vmulps %xmm5, %xmm7, %xmm6
vsubps %xmm6, %xmm8, %xmm6
vmulps %xmm6, %xmm5, %xmm6
vaddps %xmm6, %xmm5, %xmm14
vrcpps %xmm4, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vsubps %xmm4, %xmm8, %xmm4
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm5, %xmm15
vxorps %xmm6, %xmm6, %xmm6
vmaxps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0xf0(%rsp)
vmovaps 0x80(%rdx), %xmm3
vmaxps %xmm6, %xmm3, %xmm3
vmovaps %xmm3, 0xe0(%rsp)
vcmpltps %xmm6, %xmm2, %xmm2
vbroadcastss 0x1ba9aa9(%rip), %xmm3 # 0x1f12704
vandps %xmm3, %xmm2, %xmm2
vmovaps 0x50(%rdx), %xmm3
vmovaps 0x60(%rdx), %xmm4
vcmpltps %xmm6, %xmm3, %xmm3
vbroadcastss 0x1bb8265(%rip), %xmm5 # 0x1f20edc
vandps %xmm5, %xmm3, %xmm3
vorps %xmm2, %xmm3, %xmm2
vcmpltps %xmm6, %xmm4, %xmm3
vbroadcastss 0x1bf1ce7(%rip), %xmm4 # 0x1f5a974
vandps %xmm4, %xmm3, %xmm3
vpxor %xmm0, %xmm1, %xmm0
vpor %xmm3, %xmm0, %xmm0
vpor %xmm2, %xmm0, %xmm0
vmovdqa %xmm0, 0x70(%rsp)
vbroadcastss 0x1b82d74(%rip), %xmm9 # 0x1eeba20
leaq 0x340(%rsp), %r11
pushq $0x8
popq %r8
movq %rcx, 0x40(%rsp)
movq %rdx, 0x38(%rsp)
vmovaps %xmm10, 0x150(%rsp)
vmovaps %xmm11, 0x140(%rsp)
vmovaps %xmm12, 0x130(%rsp)
vmovaps %xmm13, 0x120(%rsp)
vmovaps %xmm14, 0x110(%rsp)
vmovaps %xmm15, 0x100(%rsp)
bsfq %rax, %rsi
vbroadcastss 0x70(%rsp,%rsi,4), %xmm0
vpcmpeqd 0x70(%rsp), %xmm0, %xmm0
vmovmskps %xmm0, %esi
notq %rsi
vblendvps %xmm0, %xmm13, %xmm9, %xmm1
andq %rax, %rsi
movq %rsi, 0x30(%rsp)
vshufps $0xb1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0,3,2]
vminps %xmm1, %xmm2, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vminps %xmm1, %xmm2, %xmm1
vblendvps %xmm0, %xmm14, %xmm9, %xmm2
vshufps $0xb1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0,3,2]
vminps %xmm2, %xmm3, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vminps %xmm2, %xmm3, %xmm2
vinsertps $0x1c, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],zero,zero
vblendvps %xmm0, %xmm15, %xmm9, %xmm2
vshufps $0xb1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0,3,2]
vminps %xmm2, %xmm3, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vminps %xmm2, %xmm3, %xmm2
vbroadcastss 0x1b83e15(%rip), %xmm8 # 0x1eecb84
vblendvps %xmm0, %xmm13, %xmm8, %xmm3
vinsertps $0x20, %xmm2, %xmm1, %xmm2 # xmm2 = xmm1[0,1],xmm2[0],xmm1[3]
vshufps $0xb1, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[1,0,3,2]
vmaxps %xmm3, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,0]
vmaxps %xmm1, %xmm3, %xmm1
vblendvps %xmm0, %xmm14, %xmm8, %xmm3
vshufps $0xb1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0,3,2]
vmaxps %xmm3, %xmm4, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vmaxps %xmm3, %xmm4, %xmm3
vinsertps $0x1c, %xmm3, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm3[0],zero,zero
vblendvps %xmm0, %xmm15, %xmm8, %xmm3
vshufps $0xb1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0,3,2]
vmaxps %xmm3, %xmm4, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vmaxps %xmm3, %xmm4, %xmm3
vinsertps $0x20, %xmm3, %xmm1, %xmm3 # xmm3 = xmm1[0,1],xmm3[0],xmm1[3]
vcmpnltps 0x1b82c3e(%rip), %xmm2, %xmm4 # 0x1eeba10
vblendvps %xmm4, %xmm2, %xmm3, %xmm1
vmovshdup %xmm1, %xmm5 # xmm5 = xmm1[1,1,3,3]
xorl %esi, %esi
vxorps %xmm6, %xmm6, %xmm6
vucomiss %xmm5, %xmm6
seta %sil
shll $0x5, %esi
orq $0x40, %rsi
vshufpd $0x1, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,0]
xorl %r12d, %r12d
vucomiss %xmm5, %xmm6
seta %r12b
shll $0x5, %r12d
orq $0x80, %r12
movq %rsi, 0x68(%rsp)
xorq $0x20, %rsi
movq %rsi, 0x60(%rsp)
xorl %ebx, %ebx
vucomiss %xmm1, %xmm6
seta %bl
vblendvps %xmm0, %xmm10, %xmm9, %xmm5
vshufps $0xb1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0,3,2]
vminps %xmm5, %xmm6, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0]
vminps %xmm5, %xmm6, %xmm5
vblendvps %xmm0, %xmm11, %xmm9, %xmm6
vshufps $0xb1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0,3,2]
vminps %xmm6, %xmm7, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0]
vminps %xmm6, %xmm7, %xmm6
vblendvps %xmm0, %xmm12, %xmm9, %xmm7
vinsertps $0x1c, %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],zero,zero
vshufps $0xb1, %xmm7, %xmm7, %xmm6 # xmm6 = xmm7[1,0,3,2]
vminps %xmm7, %xmm6, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0]
vminps %xmm6, %xmm7, %xmm6
vinsertps $0x20, %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
vmovaps %xmm8, %xmm9
vblendvps %xmm0, %xmm10, %xmm8, %xmm6
vshufps $0xb1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0,3,2]
vmaxps %xmm6, %xmm7, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0]
vmaxps %xmm6, %xmm7, %xmm6
vblendvps %xmm0, %xmm11, %xmm8, %xmm7
vshufps $0xb1, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,0,3,2]
vmaxps %xmm7, %xmm8, %xmm7
vshufpd $0x1, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,0]
vmaxps %xmm7, %xmm8, %xmm7
vblendvps %xmm0, %xmm12, %xmm9, %xmm8
vinsertps $0x1c, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm7[0],zero,zero
vshufps $0xb1, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,0,3,2]
vmaxps %xmm8, %xmm7, %xmm7
vshufpd $0x1, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,0]
vmaxps %xmm7, %xmm8, %xmm7
vinsertps $0x20, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0,1],xmm7[0],xmm6[3]
vblendvps %xmm4, %xmm5, %xmm6, %xmm7
vblendvps %xmm4, %xmm6, %xmm5, %xmm6
vblendvps %xmm0, 0xe0(%rsp), %xmm9, %xmm8
vshufps $0xb1, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[1,0,3,2]
vmovaps %xmm8, 0x10(%rsp)
vmaxps %xmm8, %xmm5, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,0]
vbroadcastss 0x1b82b17(%rip), %xmm5 # 0x1eeba20
vblendvps %xmm0, 0xf0(%rsp), %xmm5, %xmm5
vshufps $0xb1, %xmm5, %xmm5, %xmm0 # xmm0 = xmm5[1,0,3,2]
vmovaps %xmm5, 0x170(%rsp)
vminps %xmm5, %xmm0, %xmm5
vmaxss %xmm8, %xmm9, %xmm0
vbroadcastss 0x1b82aec(%rip), %xmm9 # 0x1eeba20
vblendvps %xmm4, %xmm3, %xmm2, %xmm2
vmulps %xmm1, %xmm6, %xmm4
vmulps %xmm2, %xmm7, %xmm3
shll $0x5, %ebx
movq %rbx, 0x58(%rsp)
xorq $0x20, %rbx
movq %r12, %r13
xorq $0x20, %r12
movq 0x28(%rsp), %rax
movq 0x70(%rax), %rax
movq %rax, 0x340(%rsp)
andl $0x0, 0x348(%rsp)
vshufps $0x0, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vmovaps %ymm6, 0x300(%rsp)
vshufps $0x0, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vmovaps %ymm6, 0x2e0(%rsp)
vshufps $0x55, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[1,1,1,1]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vmovaps %ymm6, 0x2c0(%rsp)
vshufps $0x55, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,1,1,1]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vmovaps %ymm6, 0x2a0(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovaps %ymm1, 0x280(%rsp)
vshufps $0xaa, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[2,2,2,2]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovaps %ymm1, 0x260(%rsp)
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovaps %ymm1, 0x240(%rsp)
vshufps $0x0, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovaps %ymm1, 0x220(%rsp)
vshufps $0x55, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovaps %ymm1, 0x200(%rsp)
vshufps $0x0, %xmm5, %xmm5, %xmm1 # xmm1 = xmm5[0,0,0,0]
vshufps $0xaa, %xmm5, %xmm5, %xmm4 # xmm4 = xmm5[2,2,2,2]
vminps %xmm1, %xmm4, %xmm1
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovaps %ymm4, 0x1e0(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vmovaps %ymm2, 0x1c0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vmovaps %ymm2, 0x1a0(%rsp)
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovaps %ymm1, 0x180(%rsp)
leaq 0x350(%rsp), %r14
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x320(%rsp)
cmpq %r11, %r14
je 0x3695de
movq %r14, %rax
addq $-0x10, %r14
vbroadcastss -0x8(%rax), %xmm0
vcmpltps 0x10(%rsp), %xmm0, %xmm1
vtestps %xmm1, %xmm1
je 0x369097
movq -0x10(%rax), %r15
testb $0x8, %r15b
jne 0x36942e
movq 0x58(%rsp), %rax
vmovaps 0x300(%rsp), %ymm1
vmulps 0x40(%r15,%rax), %ymm1, %ymm1
movq 0x68(%rsp), %rax
vmovaps 0x2c0(%rsp), %ymm2
vmulps 0x40(%r15,%rax), %ymm2, %ymm2
vsubps 0x2e0(%rsp), %ymm1, %ymm1
vsubps 0x2a0(%rsp), %ymm2, %ymm2
vmovaps 0x280(%rsp), %ymm3
vmulps 0x40(%r15,%r13), %ymm3, %ymm3
vsubps 0x260(%rsp), %ymm3, %ymm3
vmovaps 0x240(%rsp), %ymm4
vmulps 0x40(%r15,%rbx), %ymm4, %ymm4
vsubps 0x220(%rsp), %ymm4, %ymm4
vpmaxsd %xmm2, %xmm1, %xmm5
vextractf128 $0x1, %ymm2, %xmm2
vextractf128 $0x1, %ymm1, %xmm1
vpmaxsd %xmm2, %xmm1, %xmm1
vmovdqa 0x180(%rsp), %ymm6
vpmaxsd %xmm6, %xmm3, %xmm2
vpmaxsd %xmm2, %xmm5, %xmm2
vextractf128 $0x1, %ymm3, %xmm3
vextractf128 $0x1, %ymm6, %xmm5
vpmaxsd %xmm5, %xmm3, %xmm3
movq 0x60(%rsp), %rax
vmovaps 0x200(%rsp), %ymm5
vmulps 0x40(%r15,%rax), %ymm5, %ymm5
vsubps 0x1e0(%rsp), %ymm5, %ymm5
vpmaxsd %xmm3, %xmm1, %xmm1
vmovaps 0x1c0(%rsp), %ymm3
vmulps 0x40(%r15,%r12), %ymm3, %ymm3
vsubps 0x1a0(%rsp), %ymm3, %ymm3
vinsertf128 $0x1, %xmm1, %ymm2, %ymm1
vpminsd %xmm5, %xmm4, %xmm2
vextractf128 $0x1, %ymm5, %xmm5
vextractf128 $0x1, %ymm4, %xmm4
vpminsd %xmm5, %xmm4, %xmm4
vmovdqa 0x320(%rsp), %ymm6
vpminsd %xmm6, %xmm3, %xmm5
vpminsd %xmm5, %xmm2, %xmm2
vextractf128 $0x1, %ymm3, %xmm3
vextractf128 $0x1, %ymm6, %xmm5
vpminsd %xmm5, %xmm3, %xmm3
vpminsd %xmm3, %xmm4, %xmm3
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vmovaps %ymm1, 0x80(%rsp)
vcmpleps %ymm2, %ymm1, %ymm1
vmovmskps %ymm1, %eax
testl %eax, %eax
je 0x36936f
movzbl %al, %edi
vmovaps %xmm9, %xmm0
movq %r8, %rax
xorl %r9d, %r9d
bsfq %rdi, %rsi
vbroadcastss 0x40(%r15,%rsi,4), %xmm1
vsubps %xmm10, %xmm1, %xmm1
vmulps %xmm1, %xmm13, %xmm1
vbroadcastss 0x80(%r15,%rsi,4), %xmm2
vsubps %xmm11, %xmm2, %xmm2
vmulps %xmm2, %xmm14, %xmm2
vbroadcastss 0xc0(%r15,%rsi,4), %xmm3
vsubps %xmm12, %xmm3, %xmm3
vmulps %xmm3, %xmm15, %xmm3
vbroadcastss 0x60(%r15,%rsi,4), %xmm4
vsubps %xmm10, %xmm4, %xmm4
vmulps %xmm4, %xmm13, %xmm4
vbroadcastss 0xa0(%r15,%rsi,4), %xmm5
vsubps %xmm11, %xmm5, %xmm5
vmulps %xmm5, %xmm14, %xmm5
vbroadcastss 0xe0(%r15,%rsi,4), %xmm6
vsubps %xmm12, %xmm6, %xmm6
vmulps %xmm6, %xmm15, %xmm6
vpminsd %xmm4, %xmm1, %xmm7
vpminsd %xmm5, %xmm2, %xmm8
vpmaxsd %xmm8, %xmm7, %xmm7
vpminsd %xmm6, %xmm3, %xmm8
vpmaxsd %xmm4, %xmm1, %xmm1
vpmaxsd %xmm5, %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vpmaxsd %xmm6, %xmm3, %xmm2
vpmaxsd 0x170(%rsp), %xmm8, %xmm3
vpmaxsd %xmm3, %xmm7, %xmm3
vpminsd 0x10(%rsp), %xmm2, %xmm2
vpminsd %xmm2, %xmm1, %xmm1
vcmpleps %xmm1, %xmm3, %xmm1
vtestps %xmm1, %xmm1
je 0x369337
vbroadcastss 0x80(%rsp,%rsi,4), %xmm1
movq (%r15,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
prefetcht0 0x80(%rsi)
prefetcht0 0xc0(%rsi)
vcmpltps %xmm0, %xmm1, %xmm2
vtestps %xmm2, %xmm2
je 0x369323
cmpq $0x8, %rax
je 0x369346
movq %rax, (%r14)
vmovaps %xmm0, %xmm2
vmovaps %xmm1, %xmm0
movq %rsi, %rax
jmp 0x36932a
movq %rsi, (%r14)
vmovaps %xmm1, %xmm2
incq %r9
vmovss %xmm2, 0x8(%r14)
addq $0x10, %r14
leaq -0x1(%rdi), %rsi
andq %rsi, %rdi
jne 0x36921f
jmp 0x36934f
vmovaps %xmm1, %xmm0
movq %rsi, %rax
jmp 0x369337
cmpq $0x8, %rax
je 0x369374
movb $0x1, %r10b
cmpq $0x2, %r9
jae 0x369379
movq %rax, %r15
testb %r10b, %r10b
jne 0x3690bf
jmp 0x369097
xorl %r10d, %r10d
jmp 0x369361
xorl %r10d, %r10d
jmp 0x36935e
leaq -0x20(%r14), %rsi
movl -0x18(%r14), %r8d
leaq -0x10(%r14), %rdi
cmpl -0x8(%r14), %r8d
jae 0x3693b2
vmovaps (%rsi), %xmm1
vmovaps %xmm1, (%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%rsi)
movq (%rdi), %r8
movq %r8, (%rsi)
movq (%rsp), %r8
movq %r8, (%rdi)
movl 0x8(%rsp), %r8d
movl %r8d, 0x8(%rdi)
cmpq $0x2, %r9
pushq $0x8
popq %r8
je 0x36935e
leaq -0x30(%r14), %r9
movl -0x28(%r14), %r8d
cmpl -0x8(%r14), %r8d
jae 0x3693f2
vmovaps (%r9), %xmm1
vmovaps %xmm1, (%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%r9)
movq (%rdi), %r8
movq %r8, (%r9)
movq (%rsp), %r8
movq %r8, (%rdi)
movl 0x8(%rsp), %r8d
movl %r8d, 0x8(%rdi)
movl -0x28(%r14), %edi
cmpl -0x18(%r14), %edi
pushq $0x8
popq %r8
jae 0x36935e
vmovaps (%r9), %xmm1
vmovaps %xmm1, (%rsp)
movl 0x8(%rsi), %edi
movl %edi, 0x8(%r9)
movq (%rsi), %rdi
movq %rdi, (%r9)
movq (%rsp), %rdi
movq %rdi, (%rsi)
movl 0x8(%rsp), %edi
movl %edi, 0x8(%rsi)
jmp 0x36935e
vmovaps 0x10(%rsp), %xmm1
vcmpnleps %xmm0, %xmm1, %xmm2
vtestps %xmm2, %xmm2
je 0x369097
movl %r15d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x48(%rsp)
je 0x36959b
andq $-0x10, %r15
xorl %esi, %esi
vmovaps %xmm2, 0x160(%rsp)
movq (%rcx), %rax
movl (%r15,%rsi,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
vbroadcastss 0x34(%rax), %xmm0
vandps 0x90(%rdx), %xmm0, %xmm0
vpcmpeqd 0x1b82580(%rip), %xmm0, %xmm0 # 0x1eeba10
vtestps %xmm2, %xmm0
jb 0x36958d
vandnps %xmm2, %xmm0, %xmm0
movq %rsi, 0x50(%rsp)
movl 0x4(%r15,%rsi,8), %r8d
vmovaps %xmm0, (%rsp)
movq %rsp, %rsi
movq %rsi, 0x80(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x88(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x98(%rsp)
movq %rdx, 0xa0(%rsp)
movl $0x4, 0xa8(%rsp)
movl %edi, 0xac(%rsp)
movl %r8d, 0x90(%rsp)
movq %rax, 0xb0(%rsp)
andq $0x0, 0xb8(%rsp)
movq 0x10(%rcx), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x36951d
movq 0x60(%rax), %rcx
leaq 0x80(%rsp), %rdi
vzeroupper
callq *%rcx
movq 0x40(%rsp), %rcx
movq 0x38(%rsp), %rdx
vmovaps 0x150(%rsp), %xmm10
vmovaps 0x140(%rsp), %xmm11
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x120(%rsp), %xmm13
vmovaps 0x110(%rsp), %xmm14
vmovaps 0x100(%rsp), %xmm15
vbroadcastss 0x1b824ad(%rip), %xmm9 # 0x1eeba20
leaq 0x340(%rsp), %r11
pushq $0x8
popq %r8
vmovaps 0x160(%rsp), %xmm2
movq 0x50(%rsp), %rsi
incq %rsi
cmpq %rsi, 0x48(%rsp)
jne 0x369468
vmovaps 0x80(%rdx), %xmm0
vcmpltps 0x10(%rsp), %xmm0, %xmm1
vtestps %xmm2, %xmm1
je 0x369097
vmovaps 0x10(%rsp), %xmm1
vblendvps %xmm2, %xmm0, %xmm1, %xmm1
vshufps $0xb1, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[1,0,3,2]
vmovaps %xmm1, 0x10(%rsp)
vmaxps %xmm1, %xmm0, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0]
vmaxss %xmm0, %xmm1, %xmm0
jmp 0x369083
movq 0x30(%rsp), %rax
testq %rax, %rax
jne 0x368cf8
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vint4_sse2.h
|
embree::avx::BVHNIntersectorKHybrid<8, 8, 1, false, embree::avx::ArrayIntersectorK_1<8, embree::avx::QuadMvIntersectorKMoeller<4, 8, false>>, true>::intersect(embree::vint_impl<8>*, embree::Accel::Intersectors*, embree::RayHitK<8>&, embree::RayQueryContext*)
|
void BVHNIntersectorKHybrid<N, K, types, robust, PrimitiveIntersectorK, single>::intersect(vint<K>* __restrict__ valid_i,
Accel::Intersectors* __restrict__ This,
RayHitK<K>& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
BVH* __restrict__ bvh = (BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
#if ENABLE_FAST_COHERENT_CODEPATHS == 1
assert(context);
if (unlikely(types == BVH_AN1 && context->user && context->isCoherent()))
{
intersectCoherent(valid_i, This, ray, context);
return;
}
#endif
/* filter out invalid rays */
vbool<K> valid = *valid_i == -1;
#if defined(EMBREE_IGNORE_INVALID_RAYS)
valid &= ray.valid();
#endif
/* return if there are no valid rays */
size_t valid_bits = movemask(valid);
#if defined(__AVX__)
STAT3(normal.trav_hit_boxes[popcnt(movemask(valid))], 1, 1, 1);
#endif
if (unlikely(valid_bits == 0)) return;
/* verify correct input */
assert(all(valid, ray.valid()));
assert(all(valid, ray.tnear() >= 0.0f));
assert(!(types & BVH_MB) || all(valid, (ray.time() >= 0.0f) & (ray.time() <= 1.0f)));
Precalculations pre(valid, ray);
/* load ray */
TravRayK<K, robust> tray(ray.org, ray.dir, single ? N : 0);
const vfloat<K> org_ray_tnear = max(ray.tnear(), 0.0f);
const vfloat<K> org_ray_tfar = max(ray.tfar , 0.0f);
if (single)
{
tray.tnear = select(valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(valid, org_ray_tfar , vfloat<K>(neg_inf));
for (; valid_bits!=0; ) {
const size_t i = bscf(valid_bits);
intersect1(This, bvh, bvh->root, i, pre, ray, tray, context);
}
return;
}
/* determine switch threshold based on flags */
const size_t switchThreshold = (context->user && context->isCoherent()) ? 2 : switchThresholdIncoherent;
vint<K> octant = ray.octant();
octant = select(valid, octant, vint<K>(0xffffffff));
/* test whether we have ray with opposing direction signs in the packet */
bool split = false;
{
size_t bits = valid_bits;
vbool<K> vsplit( false );
do
{
const size_t valid_index = bsf(bits);
vbool<K> octant_valid = octant[valid_index] == octant;
bits &= ~(size_t)movemask(octant_valid);
vsplit |= vint<K>(octant[valid_index]) == (octant^vint<K>(0x7));
} while (bits);
if (any(vsplit)) split = true;
}
do
{
const size_t valid_index = bsf(valid_bits);
const vint<K> diff_octant = vint<K>(octant[valid_index])^octant;
const vint<K> count_diff_octant = \
((diff_octant >> 2) & 1) +
((diff_octant >> 1) & 1) +
((diff_octant >> 0) & 1);
vbool<K> octant_valid = (count_diff_octant <= 1) & (octant != vint<K>(0xffffffff));
if (!single || !split) octant_valid = valid; // deactivate octant sorting in pure chunk mode, otherwise instance traversal performance goes down
octant = select(octant_valid,vint<K>(0xffffffff),octant);
valid_bits &= ~(size_t)movemask(octant_valid);
tray.tnear = select(octant_valid, org_ray_tnear, vfloat<K>(pos_inf));
tray.tfar = select(octant_valid, org_ray_tfar , vfloat<K>(neg_inf));
/* allocate stack and push root node */
vfloat<K> stack_near[stackSizeChunk];
NodeRef stack_node[stackSizeChunk];
stack_node[0] = BVH::invalidNode;
stack_near[0] = inf;
stack_node[1] = bvh->root;
stack_near[1] = tray.tnear;
NodeRef* stackEnd MAYBE_UNUSED = stack_node+stackSizeChunk;
NodeRef* __restrict__ sptr_node = stack_node + 2;
vfloat<K>* __restrict__ sptr_near = stack_near + 2;
while (1) pop:
{
/* pop next node from stack */
assert(sptr_node > stack_node);
sptr_node--;
sptr_near--;
NodeRef cur = *sptr_node;
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* cull node if behind closest hit point */
vfloat<K> curDist = *sptr_near;
const vbool<K> active = curDist < tray.tfar;
if (unlikely(none(active)))
continue;
/* switch to single ray traversal */
#if (!defined(__WIN32__) || defined(__X86_64__)) && ((defined(__aarch64__)) || defined(__SSE4_2__))
#if FORCE_SINGLE_MODE == 0
if (single)
#endif
{
size_t bits = movemask(active);
#if FORCE_SINGLE_MODE == 0
if (unlikely(popcnt(bits) <= switchThreshold))
#endif
{
for (; bits!=0; ) {
const size_t i = bscf(bits);
intersect1(This, bvh, cur, i, pre, ray, tray, context);
}
tray.tfar = min(tray.tfar, ray.tfar);
continue;
}
}
#endif
while (likely(!cur.isLeaf()))
{
/* process nodes */
const vbool<K> valid_node = tray.tfar > curDist;
STAT3(normal.trav_nodes, 1, popcnt(valid_node), K);
const NodeRef nodeRef = cur;
const BaseNode* __restrict__ const node = nodeRef.baseNode();
/* set cur to invalid */
cur = BVH::emptyNode;
curDist = pos_inf;
size_t num_child_hits = 0;
for (unsigned i = 0; i < N; i++)
{
const NodeRef child = node->children[i];
if (unlikely(child == BVH::emptyNode)) break;
vfloat<K> lnearP;
vbool<K> lhit = valid_node;
BVHNNodeIntersectorK<N, K, types, robust>::intersect(nodeRef, i, tray, ray.time(), lnearP, lhit);
/* if we hit the child we choose to continue with that child if it
is closer than the current next child, or we push it onto the stack */
if (likely(any(lhit)))
{
assert(sptr_node < stackEnd);
assert(child != BVH::emptyNode);
const vfloat<K> childDist = select(lhit, lnearP, inf);
/* push cur node onto stack and continue with hit child */
if (any(childDist < curDist))
{
if (likely(cur != BVH::emptyNode)) {
num_child_hits++;
*sptr_node = cur; sptr_node++;
*sptr_near = curDist; sptr_near++;
}
curDist = childDist;
cur = child;
}
/* push hit child onto stack */
else {
num_child_hits++;
*sptr_node = child; sptr_node++;
*sptr_near = childDist; sptr_near++;
}
}
}
#if defined(__AVX__)
//STAT3(normal.trav_hit_boxes[num_child_hits], 1, 1, 1);
#endif
if (unlikely(cur == BVH::emptyNode))
goto pop;
/* improved distance sorting for 3 or more hits */
if (unlikely(num_child_hits >= 2))
{
if (any(sptr_near[-2] < sptr_near[-1]))
{
std::swap(sptr_near[-2],sptr_near[-1]);
std::swap(sptr_node[-2],sptr_node[-1]);
}
if (unlikely(num_child_hits >= 3))
{
if (any(sptr_near[-3] < sptr_near[-1]))
{
std::swap(sptr_near[-3],sptr_near[-1]);
std::swap(sptr_node[-3],sptr_node[-1]);
}
if (any(sptr_near[-3] < sptr_near[-2]))
{
std::swap(sptr_near[-3],sptr_near[-2]);
std::swap(sptr_node[-3],sptr_node[-2]);
}
}
}
#if SWITCH_DURING_DOWN_TRAVERSAL == 1
if (single)
{
// seems to be the best place for testing utilization
if (unlikely(popcnt(tray.tfar > curDist) <= switchThreshold))
{
*sptr_node++ = cur;
*sptr_near++ = curDist;
goto pop;
}
}
#endif
}
/* return if stack is empty */
if (unlikely(cur == BVH::invalidNode)) {
assert(sptr_node == stack_node);
break;
}
/* intersect leaf */
assert(cur != BVH::emptyNode);
const vbool<K> valid_leaf = tray.tfar > curDist;
STAT3(normal.trav_leaves, 1, popcnt(valid_leaf), K);
if (unlikely(none(valid_leaf))) continue;
size_t items; const Primitive* prim = (Primitive*)cur.leaf(items);
size_t lazy_node = 0;
PrimitiveIntersectorK::intersect(valid_leaf, This, pre, ray, context, prim, items, tray, lazy_node);
tray.tfar = select(valid_leaf, ray.tfar, tray.tfar);
if (unlikely(lazy_node)) {
*sptr_node = lazy_node; sptr_node++;
*sptr_near = neg_inf; sptr_near++;
}
}
} while(valid_bits);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x200, %rsp # imm = 0x200
movq (%rsi), %r14
cmpq $0x8, 0x70(%r14)
je 0x385464
movq %rcx, %r12
movq %rdx, %r15
movq %rsi, %rbx
cmpq $0x0, 0x8(%rcx)
je 0x385257
movq 0x10(%r12), %rax
testb $0x1, 0x2(%rax)
jne 0x385473
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %xmm1
vpcmpeqd 0x10(%rdi), %xmm0, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpmovmskb %xmm0, %eax
testl %eax, %eax
je 0x385464
vpacksswb %xmm0, %xmm0, %xmm1
vpmovmskb %xmm1, %eax
vmovaps (%r15), %ymm1
leaq 0x20(%rsp), %rcx
vmovaps %ymm1, (%rcx)
vmovaps 0x20(%r15), %ymm1
vmovaps %ymm1, 0x20(%rcx)
vmovaps 0x40(%r15), %ymm1
vmovaps %ymm1, 0x40(%rcx)
vmovaps 0x80(%r15), %ymm1
vmovaps %ymm1, 0x60(%rcx)
vmovaps 0xa0(%r15), %ymm2
vmovaps %ymm2, 0x80(%rcx)
vmovaps 0xc0(%r15), %ymm3
vbroadcastss 0x1b9bbf3(%rip), %ymm4 # 0x1f20ec4
vbroadcastss 0x1b6bd0e(%rip), %ymm5 # 0x1ef0fe8
vandps %ymm4, %ymm1, %ymm6
vcmpltps %ymm5, %ymm6, %ymm6
vblendvps %ymm6, %ymm5, %ymm1, %ymm1
vandps %ymm4, %ymm2, %ymm6
vcmpltps %ymm5, %ymm6, %ymm6
vblendvps %ymm6, %ymm5, %ymm2, %ymm2
vandps %ymm4, %ymm3, %ymm4
vcmpltps %ymm5, %ymm4, %ymm4
vblendvps %ymm4, %ymm5, %ymm3, %ymm4
movzbl %al, %r13d
vrcpps %ymm1, %ymm5
vmovaps %ymm3, 0xa0(%rcx)
vmulps %ymm5, %ymm1, %ymm1
vbroadcastss 0x1b673f0(%rip), %ymm3 # 0x1eec714
vsubps %ymm1, %ymm3, %ymm1
vrcpps %ymm2, %ymm6
vmulps %ymm1, %ymm5, %ymm1
vaddps %ymm1, %ymm5, %ymm1
vmulps %ymm6, %ymm2, %ymm2
vsubps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm6, %ymm2
vrcpps %ymm4, %ymm5
vaddps %ymm2, %ymm6, %ymm2
vmulps %ymm5, %ymm4, %ymm4
vsubps %ymm4, %ymm3, %ymm3
vmulps %ymm3, %ymm5, %ymm3
vaddps %ymm3, %ymm5, %ymm3
vmovaps %ymm1, 0xc0(%rcx)
vmovaps %ymm2, 0xe0(%rcx)
vmovaps %ymm3, 0x100(%rcx)
vxorps %xmm4, %xmm4, %xmm4
vcmpltps %ymm4, %ymm1, %ymm1
vbroadcastss 0x1bcf61e(%rip), %ymm5 # 0x1f549a0
vandps %ymm5, %ymm1, %ymm1
vcmpnltps %ymm4, %ymm2, %ymm2
vbroadcastss 0x1bd55d8(%rip), %ymm5 # 0x1f5a96c
vbroadcastss 0x1bd6063(%rip), %ymm6 # 0x1f5b400
vblendvps %ymm2, %ymm5, %ymm6, %ymm2
vmovaps %ymm1, 0x120(%rcx)
vcmpnltps %ymm4, %ymm3, %ymm1
vbroadcastss 0x1bd604b(%rip), %ymm3 # 0x1f5b404
vbroadcastss 0x1bd6046(%rip), %ymm5 # 0x1f5b408
vblendvps %ymm1, %ymm3, %ymm5, %ymm1
vmovaps %ymm2, 0x140(%rcx)
vmovaps %ymm1, 0x160(%rcx)
vmovaps 0x60(%r15), %ymm1
vmovaps 0x100(%r15), %ymm2
vmaxps %ymm4, %ymm1, %ymm1
vpmovsxwd %xmm0, %xmm3
vpunpckhwd %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[4,4,5,5,6,6,7,7]
vinsertf128 $0x1, %xmm0, %ymm3, %ymm0
vbroadcastss 0x1b6661d(%rip), %ymm3 # 0x1eeba20
vblendvps %ymm0, %ymm1, %ymm3, %ymm1
vmaxps %ymm4, %ymm2, %ymm2
vmovaps %ymm1, 0x180(%rcx)
vbroadcastss 0x1b67766(%rip), %ymm1 # 0x1eecb84
vblendvps %ymm0, %ymm2, %ymm1, %ymm0
vmovaps %ymm0, 0x1a0(%rcx)
bsfq %r13, %rcx
leaq -0x1(%r13), %rax
movq %rax, 0x18(%rsp)
movq 0x70(%r14), %rdx
movq %rbx, %rdi
movq %r14, %rsi
leaq 0x17(%rsp), %r8
movq %r15, %r9
pushq %r12
leaq 0x28(%rsp), %rax
pushq %rax
vzeroupper
callq 0x3be6a4
popq %rax
popq %rcx
andq 0x18(%rsp), %r13
jne 0x38542c
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rbx, %rsi
movq %r15, %rdx
movq %r12, %rcx
callq 0x3bd0f2
jmp 0x385464
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector_hybrid.cpp
|
embree::avx::InstanceArrayIntersector1MB::occluded(embree::avx::InstanceArrayIntersector1MB::Precalculations const&, embree::RayK<1>&, embree::RayQueryContext*, embree::InstanceArrayPrimitive const&)
|
bool InstanceArrayIntersector1MB::occluded(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
if (!object) return false;
/* perform ray mask test */
#if defined(EMBREE_RAY_MASK)
if ((ray.mask & instance->mask) == 0)
return false;
#endif
RTCRayQueryContext* user_context = context->user;
bool occluded = false;
if (likely(instance_id_stack::push(user_context, prim.instID_, prim.primID_)))
{
const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_, ray.time());
const Vec3ff ray_org = ray.org;
const Vec3ff ray_dir = ray.dir;
ray.org = Vec3ff(xfmPoint(world2local, ray_org), ray.tnear());
ray.dir = Vec3ff(xfmVector(world2local, ray_dir), ray.time());
RayQueryContext newcontext((Scene*)object, user_context, context->args);
object->intersectors.occluded((RTCRay&)ray, &newcontext);
ray.org = ray_org;
ray.dir = ray_dir;
occluded = ray.tfar < 0.0f;
instance_id_stack::pop(user_context);
}
return occluded;
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x178, %rsp # imm = 0x178
movq %rdx, %r14
movq %rsi, %rbx
movq (%rdx), %rdx
movl (%rcx), %eax
movl 0x4(%rcx), %esi
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rsi,8), %rdx
movq 0x58(%rdx), %r15
testq %r15, %r15
jne 0x40912e
movq 0x90(%rdx), %rdi
movq 0xa0(%rdx), %r8
imulq %rax, %r8
movl (%rdi,%r8), %edi
movl $0xffffffff, %r8d # imm = 0xFFFFFFFF
cmpq %r8, %rdi
je 0x40912b
movq 0x60(%rdx), %r8
movq (%r8,%rdi,8), %r15
jmp 0x40912e
xorl %r15d, %r15d
testq %r15, %r15
je 0x4091d4
movl 0x34(%rdx), %edi
testl %edi, 0x24(%rbx)
je 0x4091d4
movq 0x8(%r14), %r12
cmpl $-0x1, (%r12)
jne 0x4091d4
movl %esi, (%r12)
movl %eax, 0x4(%r12)
movl (%rcx), %eax
cmpl $0x1, 0x24(%rdx)
jne 0x4091db
movzbl 0x3d(%rdx), %esi
shll $0x8, %esi
movq 0x88(%rdx), %rcx
movl 0x20(%rcx), %edx
cmpl $0x100, %esi # imm = 0x100
je 0x4099ba
cmpl $0x9134, %edx # imm = 0x9134
je 0x409533
cmpl $0x9234, %edx # imm = 0x9234
je 0x4092a1
cmpl $0xb001, %edx # imm = 0xB001
je 0x40936c
cmpl $0x9244, %edx # imm = 0x9244
jne 0x40984c
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm0
vmovaps 0x10(%rdx,%rax), %xmm4
vmovaps 0x20(%rdx,%rax), %xmm2
vmovaps 0x30(%rdx,%rax), %xmm1
jmp 0x40984c
xorl %eax, %eax
jmp 0x4099ab
vmovss 0x1c(%rbx), %xmm0
vmovss 0x28(%rdx), %xmm1
vmovss 0x2c(%rdx), %xmm2
vmovss 0x30(%rdx), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x1ae77bf(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm1, %xmm2, %xmm1
vcvttss2si %xmm1, %ecx
vsubss %xmm1, %xmm0, %xmm15
movzbl 0x3d(%rdx), %r8d
shll $0x8, %r8d
movq 0x88(%rdx), %rdx
imulq $0x38, %rcx, %rdi
leaq (%rdx,%rdi), %rsi
movl 0x20(%rdx,%rdi), %edi
cmpl $0x100, %r8d # imm = 0x100
je 0x409a0e
cmpl $0x9134, %edi # imm = 0x9134
je 0x409597
cmpl $0x9234, %edi # imm = 0x9234
je 0x409305
cmpl $0xb001, %edi # imm = 0xB001
je 0x40944e
cmpl $0x9244, %edi # imm = 0x9244
jne 0x4095f9
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovaps (%rdi,%rsi), %xmm0
vmovaps 0x10(%rdi,%rsi), %xmm1
vmovaps 0x20(%rdi,%rsi), %xmm2
vmovaps 0x30(%rdi,%rsi), %xmm3
jmp 0x4095f9
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm1
vmovss 0xc(%rdx,%rax), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm1
vmovss 0x18(%rdx,%rax), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm1
vmovss 0x24(%rdx,%rax), %xmm3
vshufps $0x4c, %xmm1, %xmm3, %xmm1 # xmm1 = xmm3[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
jmp 0x40984c
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x4(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovsd 0x10(%rdi,%rsi), %xmm1
vmovss 0xc(%rdi,%rsi), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
vmovsd 0x1c(%rdi,%rsi), %xmm2
vmovss 0x18(%rdi,%rsi), %xmm3
vshufps $0x4c, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0,3],xmm2[0,1]
vshufps $0x78, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
vmovsd 0x28(%rdi,%rsi), %xmm3
vmovss 0x24(%rdi,%rsi), %xmm4
vshufps $0x4c, %xmm3, %xmm4, %xmm3 # xmm3 = xmm4[0,3],xmm3[0,1]
vshufps $0x78, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,2,3,1]
jmp 0x4095f9
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,2],xmm0[1,3]
vmovss 0x18(%rdx,%rax), %xmm1
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x1ae3323(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x1ae331f(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm4
vmulss %xmm7, %xmm8, %xmm5
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm5[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm4, %xmm3, %xmm4 # xmm4 = xmm3[0,1,2],xmm4[0]
jmp 0x40984c
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x10(%rdi,%rsi), %xmm0
vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm4
vmovlhps %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0,2],xmm0[1,3]
vmovss 0x18(%rdi,%rsi), %xmm2
vmovsd 0x1c(%rdi,%rsi), %xmm3
vmovlhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
vshufps $0xd8, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm3[1,3]
vmovss 0x24(%rdi,%rsi), %xmm3
vmovss 0x28(%rdi,%rsi), %xmm5
vmovss 0x2c(%rdi,%rsi), %xmm6
vmovss 0x30(%rdi,%rsi), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm3, %xmm3, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x1ae323e(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x1ae323a(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm3, %xmm8, %xmm3
vinsertps $0x30, %xmm3, %xmm2, %xmm3 # xmm3 = xmm2[0,1,2],xmm3[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm5
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdi,%rsi), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],xmm4[2,3]
vinsertps $0x30, %xmm2, %xmm1, %xmm2 # xmm2 = xmm1[0,1,2],xmm2[0]
vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm4, %xmm1 # xmm1 = xmm4[0,1],mem[0],xmm4[3]
vinsertps $0x30, %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0]
jmp 0x4095f9
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm1, %xmm4 # xmm4 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm1 # xmm1 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm1, %xmm2 # xmm2 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm3, %xmm1 # xmm1 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
jmp 0x40984c
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovss (%rdi,%rsi), %xmm0
vmovss 0x4(%rdi,%rsi), %xmm1
vmovss 0x8(%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdi,%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
incl %ecx
imulq $0x38, %rcx, %rcx
leaq (%rdx,%rcx), %rsi
movl 0x20(%rdx,%rcx), %ecx
cmpl $0x9134, %ecx # imm = 0x9134
je 0x4097a4
cmpl $0x9234, %ecx # imm = 0x9234
je 0x409657
cmpl $0xb001, %ecx # imm = 0xB001
je 0x4096bb
cmpl $0x9244, %ecx # imm = 0x9244
jne 0x409803
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovaps (%rcx,%rax), %xmm4
vmovaps 0x10(%rcx,%rax), %xmm6
vmovaps 0x20(%rcx,%rax), %xmm7
vmovaps 0x30(%rcx,%rax), %xmm5
jmp 0x409803
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovsd 0x4(%rcx,%rax), %xmm4
vmovss (%rcx,%rax), %xmm5
vshufps $0x4c, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,3],xmm4[0,1]
vshufps $0x78, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
vmovsd 0x10(%rcx,%rax), %xmm5
vmovss 0xc(%rcx,%rax), %xmm6
vshufps $0x4c, %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[0,2,3,1]
vmovsd 0x1c(%rcx,%rax), %xmm5
vmovss 0x18(%rcx,%rax), %xmm7
vshufps $0x4c, %xmm5, %xmm7, %xmm5 # xmm5 = xmm7[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[0,2,3,1]
vmovsd 0x28(%rcx,%rax), %xmm5
vmovss 0x24(%rcx,%rax), %xmm8
vshufps $0x4c, %xmm5, %xmm8, %xmm5 # xmm5 = xmm8[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x409803
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovsd 0x10(%rcx,%rax), %xmm4
vinsertps $0x20, 0x8(%rcx,%rax), %xmm4, %xmm6 # xmm6 = xmm4[0,1],mem[0],xmm4[3]
vmovsd 0x34(%rcx,%rax), %xmm4
vmovss (%rcx,%rax), %xmm5
vmovss 0xc(%rcx,%rax), %xmm7
vmovlhps %xmm4, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,2],xmm4[1,3]
vmovss 0x18(%rcx,%rax), %xmm5
vmovsd 0x1c(%rcx,%rax), %xmm8
vmovlhps %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm8[0]
vshufps $0xd8, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,2],xmm8[1,3]
vmovss 0x24(%rcx,%rax), %xmm8
vmovss 0x28(%rcx,%rax), %xmm9
vmovss 0x2c(%rcx,%rax), %xmm10
vmovss 0x30(%rcx,%rax), %xmm11
vmulss %xmm9, %xmm9, %xmm12
vmulss %xmm8, %xmm8, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vmulss %xmm10, %xmm10, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vmulss %xmm11, %xmm11, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss 0x1ae2fce(%rip), %xmm13, %xmm14 # 0x1eec718
vmulss 0x1ae2fca(%rip), %xmm12, %xmm12 # 0x1eec71c
vmulss %xmm12, %xmm13, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm12, %xmm13, %xmm12
vaddss %xmm12, %xmm14, %xmm12
vmulss %xmm12, %xmm8, %xmm8
vinsertps $0x30, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm8[0]
vmulss %xmm12, %xmm9, %xmm8
vinsertps $0x30, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[0]
vmulss %xmm12, %xmm10, %xmm8
vmulss %xmm12, %xmm11, %xmm9
vinsertps $0x10, 0x4(%rcx,%rax), %xmm7, %xmm10 # xmm10 = xmm7[0],mem[0],xmm7[2,3]
vinsertps $0x30, %xmm9, %xmm6, %xmm7 # xmm7 = xmm6[0,1,2],xmm9[0]
vinsertps $0x20, 0x3c(%rcx,%rax), %xmm10, %xmm6 # xmm6 = xmm10[0,1],mem[0],xmm10[3]
vinsertps $0x30, %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm8[0]
jmp 0x409803
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovss (%rcx,%rax), %xmm4
vmovss 0x4(%rcx,%rax), %xmm5
vmovss 0x8(%rcx,%rax), %xmm7
vmovss 0xc(%rcx,%rax), %xmm8
vinsertps $0x1c, 0x10(%rcx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rcx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rcx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rcx,%rax), %xmm5, %xmm6 # xmm6 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rcx,%rax), %xmm7, %xmm5 # xmm5 = xmm7[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rcx,%rax), %xmm5, %xmm7 # xmm7 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rcx,%rax), %xmm8, %xmm5 # xmm5 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rcx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
vmovss 0x1ae2f09(%rip), %xmm8 # 0x1eec714
vsubss %xmm15, %xmm8, %xmm8
vshufps $0x0, %xmm15, %xmm15, %xmm9 # xmm9 = xmm15[0,0,0,0]
vmulps %xmm4, %xmm9, %xmm4
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm0, %xmm8, %xmm0
vaddps %xmm4, %xmm0, %xmm0
vmulps %xmm6, %xmm9, %xmm4
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm4, %xmm1, %xmm4
vmulps %xmm7, %xmm9, %xmm1
vmulps %xmm2, %xmm8, %xmm2
vaddps %xmm1, %xmm2, %xmm2
vmulps %xmm5, %xmm9, %xmm1
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,2,0,3]
vmulps %xmm5, %xmm2, %xmm6
vmulps %xmm3, %xmm4, %xmm7
vsubps %xmm6, %xmm7, %xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[1,2,0,3]
vmulps %xmm0, %xmm3, %xmm3
vmulps %xmm2, %xmm8, %xmm2
vsubps %xmm3, %xmm2, %xmm2
vmulps %xmm4, %xmm8, %xmm3
vmulps %xmm5, %xmm0, %xmm4
vsubps %xmm3, %xmm4, %xmm3
vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3]
vunpcklps %xmm4, %xmm7, %xmm4 # xmm4 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
vunpcklps %xmm3, %xmm6, %xmm3 # xmm3 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
vinsertps $0x4a, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1],zero,xmm2[2],zero
vxorps %xmm6, %xmm6, %xmm6
vmovss %xmm2, %xmm6, %xmm2 # xmm2 = xmm2[0],xmm6[1,2,3]
vunpcklps %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
vunpcklps %xmm5, %xmm4, %xmm3 # xmm3 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vdpps $0x7f, %xmm7, %xmm0, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vdivps %xmm0, %xmm3, %xmm3
vdivps %xmm0, %xmm4, %xmm4
vdivps %xmm0, %xmm2, %xmm0
vshufps $0x0, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,0,0,0]
vshufps $0x55, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm0, %xmm1, %xmm1
vmulps %xmm4, %xmm5, %xmm5
vaddps %xmm1, %xmm5, %xmm1
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmovaps (%rbx), %xmm2
vmovaps %xmm2, 0x20(%rsp)
vbroadcastss (%rbx), %xmm2
vbroadcastss 0x4(%rbx), %xmm5
vbroadcastss 0x8(%rbx), %xmm6
vmulps %xmm0, %xmm6, %xmm6
vsubps %xmm1, %xmm6, %xmm1
vmulps %xmm4, %xmm5, %xmm5
vaddps %xmm1, %xmm5, %xmm1
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vinsertps $0x30, 0xc(%rbx), %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],mem[0]
vmovaps 0x10(%rbx), %xmm2
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm1, (%rbx)
vbroadcastss 0x10(%rbx), %xmm1
vbroadcastss 0x14(%rbx), %xmm2
vbroadcastss 0x18(%rbx), %xmm5
vmulps %xmm0, %xmm5, %xmm0
vmulps %xmm4, %xmm2, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmulps %xmm3, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vinsertps $0x30, 0x1c(%rbx), %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],mem[0]
vmovaps %xmm0, 0x10(%rbx)
movq 0x10(%r14), %rax
leaq 0x160(%rsp), %rdx
movq %r15, (%rdx)
movq %r12, 0x8(%rdx)
movq %rax, 0x10(%rdx)
leaq 0x58(%r15), %rdi
movq %rbx, %rsi
callq *0x80(%r15)
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, (%rbx)
vmovaps 0x10(%rsp), %xmm0
vmovaps %xmm0, 0x10(%rbx)
vxorps %xmm0, %xmm0, %xmm0
vucomiss 0x20(%rbx), %xmm0
seta %al
orq $-0x1, (%r12)
addq $0x178, %rsp # imm = 0x178
popq %rbx
popq %r12
popq %r14
popq %r15
retq
cmpl $0x9134, %edx # imm = 0x9134
je 0x409d39
cmpl $0x9234, %edx # imm = 0x9234
je 0x409a7d
cmpl $0xb001, %edx # imm = 0xB001
je 0x409b5a
cmpl $0x9244, %edx # imm = 0x9244
jne 0x409d98
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm9
vmovaps 0x10(%rdx,%rax), %xmm6
vmovaps 0x20(%rdx,%rax), %xmm2
vmovaps 0x30(%rdx,%rax), %xmm0
jmp 0x409d98
vmovaps %xmm15, 0x20(%rsp)
cmpl $0x9134, %edi # imm = 0x9134
je 0x409f75
cmpl $0x9234, %edi # imm = 0x9234
je 0x409ae1
cmpl $0xb001, %edi # imm = 0xB001
je 0x409c3c
cmpl $0x9244, %edi # imm = 0x9244
jne 0x409fef
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovaps (%rdi,%rsi), %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps 0x10(%rdi,%rsi), %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps 0x20(%rdi,%rsi), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovaps 0x30(%rdi,%rsi), %xmm0
jmp 0x409fe9
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm9 # xmm9 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm1
vmovss 0xc(%rdx,%rax), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm1
vmovss 0x18(%rdx,%rax), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm1
vmovss 0x24(%rdx,%rax), %xmm4
vshufps $0x4c, %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[0,2,3,1]
jmp 0x409d98
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x4(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovaps %xmm0, 0x70(%rsp)
vmovsd 0x10(%rdi,%rsi), %xmm0
vmovss 0xc(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovaps %xmm0, 0x60(%rsp)
vmovsd 0x1c(%rdi,%rsi), %xmm0
vmovss 0x18(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovaps %xmm0, 0x40(%rsp)
vmovsd 0x28(%rdi,%rsi), %xmm0
vmovss 0x24(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
jmp 0x409fe9
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm1, %xmm11 # xmm11 = xmm1[0,2],xmm0[1,3]
vmovss 0x18(%rdx,%rax), %xmm1
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x1ae2b35(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x1ae2b31(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm1, %xmm0 # xmm0 = xmm1[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm11, %xmm9 # xmm9 = xmm11[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm4
vmulss %xmm7, %xmm8, %xmm5
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm5[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm4, %xmm3, %xmm6 # xmm6 = xmm3[0,1,2],xmm4[0]
jmp 0x409d98
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x10(%rdi,%rsi), %xmm0
vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdi,%rsi), %xmm1
vmovss (%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rdi,%rsi), %xmm2
vmovsd 0x1c(%rdi,%rsi), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdi,%rsi), %xmm4
vmovss 0x28(%rdi,%rsi), %xmm5
vmovss 0x2c(%rdi,%rsi), %xmm6
vmovss 0x30(%rdi,%rsi), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x1ae2a50(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x1ae2a4c(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm4[0]
vmovaps %xmm2, 0x50(%rsp)
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[0]
vmovaps %xmm1, 0x70(%rsp)
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm2[0]
vmovaps %xmm0, 0x40(%rsp)
vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0]
vmovaps %xmm0, 0x60(%rsp)
jmp 0x409fef
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm4
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm9 # xmm9 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm1, %xmm6 # xmm6 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm1 # xmm1 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm1, %xmm2 # xmm2 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm4, %xmm1 # xmm1 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0,1],mem[0],zero
vshufps $0xff, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[3,3,3,3]
vmovaps %xmm9, %xmm3
vshufps $0xff, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[3,3,3,3]
vshufps $0xff, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[3,3,3,3]
vmovaps %xmm2, %xmm1
vshufps $0xff, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[3,3,3,3]
vmulss %xmm9, %xmm9, %xmm10
vmulss %xmm5, %xmm5, %xmm11
vmovaps %xmm6, %xmm2
vaddss %xmm10, %xmm11, %xmm6
vbroadcastss 0x1b170f0(%rip), %xmm12 # 0x1f20ec0
vxorps %xmm7, %xmm12, %xmm8
vmulss %xmm7, %xmm8, %xmm8
vaddss %xmm6, %xmm8, %xmm6
vxorps %xmm4, %xmm12, %xmm12
vmulss %xmm4, %xmm12, %xmm12
vaddss %xmm6, %xmm12, %xmm13
vmulss %xmm5, %xmm4, %xmm6
vmulss %xmm7, %xmm9, %xmm14
vaddss %xmm6, %xmm14, %xmm15
vsubss %xmm6, %xmm14, %xmm6
vmovss %xmm6, 0x20(%rsp)
vmulss %xmm4, %xmm9, %xmm14
vsubss %xmm10, %xmm11, %xmm10
vmulss %xmm7, %xmm7, %xmm11
vaddss %xmm10, %xmm11, %xmm11
vaddss %xmm11, %xmm12, %xmm6
vmulss %xmm5, %xmm7, %xmm12
vmulss %xmm5, %xmm9, %xmm5
vmovaps %xmm0, %xmm11
vsubss %xmm12, %xmm14, %xmm0
vmulss %xmm4, %xmm7, %xmm9
vaddss %xmm12, %xmm14, %xmm7
vaddss %xmm5, %xmm9, %xmm12
vsubss %xmm5, %xmm9, %xmm9
vaddss %xmm15, %xmm15, %xmm5
vaddss %xmm0, %xmm0, %xmm0
vaddss %xmm10, %xmm8, %xmm8
vmulss %xmm4, %xmm4, %xmm4
vaddss %xmm4, %xmm8, %xmm4
vshufps $0x0, %xmm13, %xmm13, %xmm8 # xmm8 = xmm13[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps 0x1ae289b(%rip), %xmm10 # 0x1eec700
vmulps %xmm0, %xmm10, %xmm0
vmovsd 0x1ae287f(%rip), %xmm13 # 0x1eec6f0
vmulps %xmm5, %xmm13, %xmm5
vaddps %xmm0, %xmm5, %xmm0
vmovss 0x1ae2893(%rip), %xmm14 # 0x1eec714
vmulps %xmm14, %xmm8, %xmm5
vaddps %xmm0, %xmm5, %xmm5
vaddss %xmm12, %xmm12, %xmm0
vshufps $0x0, %xmm6, %xmm6, %xmm8 # xmm8 = xmm6[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vmulps %xmm13, %xmm8, %xmm8
vaddps %xmm0, %xmm8, %xmm0
vmovss 0x20(%rsp), %xmm6
vaddss %xmm6, %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm14, %xmm6
vaddps %xmm0, %xmm6, %xmm6
vshufps $0x0, %xmm4, %xmm4, %xmm0 # xmm0 = xmm4[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vaddss %xmm9, %xmm9, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vxorps %xmm4, %xmm4, %xmm4
vaddss %xmm7, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm14, %xmm7
vshufps $0xe9, %xmm4, %xmm3, %xmm8 # xmm8 = xmm3[1,2],xmm4[2,3]
vblendps $0x4, %xmm2, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm2[2],xmm8[3]
vaddps %xmm0, %xmm7, %xmm7
vaddps %xmm4, %xmm8, %xmm8
vshufps $0x0, %xmm3, %xmm3, %xmm0 # xmm0 = xmm3[0,0,0,0]
vmulps %xmm4, %xmm7, %xmm9
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm9, %xmm4
vmulps %xmm5, %xmm0, %xmm0
vaddps %xmm4, %xmm0, %xmm0
vshufps $0x0, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm3, %xmm9, %xmm3
vmulps %xmm5, %xmm4, %xmm4
vaddps %xmm3, %xmm4, %xmm4
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm3, %xmm9, %xmm3
vshufps $0x0, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vshufps $0xaa, %xmm11, %xmm11, %xmm3 # xmm3 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm3, %xmm3
vshufps $0x55, %xmm11, %xmm11, %xmm7 # xmm7 = xmm11[1,1,1,1]
vmulps %xmm6, %xmm7, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vshufps $0x0, %xmm11, %xmm11, %xmm1 # xmm1 = xmm11[0,0,0,0]
jmp 0x40a8fb
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovss (%rdi,%rsi), %xmm0
vmovss 0x4(%rdi,%rsi), %xmm1
vmovss 0x8(%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps %xmm0, 0x70(%rsp)
vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps %xmm0, 0x60(%rsp)
vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps %xmm0, 0x40(%rsp)
vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps %xmm0, 0x50(%rsp)
incl %ecx
imulq $0x38, %rcx, %rcx
leaq (%rdx,%rcx), %rsi
movl 0x20(%rdx,%rcx), %ecx
cmpl $0x9134, %ecx # imm = 0x9134
je 0x40a190
cmpl $0x9234, %ecx # imm = 0x9234
je 0x40a04d
cmpl $0xb001, %ecx # imm = 0xB001
je 0x40a0b1
cmpl $0x9244, %ecx # imm = 0x9244
jne 0x40a1ef
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovaps (%rcx,%rax), %xmm4
vmovaps 0x10(%rcx,%rax), %xmm5
vmovaps 0x20(%rcx,%rax), %xmm2
vmovaps 0x30(%rcx,%rax), %xmm9
jmp 0x40a1ef
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovsd 0x4(%rcx,%rax), %xmm0
vmovss (%rcx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,2,3,1]
vmovsd 0x10(%rcx,%rax), %xmm0
vmovss 0xc(%rcx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,2,3,1]
vmovsd 0x1c(%rcx,%rax), %xmm0
vmovss 0x18(%rcx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,2,3,1]
vmovsd 0x28(%rcx,%rax), %xmm0
vmovss 0x24(%rcx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm9 # xmm9 = xmm0[0,2,3,1]
jmp 0x40a1ef
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovsd 0x10(%rcx,%rax), %xmm0
vinsertps $0x20, 0x8(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rcx,%rax), %xmm1
vmovss (%rcx,%rax), %xmm2
vmovss 0xc(%rcx,%rax), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rcx,%rax), %xmm2
vmovsd 0x1c(%rcx,%rax), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rcx,%rax), %xmm4
vmovss 0x28(%rcx,%rax), %xmm5
vmovss 0x2c(%rcx,%rax), %xmm6
vmovss 0x30(%rcx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x1ae25de(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x1ae25da(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm9 # xmm9 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rcx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm2 # xmm2 = xmm0[0,1,2],xmm2[0]
vinsertps $0x20, 0x3c(%rcx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0,1,2],xmm1[0]
jmp 0x40a1ef
movq (%rsi), %rcx
imulq 0x10(%rsi), %rax
vmovss (%rcx,%rax), %xmm0
vmovss 0x4(%rcx,%rax), %xmm1
vmovss 0x8(%rcx,%rax), %xmm2
vmovss 0xc(%rcx,%rax), %xmm3
vinsertps $0x1c, 0x10(%rcx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rcx,%rax), %xmm0, %xmm4 # xmm4 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rcx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rcx,%rax), %xmm0, %xmm5 # xmm5 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rcx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rcx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rcx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rcx,%rax), %xmm0, %xmm9 # xmm9 = xmm0[0,1],mem[0],zero
vpermilps $0xff, 0x50(%rsp), %xmm7 # xmm7 = mem[3,3,3,3]
vpermilps $0xff, 0x70(%rsp), %xmm1 # xmm1 = mem[3,3,3,3]
vpermilps $0xff, 0x60(%rsp), %xmm6 # xmm6 = mem[3,3,3,3]
vpermilps $0xff, 0x40(%rsp), %xmm3 # xmm3 = mem[3,3,3,3]
vmovaps %xmm9, 0x130(%rsp)
vshufps $0xff, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[3,3,3,3]
vmovaps %xmm4, 0x150(%rsp)
vshufps $0xff, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[3,3,3,3]
vmovaps %xmm5, 0x140(%rsp)
vshufps $0xff, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[3,3,3,3]
vmovaps %xmm2, 0x120(%rsp)
vshufps $0xff, %xmm2, %xmm2, %xmm0 # xmm0 = xmm2[3,3,3,3]
vmovaps %xmm1, 0xe0(%rsp)
vmulss %xmm4, %xmm1, %xmm1
vmovaps %xmm7, 0x110(%rsp)
vmulss %xmm7, %xmm8, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmovaps %xmm6, 0xf0(%rsp)
vmovaps %xmm5, 0x90(%rsp)
vmulss %xmm5, %xmm6, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmovaps %xmm3, 0x100(%rsp)
vmulss %xmm0, %xmm3, %xmm2
vaddss %xmm1, %xmm2, %xmm2
vbroadcastss 0x1b16c26(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm2, %xmm3
vmovaps %xmm2, 0x10(%rsp)
vmovaps %xmm3, 0x80(%rsp)
vucomiss %xmm2, %xmm3
vxorps %xmm1, %xmm0, %xmm1
vmovaps %xmm1, 0xd0(%rsp)
ja 0x40a2cb
vmovaps %xmm8, 0xb0(%rsp)
jmp 0x40a2e5
vbroadcastss 0x1b16bec(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm8, %xmm8
vmovaps %xmm8, 0xb0(%rsp)
vxorps %xmm1, %xmm4, %xmm4
vmovaps %xmm4, 0xc0(%rsp)
ja 0x40a2f9
vmovaps %xmm0, 0xd0(%rsp)
vbroadcastss 0x1b16bc2(%rip), %xmm0 # 0x1f20ec4
vmovaps 0x10(%rsp), %xmm1
vandps %xmm0, %xmm1, %xmm3
vmulss 0x1ae666c(%rip), %xmm3, %xmm0 # 0x1ef0980
vaddss 0x1ae6668(%rip), %xmm0, %xmm0 # 0x1ef0984
vmulss %xmm0, %xmm3, %xmm0
vaddss 0x1ae6660(%rip), %xmm0, %xmm0 # 0x1ef0988
vmulss %xmm0, %xmm3, %xmm0
vaddss 0x1ae6658(%rip), %xmm0, %xmm0 # 0x1ef098c
vmovaps 0x80(%rsp), %xmm2
vmaxss %xmm1, %xmm2, %xmm1
vmovss %xmm1, 0xc(%rsp)
vmulss %xmm0, %xmm3, %xmm0
vaddss 0x1ae663d(%rip), %xmm0, %xmm0 # 0x1ef0990
vmulss %xmm0, %xmm3, %xmm0
vaddss 0x1ae6635(%rip), %xmm0, %xmm0 # 0x1ef0994
vmovss %xmm0, 0x3c(%rsp)
vmovss 0x1ae23a7(%rip), %xmm0 # 0x1eec714
vmovaps %xmm3, 0xa0(%rsp)
vsubss %xmm3, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jb 0x40a38a
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x40a38f
callq 0x6aa20
vmulss 0x3c(%rsp), %xmm0, %xmm0
vmovss 0x1ae65fb(%rip), %xmm1 # 0x1ef0998
vsubss %xmm0, %xmm1, %xmm0
vxorps %xmm5, %xmm5, %xmm5
vmaxss %xmm0, %xmm5, %xmm0
vbroadcastss 0x1b16b0e(%rip), %xmm4 # 0x1f20ec0
vxorps %xmm4, %xmm0, %xmm2
vmovss 0xc(%rsp), %xmm3
vcmpltss %xmm5, %xmm3, %xmm3
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vsubss %xmm0, %xmm1, %xmm0
vmovss 0x1ae2341(%rip), %xmm7 # 0x1eec714
vcmpltss 0xa0(%rsp), %xmm7, %xmm2
vbroadcastss 0x1ae65b6(%rip), %xmm3 # 0x1ef099c
vblendvps %xmm2, %xmm3, %xmm0, %xmm0
vmulss 0x20(%rsp), %xmm0, %xmm0
vmulss 0x1ae65a6(%rip), %xmm0, %xmm2 # 0x1ef09a0
vroundss $0x9, %xmm2, %xmm2, %xmm2
vcvttss2si %xmm2, %eax
vmulss %xmm1, %xmm2, %xmm1
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm1
vmulss 0x1ae658c(%rip), %xmm1, %xmm2 # 0x1ef09a4
vaddss 0x1ae6588(%rip), %xmm2, %xmm2 # 0x1ef09a8
vmulss 0x1ae6584(%rip), %xmm1, %xmm3 # 0x1ef09ac
vaddss 0x1ae6580(%rip), %xmm3, %xmm5 # 0x1ef09b0
vxorps 0x90(%rsp), %xmm4, %xmm3
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x1ae656f(%rip), %xmm2, %xmm2 # 0x1ef09b4
vmovaps 0x10(%rsp), %xmm4
vcmpltss 0x80(%rsp), %xmm4, %xmm4
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x1ae6557(%rip), %xmm5, %xmm5 # 0x1ef09b8
movl %eax, %ecx
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x1ae654d(%rip), %xmm2, %xmm2 # 0x1ef09bc
andl $0x3, %ecx
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x1ae6542(%rip), %xmm5, %xmm5 # 0x1ef09c0
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x1ae653a(%rip), %xmm2, %xmm6 # 0x1ef09c4
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x1ae2286(%rip), %xmm5, %xmm5 # 0x1eec71c
vmulss %xmm6, %xmm1, %xmm6
vaddss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm1, %xmm1
vaddss %xmm7, %xmm1, %xmm5
vmulss %xmm6, %xmm0, %xmm14
testb $0x1, %al
je 0x40a4ba
vmovaps %xmm14, 0x10(%rsp)
vmovaps %xmm5, %xmm14
jmp 0x40a4c0
vmovaps %xmm5, 0x10(%rsp)
vmovaps 0x90(%rsp), %xmm1
vblendvps %xmm4, %xmm3, %xmm1, %xmm2
leal -0x1(%rcx), %eax
cmpl $0x2, %ecx
jb 0x40a4e4
vbroadcastss 0x1b169e0(%rip), %xmm4 # 0x1f20ec0
vxorps %xmm4, %xmm14, %xmm14
cmpl $0x2, %eax
jae 0x40a502
vbroadcastss 0x1b169ce(%rip), %xmm4 # 0x1f20ec0
vmovaps 0x10(%rsp), %xmm0
vxorps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovaps 0x110(%rsp), %xmm11
vmovss 0xc(%rsp), %xmm1
vmulss %xmm1, %xmm11, %xmm4
vmovaps 0xb0(%rsp), %xmm13
vsubss %xmm13, %xmm4, %xmm4
vmovaps 0xe0(%rsp), %xmm3
vmulss %xmm3, %xmm1, %xmm5
vmovaps 0xc0(%rsp), %xmm15
vsubss %xmm15, %xmm5, %xmm5
vmovaps 0xf0(%rsp), %xmm7
vmulss %xmm7, %xmm1, %xmm6
vsubss %xmm2, %xmm6, %xmm10
vmovaps 0x100(%rsp), %xmm8
vmulss %xmm1, %xmm8, %xmm6
vmovaps 0xd0(%rsp), %xmm0
vsubss %xmm0, %xmm6, %xmm12
vmovss %xmm5, 0x90(%rsp)
vmulss %xmm5, %xmm5, %xmm6
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vmovss %xmm10, 0x80(%rsp)
vmulss %xmm10, %xmm10, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vmovss %xmm12, 0xa0(%rsp)
vmulss %xmm12, %xmm12, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vrsqrtss %xmm6, %xmm6, %xmm9
vmovss 0x1ae216e(%rip), %xmm12 # 0x1eec71c
vmulss %xmm6, %xmm12, %xmm6
vmulss %xmm6, %xmm9, %xmm6
vmulss %xmm9, %xmm9, %xmm10
vmulss %xmm6, %xmm10, %xmm6
vmulss 0x1ae2151(%rip), %xmm9, %xmm9 # 0x1eec718
vaddss %xmm6, %xmm9, %xmm9
vmulss %xmm4, %xmm9, %xmm4
vmovaps %xmm14, %xmm5
vmulss %xmm4, %xmm14, %xmm4
vmulss 0x10(%rsp), %xmm11, %xmm6
vsubss %xmm4, %xmm6, %xmm4
vmovaps 0x20(%rsp), %xmm14
vmovss 0x1ae2125(%rip), %xmm6 # 0x1eec714
vsubss %xmm14, %xmm6, %xmm6
vmulss %xmm13, %xmm14, %xmm10
vmulss %xmm6, %xmm11, %xmm11
vaddss %xmm10, %xmm11, %xmm13
vmulss %xmm15, %xmm14, %xmm10
vmulss %xmm3, %xmm6, %xmm11
vaddss %xmm10, %xmm11, %xmm10
vmulss %xmm2, %xmm14, %xmm3
vmulss %xmm7, %xmm6, %xmm11
vaddss %xmm3, %xmm11, %xmm3
vmulss %xmm0, %xmm14, %xmm11
vmulss %xmm6, %xmm8, %xmm14
vaddss %xmm11, %xmm14, %xmm11
vmulss %xmm10, %xmm10, %xmm14
vmulss %xmm13, %xmm13, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm3, %xmm3, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm11, %xmm11, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm12, %xmm14, %xmm2
vrsqrtss %xmm14, %xmm14, %xmm14
vmulss 0x1ae20bb(%rip), %xmm14, %xmm12 # 0x1eec718
vmulss %xmm2, %xmm14, %xmm2
vmulss %xmm14, %xmm14, %xmm14
vmulss %xmm2, %xmm14, %xmm2
vaddss %xmm2, %xmm12, %xmm2
vmovss 0x1ae6352(%rip), %xmm12 # 0x1ef09c8
vucomiss %xmm12, %xmm1
vcmpltss %xmm1, %xmm12, %xmm12
vmulss %xmm2, %xmm13, %xmm13
vblendvps %xmm12, %xmm13, %xmm4, %xmm4
ja 0x40a6e2
vmulss 0x90(%rsp), %xmm9, %xmm2
vmulss 0x80(%rsp), %xmm9, %xmm3
vmulss 0xa0(%rsp), %xmm9, %xmm9
vmulss %xmm5, %xmm2, %xmm2
vmovaps 0x10(%rsp), %xmm0
vmulss 0xe0(%rsp), %xmm0, %xmm7
vsubss %xmm2, %xmm7, %xmm7
vmulss %xmm5, %xmm3, %xmm2
vmulss 0xf0(%rsp), %xmm0, %xmm3
vsubss %xmm2, %xmm3, %xmm8
vmulss %xmm5, %xmm9, %xmm1
vmulss 0x100(%rsp), %xmm0, %xmm0
vsubss %xmm1, %xmm0, %xmm5
jmp 0x40a6ee
vmulss %xmm2, %xmm10, %xmm7
vmulss %xmm2, %xmm3, %xmm8
vmulss %xmm2, %xmm11, %xmm5
vpermilps $0x0, 0x20(%rsp), %xmm1 # xmm1 = mem[0,0,0,0]
vmulps 0x150(%rsp), %xmm1, %xmm0
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps 0x70(%rsp), %xmm6, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmovaps %xmm0, 0x20(%rsp)
vmulps 0x140(%rsp), %xmm1, %xmm2
vmulps 0x60(%rsp), %xmm6, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vmulps 0x120(%rsp), %xmm1, %xmm2
vmulps 0x40(%rsp), %xmm6, %xmm9
vaddps %xmm2, %xmm9, %xmm2
vmulps 0x130(%rsp), %xmm1, %xmm1
vmulps 0x50(%rsp), %xmm6, %xmm6
vaddps %xmm1, %xmm6, %xmm1
vmulss %xmm7, %xmm7, %xmm10
vmulss %xmm4, %xmm4, %xmm11
vaddss %xmm10, %xmm11, %xmm6
vbroadcastss 0x1b1675d(%rip), %xmm0 # 0x1f20ec0
vxorps %xmm0, %xmm8, %xmm9
vmulss %xmm8, %xmm9, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vxorps %xmm0, %xmm5, %xmm12
vmulss %xmm5, %xmm12, %xmm12
vaddss %xmm6, %xmm12, %xmm13
vmulss %xmm5, %xmm4, %xmm6
vmulss %xmm7, %xmm8, %xmm14
vaddss %xmm6, %xmm14, %xmm15
vsubss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm7, %xmm14
vsubss %xmm10, %xmm11, %xmm10
vmulss %xmm8, %xmm8, %xmm11
vaddss %xmm10, %xmm11, %xmm11
vaddss %xmm11, %xmm12, %xmm11
vmulss %xmm4, %xmm8, %xmm12
vmulss %xmm7, %xmm4, %xmm7
vsubss %xmm12, %xmm14, %xmm0
vmulss %xmm5, %xmm8, %xmm8
vaddss %xmm12, %xmm14, %xmm4
vaddss %xmm7, %xmm8, %xmm12
vsubss %xmm7, %xmm8, %xmm7
vaddss %xmm15, %xmm15, %xmm8
vaddss %xmm0, %xmm0, %xmm0
vaddss %xmm10, %xmm9, %xmm9
vmulss %xmm5, %xmm5, %xmm5
vaddss %xmm5, %xmm9, %xmm9
vshufps $0x0, %xmm13, %xmm13, %xmm5 # xmm5 = xmm13[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps 0x1ae1f0f(%rip), %xmm10 # 0x1eec700
vmulps %xmm0, %xmm10, %xmm0
vmovsd 0x1ae1ef3(%rip), %xmm13 # 0x1eec6f0
vmulps %xmm13, %xmm8, %xmm8
vaddps %xmm0, %xmm8, %xmm0
vmovss 0x1ae1f06(%rip), %xmm8 # 0x1eec714
vmulps %xmm5, %xmm8, %xmm5
vaddps %xmm0, %xmm5, %xmm5
vaddss %xmm12, %xmm12, %xmm0
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vmulps %xmm13, %xmm11, %xmm11
vaddps %xmm0, %xmm11, %xmm0
vaddss %xmm6, %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm8, %xmm6
vaddps %xmm0, %xmm6, %xmm6
vshufps $0x0, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vaddss %xmm7, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm7
vaddps %xmm0, %xmm7, %xmm0
vxorps %xmm7, %xmm7, %xmm7
vaddss %xmm4, %xmm4, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm8, %xmm4
vmovaps 0x20(%rsp), %xmm10
vshufps $0xe9, %xmm7, %xmm10, %xmm8 # xmm8 = xmm10[1,2],xmm7[2,3]
vblendps $0x4, %xmm3, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm3[2],xmm8[3]
vaddps %xmm0, %xmm4, %xmm9
vaddps %xmm7, %xmm8, %xmm8
vshufps $0x0, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[0,0,0,0]
vmulps %xmm7, %xmm9, %xmm4
vmulps %xmm7, %xmm6, %xmm7
vaddps %xmm4, %xmm7, %xmm7
vmulps %xmm5, %xmm0, %xmm0
vaddps %xmm7, %xmm0, %xmm0
vshufps $0x0, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,1,1,1]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm3
vmulps %xmm5, %xmm7, %xmm4
vaddps %xmm3, %xmm4, %xmm4
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm7, %xmm3, %xmm3
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vshufps $0xaa, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[2,2,2,2]
vmulps %xmm3, %xmm9, %xmm3
vshufps $0x55, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,1,1,1]
vmulps %xmm6, %xmm7, %xmm6
vaddps %xmm3, %xmm6, %xmm3
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm3, %xmm1, %xmm1
vaddps %xmm1, %xmm8, %xmm1
jmp 0x40984c
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::avx::InstanceArrayIntersector1::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstanceArrayPrimitive const&) (.cold.1)
|
__forceinline void updateAABB()
{
if (likely(query_ws->radius == (float)inf || userContext->instStackSize == 0)) {
query_radius = Vec3fa(query_ws->radius);
return;
}
const AffineSpace3fa m = AffineSpace3fa_load_unaligned((AffineSpace3fa*)userContext->world2inst[userContext->instStackSize-1]);
BBox3fa bbox(Vec3fa(-query_ws->radius), Vec3fa(query_ws->radius));
bbox = xfmBounds(m, bbox);
query_radius = 0.5f * (bbox.upper - bbox.lower);
}
|
decl %edi
shlq $0x6, %rdi
vmovups (%rsi,%rdi), %xmm1
vmovups 0x10(%rsi,%rdi), %xmm2
vmovups 0x30(%rsi,%rdi), %xmm3
vbroadcastss 0x1b16594(%rip), %xmm4 # 0x1f20ec0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vxorps %xmm4, %xmm0, %xmm4
vmulps 0x20(%rsi,%rdi), %xmm0, %xmm5
vsubps %xmm5, %xmm3, %xmm6
vmulps %xmm2, %xmm4, %xmm7
vaddps %xmm6, %xmm7, %xmm8
vmulps %xmm1, %xmm4, %xmm4
vaddps %xmm4, %xmm8, %xmm9
vbroadcastss 0x1ae10c8(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1ae221e(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm7, %xmm5
vaddps %xmm5, %xmm4, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vmulps %xmm2, %xmm0, %xmm2
vaddps %xmm6, %xmm2, %xmm6
vaddps %xmm6, %xmm4, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm4, %xmm3
vminps %xmm3, %xmm10, %xmm4
vmaxps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm1
vminps %xmm1, %xmm4, %xmm4
vmaxps %xmm1, %xmm3, %xmm1
vaddps %xmm5, %xmm0, %xmm3
vminps %xmm3, %xmm4, %xmm4
vmaxps %xmm3, %xmm1, %xmm1
vaddps %xmm6, %xmm0, %xmm3
vminps %xmm3, %xmm4, %xmm4
vmaxps %xmm3, %xmm1, %xmm1
vaddps %xmm2, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm2
vmaxps %xmm0, %xmm1, %xmm0
vsubps %xmm2, %xmm0, %xmm0
vbroadcastss 0x1ae219a(%rip), %xmm1 # 0x1eecb80
vmulps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
retq
|
/embree[P]embree/kernels/geometry/../common/context.h
|
embree::avx::InstanceArrayIntersector1MB::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstanceArrayPrimitive const&) (.cold.1)
|
__forceinline void updateAABB()
{
if (likely(query_ws->radius == (float)inf || userContext->instStackSize == 0)) {
query_radius = Vec3fa(query_ws->radius);
return;
}
const AffineSpace3fa m = AffineSpace3fa_load_unaligned((AffineSpace3fa*)userContext->world2inst[userContext->instStackSize-1]);
BBox3fa bbox(Vec3fa(-query_ws->radius), Vec3fa(query_ws->radius));
bbox = xfmBounds(m, bbox);
query_radius = 0.5f * (bbox.upper - bbox.lower);
}
|
decl %edi
shlq $0x6, %rdi
vmovups (%rsi,%rdi), %xmm1
vmovups 0x10(%rsi,%rdi), %xmm2
vmovups 0x30(%rsi,%rdi), %xmm3
vbroadcastss 0x1b164b1(%rip), %xmm4 # 0x1f20ec0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vxorps %xmm4, %xmm0, %xmm4
vmulps 0x20(%rsi,%rdi), %xmm0, %xmm5
vsubps %xmm5, %xmm3, %xmm6
vmulps %xmm2, %xmm4, %xmm7
vaddps %xmm6, %xmm7, %xmm8
vmulps %xmm1, %xmm4, %xmm4
vaddps %xmm4, %xmm8, %xmm9
vbroadcastss 0x1ae0fe5(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0x1ae213b(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vaddps %xmm3, %xmm5, %xmm3
vaddps %xmm3, %xmm7, %xmm5
vaddps %xmm5, %xmm4, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vmulps %xmm2, %xmm0, %xmm2
vaddps %xmm6, %xmm2, %xmm6
vaddps %xmm6, %xmm4, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm4, %xmm3
vminps %xmm3, %xmm10, %xmm4
vmaxps %xmm3, %xmm7, %xmm3
vmulps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm1
vminps %xmm1, %xmm4, %xmm4
vmaxps %xmm1, %xmm3, %xmm1
vaddps %xmm5, %xmm0, %xmm3
vminps %xmm3, %xmm4, %xmm4
vmaxps %xmm3, %xmm1, %xmm1
vaddps %xmm6, %xmm0, %xmm3
vminps %xmm3, %xmm4, %xmm4
vmaxps %xmm3, %xmm1, %xmm1
vaddps %xmm2, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm2
vmaxps %xmm0, %xmm1, %xmm0
vsubps %xmm2, %xmm0, %xmm0
vbroadcastss 0x1ae20b7(%rip), %xmm1 # 0x1eecb80
vmulps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%rdx)
retq
|
/embree[P]embree/kernels/geometry/../common/context.h
|
embree::avx2::BVHNIntersector1<8, 16777232, false, embree::avx2::ArrayIntersector1<embree::avx2::InstanceArrayIntersector1MB>>::occluded(embree::Accel::Intersectors const*, embree::RayK<1>&, embree::RayQueryContext*)
|
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::occluded(const Accel::Intersectors* __restrict__ This,
Ray& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* early out for already occluded rays */
if (unlikely(ray.tfar < 0.0f))
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
NodeRef stack[stackSize]; // stack of nodes that still need to get traversed
NodeRef* stackPtr = stack+1; // current stack pointer
NodeRef* stackEnd = stack+stackSize;
stack[0] = bvh->root;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = (NodeRef)*stackPtr;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(shadow.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(shadow.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseAnyHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(shadow.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::occluded(This, pre, ray, context, prim, num, tray, lazy_node)) {
ray.tfar = neg_inf;
break;
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
*stackPtr = (NodeRef)lazy_node;
stackPtr++;
}
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x12f8, %rsp # imm = 0x12F8
movq %rdx, 0x38(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x4336d8
vmovss 0x20(%rsi), %xmm0
vxorps %xmm2, %xmm2, %xmm2
vucomiss %xmm0, %xmm2
ja 0x4336d8
leaq 0x158(%rsp), %rdx
movq 0x70(%rax), %rax
movq %rax, -0x8(%rdx)
vmaxss 0xc(%rsi), %xmm2, %xmm1
vmovaps 0x10(%rsi), %xmm3
vbroadcastss 0x1aedc1e(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0x1abdd35(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
vrcpps %xmm3, %xmm4
vbroadcastss 0x1ab9449(%rip), %xmm5 # 0x1eec714
vfnmadd231ps %xmm3, %xmm4, %xmm5 # xmm5 = -(xmm4 * xmm3) + xmm5
vfmadd132ps %xmm4, %xmm4, %xmm5 # xmm5 = (xmm5 * xmm4) + xmm4
xorl %edi, %edi
vucomiss %xmm2, %xmm5
setb %dil
vbroadcastss %xmm5, %ymm8
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vbroadcastsd %xmm6, %ymm9
vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0]
vshufps $0xaa, %xmm5, %xmm5, %xmm3 # xmm3 = xmm5[2,2,2,2]
vbroadcastsd %xmm3, %ymm10
vmulps (%rsi), %xmm5, %xmm5
vbroadcastss %xmm5, %ymm3
vmovshdup %xmm5, %xmm4 # xmm4 = xmm5[1,1,3,3]
vbroadcastsd %xmm4, %ymm4
vshufps $0xaa, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
shll $0x5, %edi
xorl %r8d, %r8d
vucomiss %xmm2, %xmm6
vbroadcastsd %xmm5, %ymm5
setb %r8b
shll $0x5, %r8d
orq $0x40, %r8
xorl %r9d, %r9d
vucomiss %xmm2, %xmm7
setb %r9b
shll $0x5, %r9d
orq $0x80, %r9
movq %rdi, %r10
xorq $0x20, %r10
movq %r8, %r15
xorq $0x20, %r15
movq %r9, %rbx
xorq $0x20, %rbx
vbroadcastss 0x1aedb5e(%rip), %ymm2 # 0x1f20ec0
vxorps %ymm2, %ymm3, %ymm6
vxorps %ymm2, %ymm4, %ymm7
vxorps %ymm2, %ymm5, %ymm5
vbroadcastss %xmm1, %ymm11
vbroadcastss %xmm0, %ymm12
leaq 0x150(%rsp), %r11
movq %rsi, 0x8(%rsp)
movq %rdi, 0x28(%rsp)
vmovups %ymm8, 0x130(%rsp)
vmovups %ymm9, 0x110(%rsp)
vmovups %ymm10, 0xf0(%rsp)
movq %r8, 0x20(%rsp)
movq %r9, 0x18(%rsp)
movq %r10, 0x10(%rsp)
vmovups %ymm6, 0xd0(%rsp)
vmovups %ymm7, 0xb0(%rsp)
vmovups %ymm5, 0x90(%rsp)
vmovups %ymm11, 0x70(%rsp)
vmovups %ymm12, 0x50(%rsp)
cmpq %r11, %rdx
je 0x4336d8
movq -0x8(%rdx), %r12
addq $-0x8, %rdx
testb $0x8, %r12b
jne 0x4334ce
movq %r12, %rax
vbroadcastss 0x1c(%rsi), %ymm0
andq $-0x10, %rax
vmovaps 0x100(%rax,%rdi), %ymm1
vfmadd213ps 0x40(%rax,%rdi), %ymm0, %ymm1 # ymm1 = (ymm0 * ymm1) + mem
vmovaps 0x100(%rax,%r8), %ymm2
vfmadd213ps 0x40(%rax,%r8), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem
vfmadd213ps %ymm6, %ymm8, %ymm1 # ymm1 = (ymm8 * ymm1) + ymm6
vfmadd213ps %ymm7, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm2) + ymm7
vpmaxsd %ymm2, %ymm1, %ymm1
vmovaps 0x100(%rax,%r9), %ymm2
vfmadd213ps 0x40(%rax,%r9), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem
vfmadd213ps %ymm5, %ymm10, %ymm2 # ymm2 = (ymm10 * ymm2) + ymm5
vpmaxsd %ymm11, %ymm2, %ymm2
vpmaxsd %ymm2, %ymm1, %ymm1
vmovaps 0x100(%rax,%r10), %ymm2
vfmadd213ps 0x40(%rax,%r10), %ymm0, %ymm2 # ymm2 = (ymm0 * ymm2) + mem
vfmadd213ps %ymm6, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm2) + ymm6
vmovaps 0x100(%rax,%r15), %ymm3
vfmadd213ps 0x40(%rax,%r15), %ymm0, %ymm3 # ymm3 = (ymm0 * ymm3) + mem
vfmadd213ps %ymm7, %ymm9, %ymm3 # ymm3 = (ymm9 * ymm3) + ymm7
vmovaps 0x100(%rax,%rbx), %ymm4
vfmadd213ps 0x40(%rax,%rbx), %ymm0, %ymm4 # ymm4 = (ymm0 * ymm4) + mem
vpminsd %ymm3, %ymm2, %ymm2
vfmadd213ps %ymm5, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm4) + ymm5
vpminsd %ymm12, %ymm4, %ymm3
vpminsd %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm1
movl %r12d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x43352a
vextractf128 $0x1, %ymm1, %xmm0
vpackssdw %xmm0, %xmm1, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r14d
testb $0x8, %r12b
jne 0x433526
testq %r14, %r14
je 0x433557
andq $-0x10, %r12
tzcntq %r14, %rcx
xorl %r13d, %r13d
blsrq %r14, %rax
movq (%r12,%rcx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
je 0x433518
movq %rcx, (%rdx)
addq $0x8, %rdx
tzcntq %rax, %rcx
blsrq %rax, %rax
jmp 0x4334ea
movq %rcx, %r12
testl %r13d, %r13d
je 0x4333ec
jmp 0x43355d
pushq $0x6
jmp 0x433559
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vandps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vpackssdw %xmm1, %xmm0, %xmm0
jmp 0x4334bd
pushq $0x4
popq %r13
jmp 0x43351b
cmpl $0x6, %r13d
jne 0x4336ce
movl %r12d, %eax
andl $0xf, %eax
xorl %r13d, %r13d
movq %rax, 0x40(%rsp)
addq $-0x8, %rax
setne %bpl
je 0x4336ce
movq %rax, 0x48(%rsp)
movq %rdx, 0x30(%rsp)
andq $-0x10, %r12
leaq 0x7(%rsp), %rdi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
vzeroupper
callq 0x60a670
testb %al, %al
jne 0x4335ec
addq $0x8, %r12
addq $-0x9, 0x40(%rsp)
xorl %ebp, %ebp
cmpq %rbp, 0x40(%rsp)
je 0x433666
leaq 0x7(%rsp), %rdi
movq 0x8(%rsp), %rsi
movq 0x38(%rsp), %rdx
movq %r12, %rcx
callq 0x60a670
addq $0x8, %r12
incq %rbp
testb %al, %al
je 0x4335b6
cmpq 0x48(%rsp), %rbp
setb %bpl
testb %bpl, %bpl
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0x28(%rsp), %rdi
vmovups 0x130(%rsp), %ymm8
vmovups 0x110(%rsp), %ymm9
vmovups 0xf0(%rsp), %ymm10
movq 0x20(%rsp), %r8
movq 0x18(%rsp), %r9
movq 0x10(%rsp), %r10
vmovups 0xd0(%rsp), %ymm6
vmovups 0xb0(%rsp), %ymm7
vmovups 0x90(%rsp), %ymm5
vmovups 0x70(%rsp), %ymm11
vmovups 0x50(%rsp), %ymm12
leaq 0x150(%rsp), %r11
je 0x4336ce
movl $0xff800000, 0x20(%rsi) # imm = 0xFF800000
pushq $0x3
popq %r13
jmp 0x4336ce
movq 0x8(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0x28(%rsp), %rdi
vmovups 0x130(%rsp), %ymm8
vmovups 0x110(%rsp), %ymm9
vmovups 0xf0(%rsp), %ymm10
movq 0x20(%rsp), %r8
movq 0x18(%rsp), %r9
movq 0x10(%rsp), %r10
vmovups 0xd0(%rsp), %ymm6
vmovups 0xb0(%rsp), %ymm7
vmovups 0x90(%rsp), %ymm5
vmovups 0x70(%rsp), %ymm11
vmovups 0x50(%rsp), %ymm12
leaq 0x150(%rsp), %r11
cmpl $0x3, %r13d
jne 0x4333db
addq $0x12f8, %rsp # imm = 0x12F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx2::BVHNIntersector1<8, 1, false, embree::avx2::ArrayIntersector1<embree::avx2::QuadMiIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*) (.cold.1)
|
static __forceinline vfloat8 select(const vboolf8& m, const vfloat8& t, const vfloat8& f) {
return _mm256_blendv_ps(f, t, m);
}
|
vbroadcastss 0x1ab5132(%rip), %ymm2 # 0x1eeba20
vblendvps %ymm1, %ymm0, %ymm2, %ymm0
vshufps $0xb1, %ymm0, %ymm0, %ymm2 # ymm2 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm2, %ymm0, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vpermpd $0x4e, %ymm2, %ymm3 # ymm3 = ymm2[2,3,0,1]
vminps %ymm3, %ymm2, %ymm2
vcmpeqps %ymm2, %ymm0, %ymm0
vtestps %ymm1, %ymm0
je 0x436920
vandps %ymm1, %ymm0, %ymm1
vmovmskps %ymm1, %eax
tzcntl %eax, %eax
movl %eax, (%rdi)
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vfloat8_avx.h
|
embree::avx2::BVHNIntersectorKHybrid<4, 8, 1, false, embree::avx2::ArrayIntersectorK_1<8, embree::avx2::ObjectIntersectorK<8, false>>, false>::intersectCoherent(embree::vint_impl<8>*, embree::Accel::Intersectors*, embree::RayHitK<8>&, embree::RayQueryContext*)
|
static __forceinline vboolf8 operator ==(const vint8& a, const vint8& b) { return _mm256_castsi256_ps(_mm256_cmpeq_epi32(a, b)); }
|
vpcmpeqd %ymm0, %ymm0, %ymm0
vpcmpeqd (%rdi), %ymm0, %ymm1
vmovmskps %ymm1, %eax
testl %eax, %eax
je 0x50e0f4
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x1320, %rsp # imm = 0x1320
movq (%rsi), %rsi
movq %rsi, 0x58(%rsp)
movzbl %al, %esi
vmovaps (%rdx), %ymm7
vmovaps 0x20(%rdx), %ymm8
vmovaps 0x40(%rdx), %ymm9
vmovaps 0x80(%rdx), %ymm2
vbroadcastss 0x1a138ca(%rip), %ymm3 # 0x1f20ec4
vandps %ymm3, %ymm2, %ymm4
vbroadcastss 0x19e39e1(%rip), %ymm5 # 0x1ef0fe8
vcmpltps %ymm5, %ymm4, %ymm4
vblendvps %ymm4, %ymm5, %ymm2, %ymm11
vmovaps 0xa0(%rdx), %ymm4
vandps %ymm3, %ymm4, %ymm6
vcmpltps %ymm5, %ymm6, %ymm6
vblendvps %ymm6, %ymm5, %ymm4, %ymm12
vmovaps 0xc0(%rdx), %ymm4
vandps %ymm3, %ymm4, %ymm3
vcmpltps %ymm5, %ymm3, %ymm3
vblendvps %ymm3, %ymm5, %ymm4, %ymm13
vmovaps 0x60(%rdx), %ymm3
vrcpps %ymm11, %ymm4
vbroadcastss 0x19df0c1(%rip), %ymm5 # 0x1eec714
vfnmadd213ps %ymm5, %ymm4, %ymm11 # ymm11 = -(ymm4 * ymm11) + ymm5
vfmadd132ps %ymm4, %ymm4, %ymm11 # ymm11 = (ymm11 * ymm4) + ymm4
vrcpps %ymm12, %ymm4
vfnmadd213ps %ymm5, %ymm4, %ymm12 # ymm12 = -(ymm4 * ymm12) + ymm5
vfmadd132ps %ymm4, %ymm4, %ymm12 # ymm12 = (ymm12 * ymm4) + ymm4
vrcpps %ymm13, %ymm4
vfnmadd213ps %ymm5, %ymm4, %ymm13 # ymm13 = -(ymm4 * ymm13) + ymm5
vfmadd132ps %ymm4, %ymm4, %ymm13 # ymm13 = (ymm13 * ymm4) + ymm4
vxorps %xmm14, %xmm14, %xmm14
vmaxps %ymm14, %ymm3, %ymm3
vmovaps %ymm3, 0x2c0(%rsp)
vmovaps 0x100(%rdx), %ymm3
vmaxps %ymm14, %ymm3, %ymm3
vmovaps %ymm3, 0x2a0(%rsp)
vbroadcastss 0x1a05057(%rip), %ymm3 # 0x1f12704
vcmpltps %ymm14, %ymm2, %ymm2
vandps %ymm3, %ymm2, %ymm2
vmovaps 0xa0(%rdx), %ymm3
vmovaps 0xc0(%rdx), %ymm4
vcmpltps %ymm14, %ymm3, %ymm3
vbroadcastss 0x1a13806(%rip), %ymm5 # 0x1f20edc
vandps %ymm5, %ymm3, %ymm3
vorps %ymm2, %ymm3, %ymm2
vcmpltps %ymm14, %ymm4, %ymm3
vbroadcastss 0x1a4d287(%rip), %ymm4 # 0x1f5a974
vandps %ymm4, %ymm3, %ymm3
vpxor %ymm0, %ymm1, %ymm0
vpor %ymm3, %ymm0, %ymm0
vpor %ymm2, %ymm0, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss 0x1a137b1(%rip), %ymm0 # 0x1f20ec0
vmovaps %ymm7, 0x320(%rsp)
vxorps %ymm0, %ymm7, %ymm1
vmulps %ymm1, %ymm11, %ymm10
vmovaps %ymm8, 0x300(%rsp)
vxorps %ymm0, %ymm8, %ymm1
vmulps %ymm1, %ymm12, %ymm15
vmovaps %ymm9, 0x2e0(%rsp)
vxorps %ymm0, %ymm9, %ymm0
vmulps %ymm0, %ymm13, %ymm9
leaq 0x3c0(%rsp), %r11
pushq $0x8
popq %r8
vbroadcastss 0x1a13769(%rip), %xmm0 # 0x1f20ec0
vmovaps %xmm0, 0x110(%rsp)
vbroadcastss 0x19de2b7(%rip), %ymm0 # 0x1eeba20
vmovaps %ymm0, 0x280(%rsp)
movq %rcx, 0x70(%rsp)
movq %rdx, 0x68(%rsp)
vmovaps %ymm11, 0x380(%rsp)
vmovaps %ymm12, 0x360(%rsp)
vmovaps %ymm13, 0x340(%rsp)
vmovaps %ymm10, 0x240(%rsp)
vmovaps %ymm15, 0x220(%rsp)
vmovaps %ymm9, 0x200(%rsp)
tzcntq %rsi, %rax
vpbroadcastd 0x260(%rsp,%rax,4), %ymm0
vpcmpeqd 0x260(%rsp), %ymm0, %ymm0
vmovmskps %ymm0, %eax
andnq %rsi, %rax, %rsi
movq %rsi, 0x60(%rsp)
vbroadcastss 0x19de23f(%rip), %ymm4 # 0x1eeba20
vblendvps %ymm0, %ymm11, %ymm4, %ymm1
vshufps $0xb1, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2,5,4,7,6]
vminps %ymm2, %ymm1, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vminps %xmm2, %xmm1, %xmm1
vblendvps %ymm0, %ymm12, %ymm4, %ymm2
vshufps $0xb1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2,5,4,7,6]
vminps %ymm3, %ymm2, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vminps %xmm3, %xmm2, %xmm2
vunpcklps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vblendvps %ymm0, %ymm13, %ymm4, %ymm2
vmovaps %ymm4, %ymm7
vshufps $0xb1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2,5,4,7,6]
vminps %ymm3, %ymm2, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vminps %xmm3, %xmm2, %xmm2
vbroadcastss 0x19df32c(%rip), %ymm15 # 0x1eecb84
vblendvps %ymm0, %ymm11, %ymm15, %ymm3
vinsertps $0x28, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],zero
vshufps $0xb1, %ymm3, %ymm3, %ymm2 # ymm2 = ymm3[1,0,3,2,5,4,7,6]
vmaxps %ymm2, %ymm3, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vmaxps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vblendvps %ymm0, %ymm12, %ymm15, %ymm4
vmaxps %xmm3, %xmm2, %xmm2
vshufps $0xb1, %ymm4, %ymm4, %ymm3 # ymm3 = ymm4[1,0,3,2,5,4,7,6]
vmaxps %ymm3, %ymm4, %ymm3
vshufpd $0x5, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2]
vmaxps %ymm4, %ymm3, %ymm3
vextractf128 $0x1, %ymm3, %xmm4
vmaxps %xmm4, %xmm3, %xmm3
vunpcklps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vblendvps %ymm0, %ymm13, %ymm15, %ymm3
vshufps $0xb1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2,5,4,7,6]
vmaxps %ymm4, %ymm3, %ymm3
vshufpd $0x5, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2]
vmaxps %ymm4, %ymm3, %ymm3
vextractf128 $0x1, %ymm3, %xmm4
vmaxps %xmm4, %xmm3, %xmm3
vinsertps $0x28, %xmm3, %xmm2, %xmm3 # xmm3 = xmm2[0,1],xmm3[0],zero
vmovaps %xmm1, 0xc0(%rsp)
vcmpnltps 0x19de130(%rip), %xmm1, %xmm4 # 0x1eeba10
vblendvps %xmm4, %xmm1, %xmm3, %xmm2
vmovshdup %xmm2, %xmm5 # xmm5 = xmm2[1,1,3,3]
xorl %esi, %esi
vxorps %xmm6, %xmm6, %xmm6
vucomiss %xmm5, %xmm6
seta %sil
shll $0x4, %esi
orq $0x20, %rsi
vshufpd $0x1, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1,0]
xorl %r14d, %r14d
vucomiss %xmm5, %xmm6
seta %r14b
shll $0x4, %r14d
orq $0x40, %r14
movq %rsi, 0x98(%rsp)
xorq $0x10, %rsi
movq %rsi, 0x90(%rsp)
xorl %r12d, %r12d
vucomiss %xmm2, %xmm6
seta %r12b
vmovaps 0x320(%rsp), %ymm8
vmovaps %ymm7, %ymm1
vblendvps %ymm0, %ymm8, %ymm7, %ymm5
vshufps $0xb1, %ymm5, %ymm5, %ymm6 # ymm6 = ymm5[1,0,3,2,5,4,7,6]
vminps %ymm6, %ymm5, %ymm5
vshufpd $0x5, %ymm5, %ymm5, %ymm6 # ymm6 = ymm5[1,0,3,2]
vminps %ymm6, %ymm5, %ymm5
vextractf128 $0x1, %ymm5, %xmm6
vminps %xmm6, %xmm5, %xmm5
vmovaps 0x300(%rsp), %ymm9
vblendvps %ymm0, %ymm9, %ymm7, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2,5,4,7,6]
vminps %ymm7, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2]
vminps %ymm7, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm7
vminps %xmm7, %xmm6, %xmm6
vunpcklps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vmovaps 0x2e0(%rsp), %ymm10
vblendvps %ymm0, %ymm10, %ymm1, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2,5,4,7,6]
vminps %ymm7, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2]
vminps %ymm7, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm7
vminps %xmm7, %xmm6, %xmm6
vinsertps $0x28, %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm6[0],zero
vblendvps %ymm0, %ymm8, %ymm15, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2,5,4,7,6]
vmaxps %ymm7, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2]
vmaxps %ymm7, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm7
vmaxps %xmm7, %xmm6, %xmm6
vblendvps %ymm0, %ymm9, %ymm15, %ymm7
vshufps $0xb1, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2,5,4,7,6]
vmaxps %ymm8, %ymm7, %ymm7
vshufpd $0x5, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2]
vmaxps %ymm8, %ymm7, %ymm7
vextractf128 $0x1, %ymm7, %xmm8
vmaxps %xmm8, %xmm7, %xmm7
vblendvps %ymm0, %ymm10, %ymm15, %ymm8
vunpcklps %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vshufps $0xb1, %ymm8, %ymm8, %ymm7 # ymm7 = ymm8[1,0,3,2,5,4,7,6]
vmaxps %ymm7, %ymm8, %ymm7
vshufpd $0x5, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2]
vmaxps %ymm8, %ymm7, %ymm7
vextractf128 $0x1, %ymm7, %xmm8
vmaxps %xmm8, %xmm7, %xmm7
vinsertps $0x28, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0,1],xmm7[0],zero
vblendvps %xmm4, %xmm5, %xmm6, %xmm7
vblendvps %xmm4, %xmm6, %xmm5, %xmm8
vblendvps %ymm0, 0x2a0(%rsp), %ymm15, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm5 # ymm5 = ymm6[1,0,3,2,5,4,7,6]
vmovaps %ymm6, 0xa0(%rsp)
vmaxps %ymm5, %ymm6, %ymm5
vshufpd $0x5, %ymm5, %ymm5, %ymm6 # ymm6 = ymm5[1,0,3,2]
vmaxps %ymm6, %ymm5, %ymm5
vmovaps 0x2c0(%rsp), %ymm15
vblendvps %ymm0, %ymm15, %ymm1, %ymm6
vextractf128 $0x1, %ymm5, %xmm9
vshufps $0xb1, %ymm6, %ymm6, %ymm10 # ymm10 = ymm6[1,0,3,2,5,4,7,6]
vminps %ymm10, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm10 # ymm10 = ymm6[1,0,3,2]
vminps %ymm10, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm10
vminps %xmm10, %xmm6, %xmm6
vmovaps 0x240(%rsp), %ymm10
vmaxss %xmm9, %xmm5, %xmm5
vmovaps 0x200(%rsp), %ymm9
vmovaps 0xc0(%rsp), %xmm1
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmulps %xmm2, %xmm8, %xmm3
vmulps %xmm1, %xmm7, %xmm4
shll $0x4, %r12d
movq %r12, 0x88(%rsp)
xorq $0x10, %r12
movq 0x58(%rsp), %rax
movq 0x70(%rax), %rax
movq %rax, 0x3c0(%rsp)
andl $0x0, 0x3c8(%rsp)
movq %r14, %r13
xorq $0x10, %r14
vbroadcastss %xmm2, %xmm7
vmovaps %xmm7, 0x1e0(%rsp)
vbroadcastss %xmm3, %xmm7
vmovaps 0x110(%rsp), %xmm8
vxorps %xmm7, %xmm8, %xmm7
vmovaps %xmm7, 0x1d0(%rsp)
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vmovaps %xmm7, 0x1c0(%rsp)
vshufps $0x55, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1,1,1]
vxorps %xmm7, %xmm8, %xmm7
vmovaps %xmm7, 0x1b0(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmovaps %xmm2, 0x1a0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[2,2,2,2]
vxorps %xmm2, %xmm8, %xmm2
vmovaps %xmm2, 0x190(%rsp)
vbroadcastss %xmm1, %xmm2
vmovaps %xmm2, 0x180(%rsp)
vbroadcastss %xmm4, %xmm2
vxorps %xmm2, %xmm8, %xmm2
vmovaps %xmm2, 0x170(%rsp)
vshufps $0x55, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
vmovaps %xmm2, 0x160(%rsp)
vshufps $0x55, %xmm4, %xmm4, %xmm2 # xmm2 = xmm4[1,1,1,1]
vxorps %xmm2, %xmm8, %xmm2
vmovaps %xmm2, 0x150(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps %xmm1, 0x140(%rsp)
vshufps $0xaa, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[2,2,2,2]
vxorps %xmm1, %xmm8, %xmm1
vmovaps %xmm1, 0x130(%rsp)
vbroadcastss %xmm6, %xmm1
vmovaps %xmm1, 0x120(%rsp)
vmovaps 0x280(%rsp), %ymm1
vblendvps %ymm0, %ymm15, %ymm1, %ymm0
vmovaps %ymm0, 0xc0(%rsp)
vmovaps 0x220(%rsp), %ymm15
leaq 0x3d0(%rsp), %rbx
vbroadcastss %xmm5, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
cmpq %r11, %rbx
je 0x50e0d8
vbroadcastss -0x8(%rbx), %ymm0
addq $-0x10, %rbx
vcmpltps 0xa0(%rsp), %ymm0, %ymm1
vtestps %ymm1, %ymm1
je 0x50dc0d
movq (%rbx), %r15
testb $0x8, %r15b
jne 0x50df28
movq 0x88(%rsp), %rax
vmovaps 0x20(%r15,%rax), %xmm1
vmovaps 0x1d0(%rsp), %xmm2
vfmadd132ps 0x1e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
movq 0x98(%rsp), %rax
vmovaps 0x20(%r15,%rax), %xmm2
vmovaps 0x1b0(%rsp), %xmm3
vfmadd132ps 0x1c0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vpmaxsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r15,%r13), %xmm2
vmovaps 0x190(%rsp), %xmm3
vfmadd132ps 0x1a0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vpmaxsd 0x120(%rsp), %xmm2, %xmm2
vpmaxsd %xmm2, %xmm1, %xmm1
vmovaps 0x20(%r15,%r12), %xmm2
vmovaps 0x170(%rsp), %xmm3
vfmadd132ps 0x180(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
movq 0x90(%rsp), %rax
vmovaps 0x20(%r15,%rax), %xmm3
vmovaps 0x150(%rsp), %xmm4
vfmadd132ps 0x160(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vpminsd %xmm3, %xmm2, %xmm2
vmovaps 0x20(%r15,%r14), %xmm3
vmovaps 0x130(%rsp), %xmm4
vfmadd132ps 0x140(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vpminsd 0x1f0(%rsp), %xmm3, %xmm3
vpminsd %xmm3, %xmm2, %xmm2
vmovdqa %xmm1, 0xe0(%rsp)
vcmpleps %xmm2, %xmm1, %xmm1
vmovmskps %xmm1, %eax
testl %eax, %eax
je 0x50de65
movzbl %al, %edi
vbroadcastss 0x19ddce0(%rip), %ymm0 # 0x1eeba20
movq %r8, %rax
xorl %r9d, %r9d
tzcntq %rdi, %rsi
vbroadcastss 0x20(%r15,%rsi,4), %ymm1
vbroadcastss 0x40(%r15,%rsi,4), %ymm2
vfmadd213ps %ymm10, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm1) + ymm10
vfmadd213ps %ymm15, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm2) + ymm15
vbroadcastss 0x60(%r15,%rsi,4), %ymm3
vfmadd213ps %ymm9, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm3) + ymm9
vbroadcastss 0x30(%r15,%rsi,4), %ymm4
vfmadd213ps %ymm10, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm4) + ymm10
vbroadcastss 0x50(%r15,%rsi,4), %ymm5
vbroadcastss 0x70(%r15,%rsi,4), %ymm6
vfmadd213ps %ymm15, %ymm12, %ymm5 # ymm5 = (ymm12 * ymm5) + ymm15
vfmadd213ps %ymm9, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm6) + ymm9
vpminsd %ymm4, %ymm1, %ymm7
vpminsd %ymm5, %ymm2, %ymm8
vpmaxsd %ymm8, %ymm7, %ymm7
vpminsd %ymm6, %ymm3, %ymm8
vpmaxsd 0xc0(%rsp), %ymm8, %ymm8
vpmaxsd %ymm8, %ymm7, %ymm7
vpmaxsd %ymm4, %ymm1, %ymm1
vpmaxsd %ymm5, %ymm2, %ymm2
vpminsd %ymm2, %ymm1, %ymm1
vpmaxsd %ymm6, %ymm3, %ymm2
vpminsd 0xa0(%rsp), %ymm2, %ymm2
vpminsd %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm7, %ymm1
vtestps %ymm1, %ymm1
je 0x50de2f
vbroadcastss 0xe0(%rsp,%rsi,4), %ymm1
movq (%r15,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
vcmpltps %ymm0, %ymm1, %ymm2
vtestps %ymm2, %ymm2
jne 0x50de0f
movq %rsi, (%rbx)
vmovaps %xmm1, %xmm2
jmp 0x50de23
cmpq $0x8, %rax
je 0x50de3c
movq %rax, (%rbx)
vmovaps %xmm0, %xmm2
vmovaps %ymm1, %ymm0
movq %rsi, %rax
incq %r9
vmovss %xmm2, 0x8(%rbx)
addq $0x10, %rbx
blsrq %rdi, %rdi
jne 0x50dd46
jmp 0x50de45
vmovaps %ymm1, %ymm0
movq %rsi, %rax
jmp 0x50de2f
cmpq $0x8, %rax
je 0x50de6a
movb $0x1, %r10b
cmpq $0x2, %r9
jae 0x50de6f
movq %rax, %r15
testb %r10b, %r10b
jne 0x50dc34
jmp 0x50dc0d
xorl %r10d, %r10d
jmp 0x50de57
xorl %r10d, %r10d
jmp 0x50de54
leaq -0x20(%rbx), %rsi
movl -0x18(%rbx), %r8d
leaq -0x10(%rbx), %rdi
cmpl -0x8(%rbx), %r8d
jae 0x50deaa
vmovaps (%rsi), %xmm1
vmovaps %xmm1, 0x10(%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%rsi)
movq (%rdi), %r8
movq %r8, (%rsi)
movq 0x10(%rsp), %r8
movq %r8, (%rdi)
movl 0x18(%rsp), %r8d
movl %r8d, 0x8(%rdi)
cmpq $0x2, %r9
pushq $0x8
popq %r8
je 0x50de54
leaq -0x30(%rbx), %r9
movl -0x28(%rbx), %r8d
cmpl -0x8(%rbx), %r8d
jae 0x50deec
vmovaps (%r9), %xmm1
vmovaps %xmm1, 0x10(%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%r9)
movq (%rdi), %r8
movq %r8, (%r9)
movq 0x10(%rsp), %r8
movq %r8, (%rdi)
movl 0x18(%rsp), %r8d
movl %r8d, 0x8(%rdi)
movl -0x28(%rbx), %edi
cmpl -0x18(%rbx), %edi
pushq $0x8
popq %r8
jae 0x50de54
vmovaps (%r9), %xmm1
vmovaps %xmm1, 0x10(%rsp)
movl 0x8(%rsi), %edi
movl %edi, 0x8(%r9)
movq (%rsi), %rdi
movq %rdi, (%r9)
movq 0x10(%rsp), %rdi
movq %rdi, (%rsi)
movl 0x18(%rsp), %edi
movl %edi, 0x8(%rsi)
jmp 0x50de54
vmovaps 0xa0(%rsp), %ymm1
vcmpnleps %ymm0, %ymm1, %ymm2
vtestps %ymm2, %ymm2
je 0x50dc0d
movl %r15d, %esi
andl $0xf, %esi
addq $-0x8, %rsi
je 0x50e082
andq $-0x10, %r15
xorl %r9d, %r9d
vmovaps %ymm2, 0x3a0(%rsp)
movq %rsi, 0x78(%rsp)
movq (%rcx), %rax
movl (%r15,%r9,8), %edi
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rax
vpbroadcastd 0x34(%rax), %ymm0
vpand 0x120(%rdx), %ymm0, %ymm0
vpcmpeqd %ymm0, %ymm14, %ymm0
vtestps %ymm2, %ymm0
jb 0x50e076
vandnps %ymm2, %ymm0, %ymm0
movq %r9, 0x80(%rsp)
movl 0x4(%r15,%r9,8), %r8d
vmovaps %ymm0, 0xe0(%rsp)
leaq 0xe0(%rsp), %rsi
movq %rsi, 0x10(%rsp)
movq 0x18(%rax), %rsi
movq %rsi, 0x18(%rsp)
movq 0x8(%rcx), %rsi
movq %rsi, 0x28(%rsp)
movq %rdx, 0x30(%rsp)
movl $0x8, 0x38(%rsp)
movl %edi, 0x3c(%rsp)
movl %r8d, 0x20(%rsp)
movq %rax, 0x40(%rsp)
andq $0x0, 0x48(%rsp)
movq 0x10(%rcx), %rcx
movq %rcx, 0x50(%rsp)
movq 0x18(%rcx), %rcx
testq %rcx, %rcx
jne 0x50e005
movq 0x60(%rax), %rcx
leaq 0x10(%rsp), %rdi
vzeroupper
callq *%rcx
movq 0x70(%rsp), %rcx
movq 0x68(%rsp), %rdx
vmovaps 0x380(%rsp), %ymm11
vmovaps 0x360(%rsp), %ymm12
vmovaps 0x340(%rsp), %ymm13
vxorps %xmm14, %xmm14, %xmm14
vmovaps 0x240(%rsp), %ymm10
vmovaps 0x220(%rsp), %ymm15
vmovaps 0x200(%rsp), %ymm9
leaq 0x3c0(%rsp), %r11
pushq $0x8
popq %r8
vmovaps 0x3a0(%rsp), %ymm2
movq 0x78(%rsp), %rsi
movq 0x80(%rsp), %r9
incq %r9
cmpq %r9, %rsi
jne 0x50df66
vmovaps 0x100(%rdx), %ymm0
vcmpltps 0xa0(%rsp), %ymm0, %ymm1
vtestps %ymm2, %ymm1
je 0x50dc0d
vmovaps 0xa0(%rsp), %ymm1
vblendvps %ymm2, %ymm0, %ymm1, %ymm1
vshufps $0xb1, %ymm1, %ymm1, %ymm0 # ymm0 = ymm1[1,0,3,2,5,4,7,6]
vmovaps %ymm1, 0xa0(%rsp)
vmaxps %ymm0, %ymm1, %ymm0
vshufpd $0x5, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2]
vmaxps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vmaxss %xmm1, %xmm0, %xmm5
jmp 0x50dbff
movq 0x60(%rsp), %rsi
testq %rsi, %rsi
jne 0x50d7b2
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vint8_avx2.h
|
embree::avx2::BVHNIntersectorKHybrid<4, 8, 1, false, embree::avx2::ArrayIntersectorK_1<8, embree::avx2::InstanceIntersectorK<8>>, false>::intersectCoherent(embree::vint_impl<8>*, embree::Accel::Intersectors*, embree::RayHitK<8>&, embree::RayQueryContext*)
|
static __forceinline vboolf8 operator ==(const vint8& a, const vint8& b) { return _mm256_castsi256_ps(_mm256_cmpeq_epi32(a, b)); }
|
vpcmpeqd %ymm0, %ymm0, %ymm0
vpcmpeqd (%rdi), %ymm0, %ymm1
vmovmskps %ymm1, %eax
testl %eax, %eax
je 0x50f6b9
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x12e0, %rsp # imm = 0x12E0
movq (%rsi), %rsi
movq %rsi, 0x58(%rsp)
movzbl %al, %r9d
vmovaps (%rdx), %ymm7
vmovaps 0x20(%rdx), %ymm8
vmovaps 0x40(%rdx), %ymm9
vmovaps 0x80(%rdx), %ymm2
vbroadcastss 0x1a12293(%rip), %ymm3 # 0x1f20ec4
vandps %ymm3, %ymm2, %ymm4
vbroadcastss 0x19e23aa(%rip), %ymm5 # 0x1ef0fe8
vcmpltps %ymm5, %ymm4, %ymm4
vblendvps %ymm4, %ymm5, %ymm2, %ymm11
vmovaps 0xa0(%rdx), %ymm4
vandps %ymm3, %ymm4, %ymm6
vcmpltps %ymm5, %ymm6, %ymm6
vblendvps %ymm6, %ymm5, %ymm4, %ymm12
vmovaps 0xc0(%rdx), %ymm4
vandps %ymm3, %ymm4, %ymm3
vcmpltps %ymm5, %ymm3, %ymm3
vblendvps %ymm3, %ymm5, %ymm4, %ymm13
vmovaps 0x60(%rdx), %ymm3
vrcpps %ymm11, %ymm4
vbroadcastss 0x19dda8a(%rip), %ymm5 # 0x1eec714
vfnmadd213ps %ymm5, %ymm4, %ymm11 # ymm11 = -(ymm4 * ymm11) + ymm5
vfmadd132ps %ymm4, %ymm4, %ymm11 # ymm11 = (ymm11 * ymm4) + ymm4
vrcpps %ymm12, %ymm4
vfnmadd213ps %ymm5, %ymm4, %ymm12 # ymm12 = -(ymm4 * ymm12) + ymm5
vfmadd132ps %ymm4, %ymm4, %ymm12 # ymm12 = (ymm12 * ymm4) + ymm4
vrcpps %ymm13, %ymm4
vfnmadd213ps %ymm5, %ymm4, %ymm13 # ymm13 = -(ymm4 * ymm13) + ymm5
vfmadd132ps %ymm4, %ymm4, %ymm13 # ymm13 = (ymm13 * ymm4) + ymm4
vxorps %xmm4, %xmm4, %xmm4
vmaxps %ymm4, %ymm3, %ymm3
vmovaps %ymm3, 0x200(%rsp)
vmovaps 0x100(%rdx), %ymm3
vmaxps %ymm4, %ymm3, %ymm3
vmovaps %ymm3, 0x2e0(%rsp)
vbroadcastss 0x1a03a23(%rip), %ymm3 # 0x1f12704
vcmpltps %ymm4, %ymm2, %ymm2
vandps %ymm3, %ymm2, %ymm2
vmovaps 0xa0(%rdx), %ymm3
vmovaps 0xc0(%rdx), %ymm5
vcmpltps %ymm4, %ymm3, %ymm3
vbroadcastss 0x1a121d4(%rip), %ymm6 # 0x1f20edc
vandps %ymm6, %ymm3, %ymm3
vorps %ymm2, %ymm3, %ymm2
vcmpltps %ymm4, %ymm5, %ymm3
vbroadcastss 0x1a4bc56(%rip), %ymm4 # 0x1f5a974
vandps %ymm4, %ymm3, %ymm3
vpxor %ymm0, %ymm1, %ymm0
vpor %ymm3, %ymm0, %ymm0
vpor %ymm2, %ymm0, %ymm0
vmovdqa %ymm0, 0x2a0(%rsp)
vbroadcastss 0x1a12180(%rip), %ymm0 # 0x1f20ec0
vmovaps %ymm7, 0x340(%rsp)
vxorps %ymm0, %ymm7, %ymm1
vmulps %ymm1, %ymm11, %ymm15
vmovaps %ymm8, 0x320(%rsp)
vxorps %ymm0, %ymm8, %ymm1
vmulps %ymm1, %ymm12, %ymm10
vmovaps %ymm9, 0x300(%rsp)
vxorps %ymm0, %ymm9, %ymm0
vmulps %ymm0, %ymm13, %ymm14
vbroadcastss 0x1a12144(%rip), %xmm0 # 0x1f20ec0
vmovaps %xmm0, 0x110(%rsp)
leaq 0x380(%rsp), %r11
pushq $0x8
popq %r8
leaq 0x20(%rsp), %rdi
leaq 0x1f(%rsp), %rsi
vbroadcastss 0x19dcc7c(%rip), %ymm0 # 0x1eeba20
vmovaps %ymm0, 0x2c0(%rsp)
movq %rcx, 0x70(%rsp)
movq %rdx, 0x68(%rsp)
vmovaps %ymm11, 0x280(%rsp)
vmovaps %ymm12, 0x260(%rsp)
vmovaps %ymm13, 0x240(%rsp)
vmovaps %ymm15, 0x220(%rsp)
vmovaps %ymm10, 0xc0(%rsp)
vmovaps %ymm14, 0xa0(%rsp)
tzcntq %r9, %rax
vpbroadcastd 0x2a0(%rsp,%rax,4), %ymm0
vpcmpeqd 0x2a0(%rsp), %ymm0, %ymm0
vmovmskps %ymm0, %eax
andnq %r9, %rax, %r9
movq %r9, 0x60(%rsp)
vbroadcastss 0x19dcc04(%rip), %ymm4 # 0x1eeba20
vblendvps %ymm0, %ymm11, %ymm4, %ymm1
vshufps $0xb1, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2,5,4,7,6]
vminps %ymm2, %ymm1, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vminps %xmm2, %xmm1, %xmm1
vblendvps %ymm0, %ymm12, %ymm4, %ymm2
vshufps $0xb1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2,5,4,7,6]
vminps %ymm3, %ymm2, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vminps %xmm3, %xmm2, %xmm2
vunpcklps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vblendvps %ymm0, %ymm13, %ymm4, %ymm2
vmovaps %ymm4, %ymm7
vshufps $0xb1, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2,5,4,7,6]
vminps %ymm3, %ymm2, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vminps %xmm3, %xmm2, %xmm2
vbroadcastss 0x19ddcf1(%rip), %ymm14 # 0x1eecb84
vblendvps %ymm0, %ymm11, %ymm14, %ymm3
vinsertps $0x28, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1],xmm2[0],zero
vshufps $0xb1, %ymm3, %ymm3, %ymm2 # ymm2 = ymm3[1,0,3,2,5,4,7,6]
vmaxps %ymm2, %ymm3, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vmaxps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vblendvps %ymm0, %ymm12, %ymm14, %ymm4
vmaxps %xmm3, %xmm2, %xmm2
vshufps $0xb1, %ymm4, %ymm4, %ymm3 # ymm3 = ymm4[1,0,3,2,5,4,7,6]
vmaxps %ymm3, %ymm4, %ymm3
vshufpd $0x5, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2]
vmaxps %ymm4, %ymm3, %ymm3
vextractf128 $0x1, %ymm3, %xmm4
vmaxps %xmm4, %xmm3, %xmm3
vunpcklps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
vblendvps %ymm0, %ymm13, %ymm14, %ymm3
vshufps $0xb1, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2,5,4,7,6]
vmaxps %ymm4, %ymm3, %ymm3
vshufpd $0x5, %ymm3, %ymm3, %ymm4 # ymm4 = ymm3[1,0,3,2]
vmaxps %ymm4, %ymm3, %ymm3
vextractf128 $0x1, %ymm3, %xmm4
vmaxps %xmm4, %xmm3, %xmm3
vinsertps $0x28, %xmm3, %xmm2, %xmm3 # xmm3 = xmm2[0,1],xmm3[0],zero
vmovaps %xmm1, 0xe0(%rsp)
vcmpnltps 0x19dcaf5(%rip), %xmm1, %xmm4 # 0x1eeba10
vblendvps %xmm4, %xmm1, %xmm3, %xmm2
vmovshdup %xmm2, %xmm5 # xmm5 = xmm2[1,1,3,3]
xorl %r9d, %r9d
vxorps %xmm6, %xmm6, %xmm6
vucomiss %xmm5, %xmm6
seta %r9b
shll $0x4, %r9d
orq $0x20, %r9
vshufpd $0x1, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[1,0]
xorl %r14d, %r14d
vucomiss %xmm5, %xmm6
seta %r14b
shll $0x4, %r14d
orq $0x40, %r14
movq %r9, 0x88(%rsp)
xorq $0x10, %r9
movq %r9, 0x80(%rsp)
xorl %r12d, %r12d
vucomiss %xmm2, %xmm6
seta %r12b
vmovaps 0x340(%rsp), %ymm8
vmovaps %ymm7, %ymm1
vblendvps %ymm0, %ymm8, %ymm7, %ymm5
vshufps $0xb1, %ymm5, %ymm5, %ymm6 # ymm6 = ymm5[1,0,3,2,5,4,7,6]
vminps %ymm6, %ymm5, %ymm5
vshufpd $0x5, %ymm5, %ymm5, %ymm6 # ymm6 = ymm5[1,0,3,2]
vminps %ymm6, %ymm5, %ymm5
vextractf128 $0x1, %ymm5, %xmm6
vminps %xmm6, %xmm5, %xmm5
vmovaps 0x320(%rsp), %ymm9
vblendvps %ymm0, %ymm9, %ymm7, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2,5,4,7,6]
vminps %ymm7, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2]
vminps %ymm7, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm7
vminps %xmm7, %xmm6, %xmm6
vunpcklps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vmovaps 0x300(%rsp), %ymm10
vblendvps %ymm0, %ymm10, %ymm1, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2,5,4,7,6]
vminps %ymm7, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2]
vminps %ymm7, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm7
vminps %xmm7, %xmm6, %xmm6
vinsertps $0x28, %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm6[0],zero
vblendvps %ymm0, %ymm8, %ymm14, %ymm6
vshufps $0xb1, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2,5,4,7,6]
vmaxps %ymm7, %ymm6, %ymm6
vshufpd $0x5, %ymm6, %ymm6, %ymm7 # ymm7 = ymm6[1,0,3,2]
vmaxps %ymm7, %ymm6, %ymm6
vextractf128 $0x1, %ymm6, %xmm7
vmaxps %xmm7, %xmm6, %xmm6
vblendvps %ymm0, %ymm9, %ymm14, %ymm7
vshufps $0xb1, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2,5,4,7,6]
vmaxps %ymm8, %ymm7, %ymm7
vshufpd $0x5, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2]
vmaxps %ymm8, %ymm7, %ymm7
vextractf128 $0x1, %ymm7, %xmm8
vmaxps %xmm8, %xmm7, %xmm7
vblendvps %ymm0, %ymm10, %ymm14, %ymm8
vunpcklps %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vshufps $0xb1, %ymm8, %ymm8, %ymm7 # ymm7 = ymm8[1,0,3,2,5,4,7,6]
vmaxps %ymm7, %ymm8, %ymm7
vshufpd $0x5, %ymm7, %ymm7, %ymm8 # ymm8 = ymm7[1,0,3,2]
vmaxps %ymm8, %ymm7, %ymm7
vextractf128 $0x1, %ymm7, %xmm8
vmaxps %xmm8, %xmm7, %xmm7
vinsertps $0x28, %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[0,1],xmm7[0],zero
vblendvps %xmm4, %xmm5, %xmm6, %xmm7
vblendvps %xmm4, %xmm6, %xmm5, %xmm6
vblendvps %ymm0, 0x2e0(%rsp), %ymm14, %ymm14
vshufps $0xb1, %ymm14, %ymm14, %ymm5 # ymm5 = ymm14[1,0,3,2,5,4,7,6]
vmaxps %ymm5, %ymm14, %ymm5
vshufpd $0x5, %ymm5, %ymm5, %ymm8 # ymm8 = ymm5[1,0,3,2]
vmaxps %ymm8, %ymm5, %ymm8
vblendvps %ymm0, 0x200(%rsp), %ymm1, %ymm5
vextractf128 $0x1, %ymm8, %xmm9
vshufps $0xb1, %ymm5, %ymm5, %ymm10 # ymm10 = ymm5[1,0,3,2,5,4,7,6]
vminps %ymm10, %ymm5, %ymm5
vshufpd $0x5, %ymm5, %ymm5, %ymm10 # ymm10 = ymm5[1,0,3,2]
vminps %ymm10, %ymm5, %ymm5
vextractf128 $0x1, %ymm5, %xmm10
vminps %xmm10, %xmm5, %xmm5
vmovaps 0xc0(%rsp), %ymm10
vmaxss %xmm9, %xmm8, %xmm8
vmovaps 0xe0(%rsp), %xmm1
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmulps %xmm2, %xmm6, %xmm3
vmulps %xmm1, %xmm7, %xmm4
shll $0x4, %r12d
movq %r12, 0x78(%rsp)
xorq $0x10, %r12
movq 0x58(%rsp), %rax
movq 0x70(%rax), %rax
movq %rax, 0x380(%rsp)
andl $0x0, 0x388(%rsp)
movq %r14, %r15
xorq $0x10, %r14
vbroadcastss %xmm2, %xmm6
vmovaps %xmm6, 0x1e0(%rsp)
vbroadcastss %xmm3, %xmm6
vmovaps 0x110(%rsp), %xmm7
vxorps %xmm7, %xmm6, %xmm6
vmovaps %xmm6, 0x1d0(%rsp)
vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1]
vmovaps %xmm6, 0x1c0(%rsp)
vshufps $0x55, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[1,1,1,1]
vxorps %xmm7, %xmm6, %xmm6
vmovaps %xmm6, 0x1b0(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmovaps %xmm2, 0x1a0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[2,2,2,2]
vxorps %xmm7, %xmm2, %xmm2
vmovaps %xmm2, 0x190(%rsp)
vbroadcastss %xmm1, %xmm2
vmovaps %xmm2, 0x180(%rsp)
vbroadcastss %xmm4, %xmm2
vxorps %xmm7, %xmm2, %xmm2
vmovaps %xmm2, 0x170(%rsp)
vshufps $0x55, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
vmovaps %xmm2, 0x160(%rsp)
vshufps $0x55, %xmm4, %xmm4, %xmm2 # xmm2 = xmm4[1,1,1,1]
vxorps %xmm7, %xmm2, %xmm2
vmovaps %xmm2, 0x150(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps %xmm1, 0x140(%rsp)
vshufps $0xaa, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[2,2,2,2]
vxorps %xmm7, %xmm1, %xmm1
vmovaps %xmm1, 0x130(%rsp)
vbroadcastss %xmm5, %xmm1
vmovaps %xmm1, 0x120(%rsp)
vmovaps %ymm14, %ymm5
vmovaps 0x2c0(%rsp), %ymm1
vblendvps %ymm0, 0x200(%rsp), %ymm1, %ymm0
vmovaps %ymm0, 0xe0(%rsp)
vmovaps 0xa0(%rsp), %ymm14
leaq 0x390(%rsp), %rbx
vmovaps %xmm8, 0x90(%rsp)
vbroadcastss %xmm8, %xmm0
vmovaps %xmm0, 0x40(%rsp)
cmpq %r11, %rbx
je 0x50f69d
vbroadcastss -0x8(%rbx), %ymm1
addq $-0x10, %rbx
vcmpltps %ymm5, %ymm1, %ymm2
vtestps %ymm2, %ymm2
je 0x50f242
movq (%rbx), %r13
testb $0x8, %r13b
jne 0x50f560
movq 0x78(%rsp), %rax
vmovaps 0x20(%r13,%rax), %xmm2
vmovaps 0x1d0(%rsp), %xmm0
vfmadd132ps 0x1e0(%rsp), %xmm0, %xmm2 # xmm2 = (xmm2 * mem) + xmm0
movq 0x88(%rsp), %rax
vmovaps 0x20(%r13,%rax), %xmm3
vmovaps 0x1b0(%rsp), %xmm0
vfmadd132ps 0x1c0(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vpmaxsd %xmm3, %xmm2, %xmm2
vmovaps 0x20(%r13,%r15), %xmm3
vmovaps 0x190(%rsp), %xmm0
vfmadd132ps 0x1a0(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vpmaxsd 0x120(%rsp), %xmm3, %xmm3
vpmaxsd %xmm3, %xmm2, %xmm2
vmovaps 0x20(%r13,%r12), %xmm3
vmovaps 0x170(%rsp), %xmm0
vfmadd132ps 0x180(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
movq 0x80(%rsp), %rax
vmovaps 0x20(%r13,%rax), %xmm4
vmovaps 0x150(%rsp), %xmm0
vfmadd132ps 0x160(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vpminsd %xmm4, %xmm3, %xmm3
vmovaps 0x20(%r13,%r14), %xmm4
vmovaps 0x130(%rsp), %xmm0
vfmadd132ps 0x140(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vpminsd 0x40(%rsp), %xmm4, %xmm4
vpminsd %xmm4, %xmm3, %xmm3
vmovdqa %xmm2, 0x1f0(%rsp)
vcmpleps %xmm3, %xmm2, %xmm2
vmovmskps %xmm2, %eax
testl %eax, %eax
je 0x50f49d
movzbl %al, %edi
vbroadcastss 0x19dc6b6(%rip), %ymm1 # 0x1eeba20
movq %r8, %rax
xorl %r9d, %r9d
tzcntq %rdi, %rsi
vbroadcastss 0x20(%r13,%rsi,4), %ymm2
vbroadcastss 0x40(%r13,%rsi,4), %ymm3
vfmadd213ps %ymm15, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm2) + ymm15
vfmadd213ps %ymm10, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm3) + ymm10
vbroadcastss 0x60(%r13,%rsi,4), %ymm4
vfmadd213ps %ymm14, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm4) + ymm14
vmovaps %ymm5, %ymm0
vbroadcastss 0x30(%r13,%rsi,4), %ymm5
vfmadd213ps %ymm15, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm5) + ymm15
vbroadcastss 0x50(%r13,%rsi,4), %ymm6
vbroadcastss 0x70(%r13,%rsi,4), %ymm7
vfmadd213ps %ymm10, %ymm12, %ymm6 # ymm6 = (ymm12 * ymm6) + ymm10
vfmadd213ps %ymm14, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm7) + ymm14
vpminsd %ymm5, %ymm2, %ymm8
vpminsd %ymm6, %ymm3, %ymm9
vpmaxsd %ymm9, %ymm8, %ymm8
vpminsd %ymm7, %ymm4, %ymm9
vpmaxsd 0xe0(%rsp), %ymm9, %ymm9
vpmaxsd %ymm9, %ymm8, %ymm8
vpmaxsd %ymm5, %ymm2, %ymm2
vmovaps %ymm0, %ymm5
vpmaxsd %ymm6, %ymm3, %ymm3
vpminsd %ymm3, %ymm2, %ymm2
vpmaxsd %ymm7, %ymm4, %ymm3
vpminsd %ymm0, %ymm3, %ymm3
vpminsd %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm8, %ymm2
vtestps %ymm2, %ymm2
je 0x50f45d
vbroadcastss 0x1f0(%rsp,%rsi,4), %ymm2
movq (%r13,%rsi,8), %rsi
prefetcht0 (%rsi)
prefetcht0 0x40(%rsi)
vcmpltps %ymm1, %ymm2, %ymm3
vtestps %ymm3, %ymm3
jne 0x50f43d
movq %rsi, (%rbx)
vmovaps %xmm2, %xmm3
jmp 0x50f451
cmpq $0x8, %rax
je 0x50f46a
movq %rax, (%rbx)
vmovaps %xmm1, %xmm3
vmovaps %ymm2, %ymm1
movq %rsi, %rax
incq %r9
vmovss %xmm3, 0x8(%rbx)
addq $0x10, %rbx
blsrq %rdi, %rdi
jne 0x50f370
jmp 0x50f473
vmovaps %ymm2, %ymm1
movq %rsi, %rax
jmp 0x50f45d
cmpq $0x8, %rax
je 0x50f4a2
movb $0x1, %r10b
cmpq $0x2, %r9
jae 0x50f4a7
movq %rax, %r13
leaq 0x20(%rsp), %rdi
leaq 0x1f(%rsp), %rsi
testb %r10b, %r10b
jne 0x50f264
jmp 0x50f242
xorl %r10d, %r10d
jmp 0x50f48f
xorl %r10d, %r10d
jmp 0x50f482
leaq -0x20(%rbx), %rsi
movl -0x18(%rbx), %r8d
leaq -0x10(%rbx), %rdi
cmpl -0x8(%rbx), %r8d
jae 0x50f4e2
vmovaps (%rsi), %xmm2
vmovaps %xmm2, 0x20(%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%rsi)
movq (%rdi), %r8
movq %r8, (%rsi)
movq 0x20(%rsp), %r8
movq %r8, (%rdi)
movl 0x28(%rsp), %r8d
movl %r8d, 0x8(%rdi)
cmpq $0x2, %r9
pushq $0x8
popq %r8
je 0x50f482
leaq -0x30(%rbx), %r9
movl -0x28(%rbx), %r8d
cmpl -0x8(%rbx), %r8d
jae 0x50f524
vmovaps (%r9), %xmm2
vmovaps %xmm2, 0x20(%rsp)
movl 0x8(%rdi), %r8d
movl %r8d, 0x8(%r9)
movq (%rdi), %r8
movq %r8, (%r9)
movq 0x20(%rsp), %r8
movq %r8, (%rdi)
movl 0x28(%rsp), %r8d
movl %r8d, 0x8(%rdi)
movl -0x28(%rbx), %edi
cmpl -0x18(%rbx), %edi
pushq $0x8
popq %r8
jae 0x50f482
vmovaps (%r9), %xmm2
vmovaps %xmm2, 0x20(%rsp)
movl 0x8(%rsi), %edi
movl %edi, 0x8(%r9)
movq (%rsi), %rdi
movq %rdi, (%r9)
movq 0x20(%rsp), %rdi
movq %rdi, (%rsi)
movl 0x28(%rsp), %edi
movl %edi, 0x8(%rsi)
jmp 0x50f482
vcmpnleps %ymm1, %ymm5, %ymm0
vmovaps %ymm0, 0x20(%rsp)
vtestps %ymm0, %ymm0
je 0x50f68f
vmovaps %ymm5, 0x360(%rsp)
movl %r13d, %eax
andl $0xf, %eax
addq $-0x8, %rax
je 0x50f5c1
andq $-0x10, %r13
movq %rax, 0x40(%rsp)
movq %r13, %r8
vzeroupper
callq 0x5f2ef4
movq 0x40(%rsp), %rax
leaq 0x1f(%rsp), %rsi
leaq 0x20(%rsp), %rdi
movq 0x68(%rsp), %rdx
movq 0x70(%rsp), %rcx
addq $0x10, %r13
decq %rax
jne 0x50f58f
vmovaps 0x100(%rdx), %ymm0
vmovaps 0x360(%rsp), %ymm5
vcmpltps %ymm5, %ymm0, %ymm2
vmovaps 0x20(%rsp), %ymm1
vtestps %ymm1, %ymm2
je 0x50f64d
vblendvps %ymm1, %ymm0, %ymm5, %ymm5
vshufps $0xb1, %ymm5, %ymm5, %ymm0 # ymm0 = ymm5[1,0,3,2,5,4,7,6]
vmaxps %ymm0, %ymm5, %ymm0
vshufpd $0x5, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2]
vmaxps %ymm1, %ymm0, %ymm0
vextractf128 $0x1, %ymm0, %xmm1
vmaxss %xmm1, %xmm0, %xmm8
vmovaps 0x280(%rsp), %ymm11
vmovaps 0x260(%rsp), %ymm12
vmovaps 0x240(%rsp), %ymm13
vmovaps 0x220(%rsp), %ymm15
vmovaps 0xc0(%rsp), %ymm10
vmovaps 0xa0(%rsp), %ymm14
leaq 0x380(%rsp), %r11
pushq $0x8
popq %r8
jmp 0x50f22e
vmovaps 0x280(%rsp), %ymm11
vmovaps 0x260(%rsp), %ymm12
vmovaps 0x240(%rsp), %ymm13
vmovaps 0x220(%rsp), %ymm15
vmovaps 0xc0(%rsp), %ymm10
vmovaps 0xa0(%rsp), %ymm14
leaq 0x380(%rsp), %r11
pushq $0x8
popq %r8
vmovaps 0x90(%rsp), %xmm8
jmp 0x50f22e
movq 0x60(%rsp), %r9
testq %r9, %r9
jne 0x50eded
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vint8_avx2.h
|
embree::avx512::BVHNIntersectorKHybrid<8, 4, 1, false, embree::avx512::ArrayIntersectorK_1<4, embree::avx512::QuadMvIntersectorKMoeller<4, 4, false>>, true>::occludedCoherent(embree::vint_impl<4>*, embree::Accel::Intersectors*, embree::RayK<4>&, embree::RayQueryContext*)
|
__forceinline vboolf4 operator !=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_NE); }
|
vpcmpeqd %xmm0, %xmm0, %xmm0
vpcmpeqd (%rdi), %xmm0, %k1
kortestb %k1, %k1
je 0x74b449
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x25d8, %rsp # imm = 0x25D8
movq %rcx, -0x68(%rsp)
kmovd %k1, %eax
movq (%rsi), %r14
vmovaps (%rdx), %xmm8
vmovaps 0x10(%rdx), %xmm9
vmovaps 0x20(%rdx), %xmm10
vmovaps 0x30(%rdx), %xmm2
movl %eax, -0x6c(%rsp)
movzbl %al, %ebp
vmovaps 0x40(%rdx), %xmm1
vbroadcastss 0x17d68dd(%rip), %xmm11 # 0x1f20ec4
vandps %xmm1, %xmm11, %xmm3
vbroadcastss 0x17a69f4(%rip), %xmm7 # 0x1ef0fe8
vcmpltps %xmm7, %xmm3, %k2
vblendmps %xmm7, %xmm1, %xmm4 {%k2}
vmovaps 0x50(%rdx), %xmm5
vandps %xmm5, %xmm11, %xmm3
vcmpltps %xmm7, %xmm3, %k2
vmovaps %xmm7, %xmm5 {%k2}
vmovaps 0x60(%rdx), %xmm6
vandps %xmm6, %xmm11, %xmm3
vcmpltps %xmm7, %xmm3, %k2
vmovaps %xmm7, %xmm6 {%k2}
vrcp14ps %xmm4, %xmm3
vxorps %xmm11, %xmm11, %xmm11
vbroadcastss 0x17a20d3(%rip), %xmm7 # 0x1eec714
vfnmadd213ps %xmm7, %xmm3, %xmm4 # xmm4 = -(xmm3 * xmm4) + xmm7
vfmadd132ps %xmm3, %xmm3, %xmm4 # xmm4 = (xmm4 * xmm3) + xmm3
vrcp14ps %xmm5, %xmm3
vfnmadd213ps %xmm7, %xmm3, %xmm5 # xmm5 = -(xmm3 * xmm5) + xmm7
vfmadd132ps %xmm3, %xmm3, %xmm5 # xmm5 = (xmm5 * xmm3) + xmm3
vrcp14ps %xmm6, %xmm3
vfnmadd213ps %xmm7, %xmm3, %xmm6 # xmm6 = -(xmm3 * xmm6) + xmm7
vfmadd132ps %xmm3, %xmm3, %xmm6 # xmm6 = (xmm6 * xmm3) + xmm3
vmaxps %xmm11, %xmm2, %xmm2
vmovaps %xmm2, 0x220(%rsp)
leaq 0x80(%rdx), %rax
movq %rax, 0xc8(%rsp)
vmovaps 0x80(%rdx), %xmm2
vmaxps %xmm11, %xmm2, %xmm2
vmovaps %xmm2, 0x210(%rsp)
kmovd %k1, %eax
vcmpltps %xmm11, %xmm1, %xmm1
vandps 0x17c8052(%rip){1to4}, %xmm1, %xmm2 # 0x1f12704
vcmpgtps 0x50(%rdx), %xmm11, %k2
xorb $0xf, %al
movl %eax, -0x74(%rsp)
vpbroadcastd 0x17d6813(%rip), %xmm3 # 0x1f20edc
vpsubd %xmm1, %xmm3, %xmm2 {%k2}
movq %rdx, 0xd8(%rsp)
vcmpgtps 0x60(%rdx), %xmm11, %k2
vpord 0x181028b(%rip){1to4}, %xmm2, %xmm2 {%k2} # 0x1f5a974
vmovdqa32 %xmm2, %xmm0 {%k1}
vmovdqa %xmm0, 0x200(%rsp)
vbroadcastss 0x17d67bf(%rip), %xmm1 # 0x1f20ec0
vmovaps %xmm8, 0x250(%rsp)
vxorps %xmm1, %xmm8, %xmm0
vmulps %xmm0, %xmm4, %xmm12
vmovaps %xmm9, 0x240(%rsp)
vxorps %xmm1, %xmm9, %xmm0
vmulps %xmm0, %xmm5, %xmm13
vmovaps %xmm10, 0x230(%rsp)
vxorps %xmm1, %xmm10, %xmm0
vmulps %xmm0, %xmm6, %xmm14
movabsq $0x8, %rbx
movq %r14, 0xd0(%rsp)
tzcntq %rbp, %rax
vpbroadcastd 0x200(%rsp,%rax,4), %xmm0
vpcmpeqd 0x200(%rsp), %xmm0, %k1
kmovb %k1, %edx
vbroadcastss 0x17a12b5(%rip), %xmm20 # 0x1eeba20
vblendmps %xmm4, %xmm20, %xmm0 {%k1}
vblendmps %xmm5, %xmm20, %xmm1 {%k1}
vblendmps %xmm6, %xmm20, %xmm2 {%k1}
vbroadcastss 0x17a23fd(%rip), %xmm18 # 0x1eecb84
vblendmps %xmm4, %xmm18, %xmm3 {%k1}
vblendmps %xmm5, %xmm18, %xmm7 {%k1}
vblendmps %xmm6, %xmm18, %xmm8 {%k1}
vmovaps 0x250(%rsp), %xmm15
vblendmps %xmm15, %xmm20, %xmm9 {%k1}
vmovaps 0x240(%rsp), %xmm16
vblendmps %xmm16, %xmm20, %xmm10 {%k1}
vmovaps 0x230(%rsp), %xmm17
vblendmps %xmm17, %xmm20, %xmm11 {%k1}
vblendmps %xmm15, %xmm18, %xmm15 {%k1}
vblendmps %xmm16, %xmm18, %xmm16 {%k1}
vblendmps %xmm17, %xmm18, %xmm17 {%k1}
vblendmps 0x210(%rsp), %xmm18, %xmm21 {%k1}
vmovaps 0x220(%rsp), %xmm19
vblendmps %xmm19, %xmm20, %xmm18 {%k1}
vpbroadcastd 0x17a122a(%rip), %xmm20 # 0x1eeba20
vpblendmd %xmm19, %xmm20, %xmm22 {%k1}
kmovd %k1, %eax
movl %eax, -0x70(%rsp)
andnq %rbp, %rdx, %rbp
vshufps $0xb1, %xmm0, %xmm0, %xmm19 # xmm19 = xmm0[1,0,3,2]
vminps %xmm0, %xmm19, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm19 # xmm19 = xmm0[1,0]
vminps %xmm0, %xmm19, %xmm0
vshufps $0xb1, %xmm1, %xmm1, %xmm19 # xmm19 = xmm1[1,0,3,2]
vminps %xmm1, %xmm19, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm19 # xmm19 = xmm1[1,0]
vminps %xmm1, %xmm19, %xmm1
vinsertps $0x1c, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],zero,zero
vshufps $0xb1, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[1,0,3,2]
vminps %xmm2, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vminps %xmm1, %xmm2, %xmm1
vinsertps $0x20, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
vshufps $0xb1, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[1,0,3,2]
vmaxps %xmm3, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vmaxps %xmm1, %xmm2, %xmm1
vshufps $0xb1, %xmm7, %xmm7, %xmm2 # xmm2 = xmm7[1,0,3,2]
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vmaxps %xmm2, %xmm3, %xmm2
vinsertps $0x1c, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],zero,zero
vshufps $0xb1, %xmm8, %xmm8, %xmm2 # xmm2 = xmm8[1,0,3,2]
vmaxps %xmm8, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vmaxps %xmm2, %xmm3, %xmm2
vinsertps $0x20, %xmm2, %xmm1, %xmm3 # xmm3 = xmm1[0,1],xmm2[0],xmm1[3]
vxorps %xmm1, %xmm1, %xmm1
vcmpnltps %xmm1, %xmm0, %k1
vblendmps %xmm0, %xmm3, %xmm1 {%k1}
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
xorl %r15d, %r15d
vxorps %xmm8, %xmm8, %xmm8
vucomiss %xmm2, %xmm8
seta %r15b
shll $0x5, %r15d
orq $0x40, %r15
vshufpd $0x1, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,0]
xorl %eax, %eax
vucomiss %xmm7, %xmm8
seta %al
shll $0x5, %eax
orq $0x80, %rax
movq %r15, %r13
xorq $0x20, %r13
xorl %ecx, %ecx
vucomiss %xmm1, %xmm8
seta %cl
vshufps $0xb1, %xmm9, %xmm9, %xmm7 # xmm7 = xmm9[1,0,3,2]
vminps %xmm9, %xmm7, %xmm7
vshufpd $0x1, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,0]
vminps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[1,0,3,2]
vminps %xmm10, %xmm8, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,0]
vminps %xmm8, %xmm9, %xmm8
vinsertps $0x1c, %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0],xmm8[0],zero,zero
vshufps $0xb1, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,0,3,2]
vminps %xmm11, %xmm8, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,0]
vminps %xmm8, %xmm9, %xmm8
vinsertps $0x20, %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0,1],xmm8[0],xmm7[3]
vshufps $0xb1, %xmm15, %xmm15, %xmm8 # xmm8 = xmm15[1,0,3,2]
vmaxps %xmm15, %xmm8, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,0]
vmaxps %xmm8, %xmm9, %xmm8
vshufps $0xb1, %xmm16, %xmm16, %xmm9 # xmm9 = xmm16[1,0,3,2]
vmaxps %xmm16, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmaxps %xmm9, %xmm10, %xmm9
vinsertps $0x1c, %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm9[0],zero,zero
vshufps $0xb1, %xmm17, %xmm17, %xmm9 # xmm9 = xmm17[1,0,3,2]
vmaxps %xmm17, %xmm9, %xmm9
vshufpd $0x1, %xmm9, %xmm9, %xmm10 # xmm10 = xmm9[1,0]
vmaxps %xmm9, %xmm10, %xmm9
vinsertps $0x20, %xmm9, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm9[0],xmm8[3]
vblendmps %xmm7, %xmm8, %xmm9 {%k1}
vmovaps %xmm8, %xmm7 {%k1}
vshufps $0xb1, %xmm21, %xmm21, %xmm8 # xmm8 = xmm21[1,0,3,2]
vmaxps %xmm21, %xmm8, %xmm8
vshufpd $0x1, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,0]
vshufps $0xb1, %xmm18, %xmm18, %xmm11 # xmm11 = xmm18[1,0,3,2]
vminps %xmm18, %xmm11, %xmm11
vshufpd $0x1, %xmm11, %xmm11, %xmm15 # xmm15 = xmm11[1,0]
vminps %xmm11, %xmm15, %xmm11
vmaxps %xmm8, %xmm10, %xmm8
vmovaps %xmm3, %xmm0 {%k1}
vmulps %xmm1, %xmm7, %xmm3
vmulps %xmm0, %xmm9, %xmm7
shll $0x5, %ecx
movq %rcx, %r8
xorq $0x20, %r8
movq %rax, %r12
xorq $0x20, %r12
movq 0x70(%r14), %rsi
movq %rsi, 0x290(%rsp)
movq %rdx, 0x298(%rsp)
vbroadcastss %xmm1, %ymm15
vbroadcastss %xmm3, %ymm9
vbroadcastss 0x17d64ad(%rip), %ymm10 # 0x1f20ec0
vxorps %ymm10, %ymm9, %ymm16
vbroadcastsd %xmm2, %ymm17
vbroadcastss 0x17c7cdc(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm2
vxorps %ymm10, %ymm2, %ymm18
vbroadcastss 0x17d64a0(%rip), %ymm2 # 0x1f20edc
vpermps %ymm1, %ymm2, %ymm19
vpermps %ymm3, %ymm2, %ymm1
vxorps %ymm10, %ymm1, %ymm20
vbroadcastss %xmm0, %ymm23
vbroadcastss %xmm7, %ymm1
vxorps %ymm10, %ymm1, %ymm24
vpermps %ymm0, %ymm9, %ymm25
vpermps %ymm7, %ymm9, %ymm1
vxorps %ymm10, %ymm1, %ymm26
vpermps %ymm0, %ymm2, %ymm27
vpermps %ymm7, %ymm2, %ymm0
vxorps %ymm10, %ymm0, %ymm28
vbroadcastss %xmm11, %ymm11
vbroadcastss %xmm8, %ymm29
leaq 0x2a0(%rsp), %rsi
leaq 0x290(%rsp), %rdx
cmpq %rdx, %rsi
je 0x74b40c
leaq -0x10(%rsi), %r10
movzbl -0x74(%rsp), %edx
andnq -0x8(%rsi), %rdx, %rdx
je 0x74b3f9
movq (%r10), %rsi
testb $0x8, %sil
jne 0x74ac52
vmovaps 0x40(%rsi,%rcx), %ymm0
vfmadd132ps %ymm15, %ymm16, %ymm0 # ymm0 = (ymm0 * ymm15) + ymm16
vmovaps 0x40(%rsi,%r15), %ymm1
vfmadd132ps %ymm17, %ymm18, %ymm1 # ymm1 = (ymm1 * ymm17) + ymm18
vmovaps 0x40(%rsi,%rax), %ymm2
vpmaxsd %ymm1, %ymm0, %ymm0
vfmadd132ps %ymm19, %ymm20, %ymm2 # ymm2 = (ymm2 * ymm19) + ymm20
vpmaxsd %ymm11, %ymm2, %ymm1
vpmaxsd %ymm1, %ymm0, %ymm0
vmovaps 0x40(%rsi,%r8), %ymm1
vfmadd132ps %ymm23, %ymm24, %ymm1 # ymm1 = (ymm1 * ymm23) + ymm24
vmovaps 0x40(%rsi,%r13), %ymm2
vfmadd132ps %ymm25, %ymm26, %ymm2 # ymm2 = (ymm2 * ymm25) + ymm26
vmovaps 0x40(%rsi,%r12), %ymm3
vpminsd %ymm2, %ymm1, %ymm1
vfmadd132ps %ymm27, %ymm28, %ymm3 # ymm3 = (ymm3 * ymm27) + ymm28
vpminsd %ymm29, %ymm3, %ymm2
vpminsd %ymm2, %ymm1, %ymm1
vcmpleps %ymm1, %ymm0, %k0
kortestb %k0, %k0
je 0x74ac4e
kmovd %k0, %edx
movzbl %dl, %edi
movq %rbx, %r9
xorl %edx, %edx
tzcntq %rdi, %r11
vbroadcastss 0x40(%rsi,%r11,4), %xmm0
vfmadd132ps %xmm4, %xmm12, %xmm0 # xmm0 = (xmm0 * xmm4) + xmm12
vbroadcastss 0x80(%rsi,%r11,4), %xmm1
vfmadd132ps %xmm5, %xmm13, %xmm1 # xmm1 = (xmm1 * xmm5) + xmm13
vbroadcastss 0xc0(%rsi,%r11,4), %xmm2
vfmadd132ps %xmm6, %xmm14, %xmm2 # xmm2 = (xmm2 * xmm6) + xmm14
vbroadcastss 0x60(%rsi,%r11,4), %xmm3
vfmadd132ps %xmm4, %xmm12, %xmm3 # xmm3 = (xmm3 * xmm4) + xmm12
vbroadcastss 0xa0(%rsi,%r11,4), %xmm7
vfmadd132ps %xmm5, %xmm13, %xmm7 # xmm7 = (xmm7 * xmm5) + xmm13
vbroadcastss 0xe0(%rsi,%r11,4), %xmm8
vfmadd132ps %xmm6, %xmm14, %xmm8 # xmm8 = (xmm8 * xmm6) + xmm14
vpminsd %xmm3, %xmm0, %xmm9
vpminsd %xmm7, %xmm1, %xmm10
vpmaxsd %xmm10, %xmm9, %xmm9
vpminsd %xmm8, %xmm2, %xmm10
vpmaxsd %xmm22, %xmm10, %xmm10
vpmaxsd %xmm10, %xmm9, %xmm9
vpmaxsd %xmm3, %xmm0, %xmm0
vpmaxsd %xmm7, %xmm1, %xmm1
vpminsd %xmm1, %xmm0, %xmm0
vpmaxsd %xmm8, %xmm2, %xmm1
vpminsd %xmm21, %xmm1, %xmm1
vpminsd %xmm1, %xmm0, %xmm0
vpcmpled %xmm0, %xmm9, %k0
kortestb %k0, %k0
je 0x74ac2a
movq (%rsi,%r11,8), %r11
prefetcht0 (%r11)
prefetcht0 0x40(%r11)
prefetcht0 0x80(%r11)
prefetcht0 0xc0(%r11)
cmpq $0x8, %r9
je 0x74ac20
movq %r9, (%r10)
movq %rdx, 0x8(%r10)
addq $0x10, %r10
kmovd %k0, %edx
movzbl %dl, %edx
movq %r11, %r9
blsrq %rdi, %rdi
jne 0x74ab4e
cmpq $0x8, %r9
setne %dil
movq %r9, %rsi
testb %dil, %dil
jne 0x74aabc
jmp 0x74b3f9
xorl %edi, %edi
jmp 0x74ac40
testq %rdx, %rdx
je 0x74b3f9
movl %esi, %edi
andl $0xf, %edi
movl -0x74(%rsp), %edx
movl %edx, %r9d
andb $0xf, %r9b
addq $-0x8, %rdi
movq %rdi, 0xe8(%rsp)
je 0x74b3cb
movq %rbp, 0xe0(%rsp)
andq $-0x10, %rsi
xorb $0xf, %r9b
movq 0xd8(%rsp), %rdx
vmovaps (%rdx), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x10(%rdx), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps 0x20(%rdx), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
vmovaps 0x30(%rdx), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps 0x50(%rdx), %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vmovaps 0x60(%rdx), %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
vmovaps 0x40(%rdx), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vmovaps 0x80(%rdx), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps 0x90(%rdx), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
addq $0xc0, %rsi
xorl %edi, %edi
vmovups %ymm15, 0xa0(%rsp)
vmovups %ymm16, 0x80(%rsp)
vmovups %ymm17, 0x60(%rsp)
vmovups %ymm18, 0x40(%rsp)
vmovups %ymm19, 0x20(%rsp)
vmovups %ymm20, (%rsp)
vmovups %ymm23, -0x20(%rsp)
vmovups %ymm24, -0x40(%rsp)
vmovups %ymm25, -0x60(%rsp)
vmovups %ymm26, 0x180(%rsp)
vmovups %ymm27, 0x160(%rsp)
vmovups %ymm28, 0x140(%rsp)
vmovups %ymm11, 0x270(%rsp)
vmovups %ymm29, 0x120(%rsp)
xorl %edx, %edx
movb %r9b, -0x75(%rsp)
movl %r9d, %ebp
movl (%rsi,%rdx,4), %r9d
movl $0xffffffff, %r11d # imm = 0xFFFFFFFF
cmpq %r11, %r9
je 0x74b395
vbroadcastss -0xc0(%rsi,%rdx,4), %xmm3
vbroadcastss -0xb0(%rsi,%rdx,4), %xmm9
vbroadcastss -0xa0(%rsi,%rdx,4), %xmm17
vbroadcastss -0x90(%rsi,%rdx,4), %xmm11
vbroadcastss -0x80(%rsi,%rdx,4), %xmm15
vbroadcastss -0x70(%rsi,%rdx,4), %xmm7
vbroadcastss -0x30(%rsi,%rdx,4), %xmm10
vbroadcastss -0x20(%rsi,%rdx,4), %xmm0
vbroadcastss -0x10(%rsi,%rdx,4), %xmm1
vsubss %xmm11, %xmm3, %xmm19
vbroadcastss %xmm19, %xmm26
vsubss %xmm15, %xmm9, %xmm2
vbroadcastss %xmm2, %xmm29
vmovaps %xmm7, 0x260(%rsp)
vsubss %xmm7, %xmm17, %xmm20
vbroadcastss %xmm20, %xmm31
vsubss %xmm3, %xmm10, %xmm23
vbroadcastss %xmm23, %xmm8
vsubss %xmm9, %xmm0, %xmm24
vbroadcastss %xmm24, %xmm18
vsubss %xmm17, %xmm1, %xmm25
vbroadcastss %xmm25, %xmm16
vmulss %xmm25, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmsub231ps %xmm31, %xmm18, %xmm2 # xmm2 = (xmm18 * xmm31) - xmm2
vmulss %xmm23, %xmm20, %xmm20
vbroadcastss %xmm20, %xmm23
vfmsub231ps %xmm26, %xmm16, %xmm23 # xmm23 = (xmm16 * xmm26) - xmm23
vmulss %xmm24, %xmm19, %xmm19
vbroadcastss %xmm19, %xmm27
vfmsub231ps %xmm29, %xmm8, %xmm27 # xmm27 = (xmm8 * xmm29) - xmm27
vsubps 0x1f0(%rsp), %xmm3, %xmm24
vsubps 0x1e0(%rsp), %xmm9, %xmm28
vsubps 0x1d0(%rsp), %xmm17, %xmm30
vmovaps 0x1c0(%rsp), %xmm3
vmulps %xmm30, %xmm3, %xmm20
vmovaps 0x1b0(%rsp), %xmm17
vfmsub231ps %xmm17, %xmm28, %xmm20 # xmm20 = (xmm28 * xmm17) - xmm20
vmulps %xmm24, %xmm17, %xmm9
vmovaps 0x1a0(%rsp), %xmm7
vfmsub231ps %xmm7, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm7) - xmm9
vmulps %xmm28, %xmm7, %xmm19
vfmsub231ps %xmm3, %xmm24, %xmm19 # xmm19 = (xmm24 * xmm3) - xmm19
vmulps %xmm17, %xmm27, %xmm25
vfmadd231ps %xmm3, %xmm23, %xmm25 # xmm25 = (xmm23 * xmm3) + xmm25
vfmadd231ps %xmm7, %xmm2, %xmm25 # xmm25 = (xmm2 * xmm7) + xmm25
vandps 0x17d5fd1(%rip){1to4}, %xmm25, %xmm3 # 0x1f20ec0
vmulps %xmm19, %xmm16, %xmm16
vfmadd231ps %xmm18, %xmm9, %xmm16 # xmm16 = (xmm9 * xmm18) + xmm16
vfmadd231ps %xmm8, %xmm20, %xmm16 # xmm16 = (xmm20 * xmm8) + xmm16
vxorps %xmm16, %xmm3, %xmm17
vxorps %xmm8, %xmm8, %xmm8
vcmpnltps %xmm8, %xmm17, %k0
kmovd %k0, %r14d
andb %bpl, %r14b
jne 0x74b113
testb %bpl, %bpl
vmovups 0x180(%rsp), %ymm26
vmovups 0x160(%rsp), %ymm27
vmovups 0x140(%rsp), %ymm28
vmovups 0x120(%rsp), %ymm29
je 0x74b0a0
vbroadcastss -0x60(%rsi,%rdx,4), %xmm2
vbroadcastss -0x50(%rsi,%rdx,4), %xmm3
vbroadcastss -0x40(%rsi,%rdx,4), %xmm8
vsubps %xmm10, %xmm2, %xmm23
vsubps %xmm0, %xmm3, %xmm24
vsubps %xmm1, %xmm8, %xmm25
vsubps %xmm2, %xmm11, %xmm16
vsubps %xmm3, %xmm15, %xmm17
vmovaps 0x260(%rsp), %xmm0
vsubps %xmm8, %xmm0, %xmm18
vmulps %xmm18, %xmm24, %xmm0
vfmsub231ps %xmm25, %xmm17, %xmm0 # xmm0 = (xmm17 * xmm25) - xmm0
vmulps %xmm16, %xmm25, %xmm1
vfmsub231ps %xmm23, %xmm18, %xmm1 # xmm1 = (xmm18 * xmm23) - xmm1
vmulps %xmm17, %xmm23, %xmm10
vfmsub231ps %xmm24, %xmm16, %xmm10 # xmm10 = (xmm16 * xmm24) - xmm10
vsubps 0x1f0(%rsp), %xmm2, %xmm2
vsubps 0x1e0(%rsp), %xmm3, %xmm11
vsubps 0x1d0(%rsp), %xmm8, %xmm15
vmovaps 0x1c0(%rsp), %xmm3
vmulps %xmm15, %xmm3, %xmm20
vmovaps 0x1b0(%rsp), %xmm7
vfmsub231ps %xmm7, %xmm11, %xmm20 # xmm20 = (xmm11 * xmm7) - xmm20
vmulps %xmm2, %xmm7, %xmm9
vmovaps 0x1a0(%rsp), %xmm8
vfmsub231ps %xmm8, %xmm15, %xmm9 # xmm9 = (xmm15 * xmm8) - xmm9
vmulps %xmm11, %xmm8, %xmm19
vfmsub231ps %xmm3, %xmm2, %xmm19 # xmm19 = (xmm2 * xmm3) - xmm19
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm3, %xmm1, %xmm7 # xmm7 = (xmm1 * xmm3) + xmm7
vfmadd231ps %xmm8, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm8) + xmm7
vandps 0x17d5ea2(%rip){1to4}, %xmm7, %xmm3 # 0x1f20ec0
vmulps %xmm19, %xmm18, %xmm8
vfmadd231ps %xmm17, %xmm9, %xmm8 # xmm8 = (xmm9 * xmm17) + xmm8
vfmadd231ps %xmm16, %xmm20, %xmm8 # xmm8 = (xmm20 * xmm16) + xmm8
vxorps %xmm8, %xmm3, %xmm17
vxorps %xmm8, %xmm8, %xmm8
vcmpnltps %xmm8, %xmm17, %k0
kmovd %k0, %r14d
andb %bpl, %r14b
jne 0x74b204
vmovups 0xa0(%rsp), %ymm15
vmovups 0x80(%rsp), %ymm16
vmovups 0x60(%rsp), %ymm17
vmovups 0x40(%rsp), %ymm18
vmovups 0x20(%rsp), %ymm19
vmovups (%rsp), %ymm20
vmovups -0x20(%rsp), %ymm23
vmovups -0x40(%rsp), %ymm24
vmovups -0x60(%rsp), %ymm25
testb %bpl, %bpl
setne %r9b
jmp 0x74b0eb
xorl %r9d, %r9d
vmovups 0xa0(%rsp), %ymm15
vmovups 0x80(%rsp), %ymm16
vmovups 0x60(%rsp), %ymm17
vmovups 0x40(%rsp), %ymm18
vmovups 0x20(%rsp), %ymm19
vmovups (%rsp), %ymm20
vmovups -0x20(%rsp), %ymm23
vmovups -0x40(%rsp), %ymm24
vmovups -0x60(%rsp), %ymm25
vmovups 0x270(%rsp), %ymm11
testb %r9b, %r9b
je 0x74b395
leaq 0x1(%rdx), %r9
cmpq $0x3, %rdx
movq %r9, %rdx
jb 0x74ad9c
jmp 0x74b395
vmulps %xmm19, %xmm31, %xmm8
vfmadd213ps %xmm8, %xmm9, %xmm29 # xmm29 = (xmm9 * xmm29) + xmm8
vfmadd213ps %xmm29, %xmm20, %xmm26 # xmm26 = (xmm20 * xmm26) + xmm29
vxorps %xmm26, %xmm3, %xmm19
vxorps %xmm8, %xmm8, %xmm8
vcmpnltps %xmm8, %xmm19, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74af20
vandps 0x17d5d76(%rip){1to4}, %xmm25, %xmm9 # 0x1f20ec4
vsubps %xmm17, %xmm9, %xmm8
vcmpnltps %xmm19, %xmm8, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74af20
vmulps %xmm30, %xmm27, %xmm8
vfmadd213ps %xmm8, %xmm23, %xmm28 # xmm28 = (xmm23 * xmm28) + xmm8
vfmadd213ps %xmm28, %xmm2, %xmm24 # xmm24 = (xmm2 * xmm24) + xmm28
vxorps %xmm24, %xmm3, %xmm2
vmulps 0x110(%rsp), %xmm9, %xmm3
vmulps 0x100(%rsp), %xmm9, %xmm8
vcmpleps %xmm8, %xmm2, %k1
vcmpltps %xmm2, %xmm3, %k0 {%k1}
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74af20
vxorps %xmm2, %xmm2, %xmm2
vcmpneqps %xmm2, %xmm25, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74af20
movq -0x68(%rsp), %r11
movq (%r11), %r11
movq 0x1e8(%r11), %r11
movq (%r11,%r9,8), %r11
vmovdqa 0xf0(%rsp), %xmm2
vptestmd 0x34(%r11){1to4}, %xmm2, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74af20
xorb $0xf, %r14b
andb %bpl, %r14b
movl %r14d, %ebp
jmp 0x74af20
vmulps %xmm19, %xmm25, %xmm8
vfmadd213ps %xmm8, %xmm9, %xmm24 # xmm24 = (xmm9 * xmm24) + xmm8
vfmadd213ps %xmm24, %xmm20, %xmm23 # xmm23 = (xmm20 * xmm23) + xmm24
vxorps %xmm23, %xmm3, %xmm19
vxorps %xmm8, %xmm8, %xmm8
vcmpnltps %xmm8, %xmm19, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74b04f
vandps 0x17d5c85(%rip){1to4}, %xmm7, %xmm9 # 0x1f20ec4
vsubps %xmm17, %xmm9, %xmm8
vcmpnltps %xmm19, %xmm8, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
vmovups (%rsp), %ymm20
vmovups -0x20(%rsp), %ymm23
vmovups -0x40(%rsp), %ymm24
vmovups -0x60(%rsp), %ymm25
jne 0x74b2a2
vmovups 0xa0(%rsp), %ymm15
vmovups 0x80(%rsp), %ymm16
vmovups 0x60(%rsp), %ymm17
vmovups 0x40(%rsp), %ymm18
vmovups 0x20(%rsp), %ymm19
jmp 0x74b097
vmulps %xmm15, %xmm10, %xmm8
vfmadd213ps %xmm8, %xmm1, %xmm11 # xmm11 = (xmm1 * xmm11) + xmm8
vfmadd213ps %xmm11, %xmm0, %xmm2 # xmm2 = (xmm0 * xmm2) + xmm11
vxorps %xmm2, %xmm3, %xmm0
vmulps 0x110(%rsp), %xmm9, %xmm1
vmulps 0x100(%rsp), %xmm9, %xmm2
vcmpleps %xmm2, %xmm0, %k1
vcmpltps %xmm0, %xmm1, %k0 {%k1}
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74b328
vxorps %xmm0, %xmm0, %xmm0
vcmpneqps %xmm0, %xmm7, %k0
kmovd %k0, %r11d
andb %r11b, %r14b
je 0x74b328
movq -0x68(%rsp), %r11
movq (%r11), %r11
movq 0x1e8(%r11), %r11
movq (%r11,%r9,8), %r9
vmovdqa 0xf0(%rsp), %xmm0
vptestmd 0x34(%r9){1to4}, %xmm0, %k0
kmovd %k0, %r9d
andb %r9b, %r14b
je 0x74b328
xorb $0xf, %r14b
andb %bpl, %r14b
movl %r14d, %ebp
vmovups 0xa0(%rsp), %ymm15
vmovups 0x80(%rsp), %ymm16
vmovups 0x60(%rsp), %ymm17
vmovups 0x40(%rsp), %ymm18
vmovups 0x20(%rsp), %ymm19
vmovups (%rsp), %ymm20
vmovups -0x20(%rsp), %ymm23
vmovups -0x40(%rsp), %ymm24
vmovups -0x60(%rsp), %ymm25
vmovups 0x180(%rsp), %ymm26
vmovups 0x160(%rsp), %ymm27
vmovups 0x140(%rsp), %ymm28
vmovups 0x120(%rsp), %ymm29
jmp 0x74b097
movb -0x75(%rsp), %r9b
andb %bpl, %r9b
je 0x74b3b7
incq %rdi
addq $0xe0, %rsi
cmpq 0xe8(%rsp), %rdi
jb 0x74ad92
xorb $0xf, %r9b
movq 0xd0(%rsp), %r14
movq 0xe0(%rsp), %rbp
movl -0x74(%rsp), %esi
orb %r9b, %sil
movl %esi, %edx
notb %dl
movl -0x70(%rsp), %edi
andb %dl, %dil
andb $0xf, %dil
movl %edi, -0x70(%rsp)
movl %esi, -0x74(%rsp)
je 0x74b408
kmovd %esi, %k1
vbroadcastss 0x17a178b(%rip), %xmm21 {%k1} # 0x1eecb84
xorl %edx, %edx
movq %r10, %rsi
testb %dl, %dl
je 0x74aa93
jmp 0x74b40c
movb $0x1, %dl
jmp 0x74b3fb
testq %rbp, %rbp
jne 0x74a746
movl -0x74(%rsp), %eax
andb -0x6c(%rsp), %al
kmovd %eax, %k1
movq 0xc8(%rsp), %rax
vbroadcastss 0x17a1752(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, (%rax) {%k1}
addq $0x25d8, %rsp # imm = 0x25D8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/../simd/vint4_sse2.h
|
void std::__move_median_to_first<embree::sse2::BVHBuilderMorton::BuildPrim*, __gnu_cxx::__ops::_Iter_comp_iter<std::less<embree::sse2::BVHBuilderMorton::BuildPrim>>>(embree::sse2::BVHBuilderMorton::BuildPrim*, embree::sse2::BVHBuilderMorton::BuildPrim*, embree::sse2::BVHBuilderMorton::BuildPrim*, embree::sse2::BVHBuilderMorton::BuildPrim*, __gnu_cxx::__ops::_Iter_comp_iter<std::less<embree::sse2::BVHBuilderMorton::BuildPrim>>)
|
__forceinline bool operator<(const BuildPrim &m) const { return code < m.code; }
|
movl (%rsi), %r8d
movl (%rdx), %r9d
movl (%rcx), %eax
cmpl %r9d, %r8d
jae 0x90e361
cmpl %eax, %r9d
jae 0x90e373
movq (%rdi), %rax
movq (%rdx), %rcx
movq %rcx, (%rdi)
movq %rax, (%rdx)
retq
cmpl %eax, %r8d
jae 0x90e385
movq (%rdi), %rax
movq (%rsi), %rcx
movq %rcx, (%rdi)
movq %rax, (%rsi)
retq
movq (%rdi), %rdx
cmpl %eax, %r8d
jae 0x90e397
movq (%rcx), %rax
movq %rax, (%rdi)
movq %rdx, (%rcx)
retq
movq (%rdi), %rsi
cmpl %eax, %r9d
jae 0x90e3a1
movq (%rcx), %rax
movq %rax, (%rdi)
movq %rsi, (%rcx)
retq
movq (%rsi), %rax
movq %rax, (%rdi)
movq %rdx, (%rsi)
retq
movq (%rdx), %rax
movq %rax, (%rdi)
movq %rsi, (%rdx)
retq
|
/embree[P]embree/kernels/common/../builders/bvh_builder_morton.h
|
void embree::sse2::CurveNiMBIntersector1<4>::intersect_n<embree::sse2::OrientedCurve1Intersector1<embree::CatmullRomCurveT, 3, 4>, embree::sse2::Intersect1Epilog1<true>>(embree::sse2::CurvePrecalculations1 const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::CurveNiMB<4> const&)
|
static __forceinline void intersect_n(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
const TensorLinearCubicBezierSurface3fa curve = geom->getNormalOrientedCurve<typename Intersector::SourceCurve3ff, typename Intersector::SourceCurve3fa, TensorLinearCubicBezierSurface3fa>(context, ray.org, primID,ray.time());
Intersector().intersect(pre,ray,context,geom,primID,curve,Epilog(ray,context,geomID,primID));
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x3f8, %rsp # imm = 0x3F8
movq %rdx, %rbp
movzbl 0x1(%rcx), %eax
leaq (%rax,%rax,8), %r11
leaq (%rax,%r11,4), %r9
movups 0x6(%rcx,%r9), %xmm2
movaps (%rsi), %xmm3
subps %xmm2, %xmm3
shufps $0xff, %xmm2, %xmm2 # xmm2 = xmm2[3,3,3,3]
mulps %xmm2, %xmm3
mulps 0x10(%rsi), %xmm2
movd 0x6(%rcx,%rax,4), %xmm0
punpcklbw %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm0
cvtdq2ps %xmm0, %xmm13
leaq (%rax,%rax,4), %r10
movd 0x6(%rcx,%r10), %xmm1
punpcklbw %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm1
cvtdq2ps %xmm1, %xmm4
leaq (%rax,%rax,2), %rdx
movd 0x6(%rcx,%rdx,2), %xmm1
punpcklbw %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm1
cvtdq2ps %xmm1, %xmm6
leaq (%r10,%r10,2), %r8
movd 0x6(%rcx,%r8), %xmm1
punpcklbw %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm1, %xmm1 # xmm1 = xmm1[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm1
cvtdq2ps %xmm1, %xmm0
movaps %xmm0, 0x30(%rsp)
movl %eax, %r8d
shll $0x4, %r8d
movd 0x6(%rcx,%r8), %xmm5
punpcklbw %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm5
cvtdq2ps %xmm5, %xmm7
addq %rax, %r8
movd 0x6(%rcx,%r8), %xmm5
punpcklbw %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm5
cvtdq2ps %xmm5, %xmm9
leaq (%r10,%r10,4), %r8
addq %rax, %r8
movd 0x6(%rcx,%r8), %xmm5
punpcklbw %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm5
cvtdq2ps %xmm5, %xmm8
leaq (%r11,%r11,2), %r8
movd 0x6(%rcx,%r8), %xmm5
punpcklbw %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm5
cvtdq2ps %xmm5, %xmm10
addq %rax, %r8
movd 0x6(%rcx,%r8), %xmm5
punpcklbw %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
punpcklwd %xmm5, %xmm5 # xmm5 = xmm5[0,0,1,1,2,2,3,3]
psrad $0x18, %xmm5
cvtdq2ps %xmm5, %xmm1
movaps %xmm2, %xmm5
shufps $0x0, %xmm2, %xmm5 # xmm5 = xmm5[0,0],xmm2[0,0]
movaps %xmm2, %xmm14
shufps $0x55, %xmm2, %xmm14 # xmm14 = xmm14[1,1],xmm2[1,1]
shufps $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
movaps %xmm2, %xmm11
mulps %xmm6, %xmm11
movaps %xmm2, %xmm12
mulps %xmm9, %xmm12
mulps %xmm1, %xmm2
movaps %xmm14, %xmm15
mulps %xmm4, %xmm15
addps %xmm11, %xmm15
movaps %xmm14, %xmm0
mulps %xmm7, %xmm0
addps %xmm12, %xmm0
mulps %xmm10, %xmm14
addps %xmm2, %xmm14
movaps %xmm5, %xmm12
mulps %xmm13, %xmm12
addps %xmm15, %xmm12
movaps %xmm5, %xmm11
movaps 0x30(%rsp), %xmm15
mulps %xmm15, %xmm11
addps %xmm0, %xmm11
mulps %xmm8, %xmm5
addps %xmm14, %xmm5
movaps %xmm3, %xmm2
shufps $0x0, %xmm3, %xmm2 # xmm2 = xmm2[0,0],xmm3[0,0]
movaps %xmm3, %xmm14
shufps $0x55, %xmm3, %xmm14 # xmm14 = xmm14[1,1],xmm3[1,1]
shufps $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
mulps %xmm3, %xmm6
mulps %xmm3, %xmm9
mulps %xmm1, %xmm3
mulps %xmm14, %xmm4
addps %xmm6, %xmm4
mulps %xmm14, %xmm7
addps %xmm9, %xmm7
mulps %xmm10, %xmm14
addps %xmm3, %xmm14
mulps %xmm2, %xmm13
addps %xmm4, %xmm13
movaps %xmm15, %xmm1
mulps %xmm2, %xmm1
addps %xmm7, %xmm1
mulps %xmm8, %xmm2
addps %xmm14, %xmm2
movaps 0x13bac5f(%rip), %xmm4 # 0x1eec6c0
movaps %xmm12, %xmm3
andps %xmm4, %xmm3
movaps 0x13c02d1(%rip), %xmm0 # 0x1ef1d40
cmpnltps %xmm0, %xmm3
andps %xmm3, %xmm12
andnps %xmm0, %xmm3
orps %xmm12, %xmm3
movaps %xmm11, %xmm6
andps %xmm4, %xmm6
cmpnltps %xmm0, %xmm6
andps %xmm6, %xmm11
andnps %xmm0, %xmm6
orps %xmm11, %xmm6
movaps %xmm5, %xmm7
andps %xmm4, %xmm7
cmpnltps %xmm0, %xmm7
andps %xmm7, %xmm5
andnps %xmm0, %xmm7
orps %xmm5, %xmm7
rcpps %xmm3, %xmm0
mulps %xmm0, %xmm3
movaps 0x13baf5b(%rip), %xmm8 # 0x1eeca10
movaps %xmm8, %xmm4
subps %xmm3, %xmm4
mulps %xmm0, %xmm4
addps %xmm0, %xmm4
rcpps %xmm6, %xmm0
mulps %xmm0, %xmm6
movaps %xmm8, %xmm5
subps %xmm6, %xmm5
mulps %xmm0, %xmm5
addps %xmm0, %xmm5
rcpps %xmm7, %xmm0
mulps %xmm0, %xmm7
movaps %xmm8, %xmm6
subps %xmm7, %xmm6
mulps %xmm0, %xmm6
movss 0x1c(%rsi), %xmm8
subss 0x16(%rcx,%r9), %xmm8
addps %xmm0, %xmm6
mulss 0x1a(%rcx,%r9), %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
leaq (,%rax,8), %r9
subq %rax, %r9
movq 0x6(%rcx,%r9), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
cvtdq2ps %xmm0, %xmm0
leaq (%rax,%r10,2), %r9
movq 0x6(%rcx,%r9), %xmm3
punpcklwd %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm3
cvtdq2ps %xmm3, %xmm3
subps %xmm0, %xmm3
mulps %xmm8, %xmm3
addps %xmm0, %xmm3
movq 0x6(%rcx,%r11), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
cvtdq2ps %xmm0, %xmm0
leaq (%rax,%rdx,4), %r9
movq 0x6(%rcx,%r9), %xmm7
punpcklwd %xmm7, %xmm7 # xmm7 = xmm7[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm7
cvtdq2ps %xmm7, %xmm7
subps %xmm0, %xmm7
mulps %xmm8, %xmm7
addps %xmm0, %xmm7
movq 0x6(%rcx,%r11,2), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
cvtdq2ps %xmm0, %xmm0
shll $0x2, %r10d
leaq (%rax,%rax), %r9
addq %r10, %r9
movq 0x6(%rcx,%r9), %xmm9
punpcklwd %xmm9, %xmm9 # xmm9 = xmm9[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm9
cvtdq2ps %xmm9, %xmm9
subps %xmm0, %xmm9
mulps %xmm8, %xmm9
addps %xmm0, %xmm9
movq 0x6(%rcx,%r10), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
cvtdq2ps %xmm0, %xmm0
movq 0x6(%rcx,%rdx,8), %xmm10
punpcklwd %xmm10, %xmm10 # xmm10 = xmm10[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm10
cvtdq2ps %xmm10, %xmm10
subps %xmm0, %xmm10
mulps %xmm8, %xmm10
addps %xmm0, %xmm10
addq %rax, %r8
movq 0x6(%rcx,%r8), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
cvtdq2ps %xmm0, %xmm0
movl %eax, %r8d
shll $0x5, %r8d
leaq (%rax,%r8), %rdx
movq 0x6(%rcx,%rdx), %xmm11
punpcklwd %xmm11, %xmm11 # xmm11 = xmm11[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm11
cvtdq2ps %xmm11, %xmm11
subps %xmm0, %xmm11
mulps %xmm8, %xmm11
addps %xmm0, %xmm11
subq %rax, %r8
movq 0x6(%rcx,%r8), %xmm0
punpcklwd %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm0
cvtdq2ps %xmm0, %xmm0
imulq $0x23, %rax, %rdx
movq %rcx, 0x138(%rsp)
movq 0x6(%rcx,%rdx), %xmm12
punpcklwd %xmm12, %xmm12 # xmm12 = xmm12[0,0,1,1,2,2,3,3]
psrad $0x10, %xmm12
cvtdq2ps %xmm12, %xmm12
subps %xmm0, %xmm12
mulps %xmm8, %xmm12
addps %xmm0, %xmm12
subps %xmm13, %xmm3
mulps %xmm4, %xmm3
subps %xmm13, %xmm7
mulps %xmm4, %xmm7
subps %xmm1, %xmm9
mulps %xmm5, %xmm9
subps %xmm1, %xmm10
mulps %xmm5, %xmm10
subps %xmm2, %xmm11
mulps %xmm6, %xmm11
subps %xmm2, %xmm12
mulps %xmm6, %xmm12
movaps %xmm3, %xmm2
minps %xmm7, %xmm2
movaps %xmm9, %xmm0
minps %xmm10, %xmm0
maxps %xmm0, %xmm2
movaps %xmm11, %xmm0
minps %xmm12, %xmm0
movups 0xc(%rsi), %xmm1
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
maxps %xmm1, %xmm0
maxps %xmm0, %xmm2
mulps 0x13c00b4(%rip), %xmm2 # 0x1ef1d80
maxps %xmm7, %xmm3
maxps %xmm10, %xmm9
minps %xmm9, %xmm3
maxps %xmm12, %xmm11
movaps 0x20(%rsi), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
minps %xmm0, %xmm11
minps %xmm11, %xmm3
mulps 0x13c009e(%rip), %xmm3 # 0x1ef1d90
movd %eax, %xmm0
pshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
pcmpgtd 0x13befed(%rip), %xmm0 # 0x1ef0cf0
movaps %xmm2, 0x270(%rsp)
movaps %xmm2, %xmm1
cmpleps %xmm3, %xmm1
andps %xmm0, %xmm1
movmskps %xmm1, %eax
testl %eax, %eax
je 0xb342b7
movq %rdi, %r12
movzbl %al, %ebx
leaq 0x161e253(%rip), %rax # 0x214ff80
movaps 0xf0(%rax), %xmm1
xorps %xmm0, %xmm0
shufps $0x20, %xmm1, %xmm0 # xmm0 = xmm0[0,0],xmm1[2,0]
shufps $0x24, %xmm0, %xmm1 # xmm1 = xmm1[0,1],xmm0[2,0]
movaps %xmm1, 0x2c0(%rsp)
movq %rsi, 0x48(%rsp)
bsfq %rbx, %rax
movq 0x138(%rsp), %rcx
movl 0x2(%rcx), %edx
movl 0x6(%rcx,%rax,4), %r15d
movq (%rbp), %rax
movq 0x1e8(%rax), %rax
movq %rdx, 0x50(%rsp)
movq (%rax,%rdx,8), %r14
movss 0x1c(%rsi), %xmm0
movss 0x28(%r14), %xmm3
movss %xmm3, 0x10(%rsp)
movss 0x2c(%r14), %xmm2
movss 0x30(%r14), %xmm1
subss %xmm2, %xmm0
subss %xmm2, %xmm1
divss %xmm1, %xmm0
mulss %xmm3, %xmm0
movaps %xmm0, 0x30(%rsp)
callq 0x6a790
movss 0x10(%rsp), %xmm1
addss 0x13bec13(%rip), %xmm1 # 0x1ef09cc
minss %xmm1, %xmm0
xorps %xmm1, %xmm1
maxss %xmm0, %xmm1
movaps 0x30(%rsp), %xmm0
subss %xmm1, %xmm0
movaps %xmm0, 0x30(%rsp)
cvttss2si %xmm1, %eax
movslq %eax, %rcx
movq 0x58(%r14), %rax
movq %r15, 0xe8(%rsp)
movq %r15, %rdx
imulq 0x68(%r14), %rdx
movl (%rax,%rdx), %eax
movq 0x188(%r14), %r9
imulq $0x38, %rcx, %rdx
movq (%r9,%rdx), %r8
movq 0x10(%r9,%rdx), %r10
movq %r10, %rcx
imulq %rax, %rcx
movaps (%r8,%rcx), %xmm1
leaq 0x1(%rax), %rcx
movq %r10, %rsi
imulq %rcx, %rsi
movaps (%r8,%rsi), %xmm5
leaq 0x2(%rax), %r15
movq %r10, %rdi
imulq %r15, %rdi
movaps (%r8,%rdi), %xmm12
leaq 0x3(%rax), %rdi
imulq %rdi, %r10
movaps (%r8,%r10), %xmm6
movq 0x1a8(%r14), %r8
movq (%r8,%rdx), %r10
movq 0x10(%r8,%rdx), %r11
movq %r11, %r14
imulq %rax, %r14
movups (%r10,%r14), %xmm4
movq %r11, %r14
imulq %rcx, %r14
movups (%r10,%r14), %xmm7
movq %r11, %r14
imulq %r15, %r14
movups (%r10,%r14), %xmm9
imulq %rdi, %r11
movups (%r10,%r11), %xmm8
movaps %xmm6, %xmm2
movaps 0x13ba84c(%rip), %xmm0 # 0x1eec6d0
mulps %xmm0, %xmm2
movaps %xmm12, %xmm3
xorps %xmm10, %xmm10
mulps %xmm10, %xmm3
movaps %xmm6, %xmm10
movaps 0x13ba841(%rip), %xmm13 # 0x1eec6e0
mulps %xmm13, %xmm6
addps %xmm3, %xmm6
addps %xmm2, %xmm3
movaps %xmm5, 0x20(%rsp)
addps %xmm5, %xmm3
xorps %xmm11, %xmm11
mulps %xmm11, %xmm10
addps %xmm12, %xmm2
movaps %xmm2, (%rsp)
movaps %xmm12, %xmm2
mulps %xmm13, %xmm2
addps %xmm10, %xmm2
movaps %xmm5, %xmm12
mulps %xmm11, %xmm12
addps %xmm12, %xmm2
movaps %xmm1, %xmm10
mulps %xmm13, %xmm10
movaps %xmm13, %xmm5
subps %xmm10, %xmm2
movaps %xmm8, %xmm10
mulps %xmm0, %xmm10
movaps %xmm9, %xmm13
mulps %xmm11, %xmm13
xorps %xmm15, %xmm15
movaps %xmm8, %xmm14
mulps %xmm5, %xmm8
addps %xmm13, %xmm8
addps %xmm10, %xmm13
mulps %xmm15, %xmm14
addps %xmm9, %xmm10
movaps %xmm9, %xmm11
mulps %xmm5, %xmm11
addps %xmm14, %xmm11
movaps %xmm7, %xmm9
mulps %xmm15, %xmm9
addps %xmm9, %xmm11
movaps %xmm4, %xmm14
mulps %xmm5, %xmm14
subps %xmm14, %xmm11
movaps %xmm1, %xmm14
movaps 0x13ba78b(%rip), %xmm15 # 0x1eec6d0
mulps %xmm15, %xmm14
addps %xmm14, %xmm3
movaps %xmm3, 0x10(%rsp)
addps %xmm7, %xmm13
movaps (%rsp), %xmm0
addps %xmm12, %xmm0
movaps %xmm4, %xmm12
mulps %xmm15, %xmm12
addps %xmm12, %xmm13
addps %xmm14, %xmm0
movaps %xmm0, %xmm3
movaps %xmm5, %xmm0
movaps 0x20(%rsp), %xmm5
mulps %xmm0, %xmm5
subps %xmm5, %xmm6
xorps %xmm5, %xmm5
mulps %xmm5, %xmm1
addps %xmm6, %xmm1
movaps %xmm1, %xmm15
addps %xmm9, %xmm10
addps %xmm12, %xmm10
mulps %xmm0, %xmm7
subps %xmm7, %xmm8
mulps %xmm5, %xmm4
addps %xmm8, %xmm4
movaps %xmm2, %xmm8
shufps $0xc9, %xmm2, %xmm8 # xmm8 = xmm8[1,2],xmm2[0,3]
movaps %xmm8, %xmm12
mulps %xmm13, %xmm12
shufps $0xc9, %xmm13, %xmm13 # xmm13 = xmm13[1,2,0,3]
mulps %xmm2, %xmm13
subps %xmm13, %xmm12
movaps %xmm12, %xmm9
shufps $0xc9, %xmm12, %xmm9 # xmm9 = xmm9[1,2],xmm12[0,3]
mulps %xmm11, %xmm8
shufps $0xc9, %xmm11, %xmm11 # xmm11 = xmm11[1,2,0,3]
mulps %xmm2, %xmm11
subps %xmm11, %xmm8
shufps $0xc9, %xmm8, %xmm8 # xmm8 = xmm8[1,2,0,3]
movaps %xmm1, %xmm5
shufps $0xc9, %xmm1, %xmm5 # xmm5 = xmm5[1,2],xmm1[0,3]
movaps %xmm5, %xmm7
mulps %xmm10, %xmm7
shufps $0xc9, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
mulps %xmm1, %xmm10
subps %xmm10, %xmm7
movaps %xmm7, %xmm6
shufps $0xc9, %xmm7, %xmm6 # xmm6 = xmm6[1,2],xmm7[0,3]
mulps %xmm4, %xmm5
shufps $0xc9, %xmm4, %xmm4 # xmm4 = xmm4[1,2,0,3]
mulps %xmm1, %xmm4
subps %xmm4, %xmm5
shufps $0xc9, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
mulps %xmm12, %xmm12
movaps %xmm12, %xmm4
shufps $0x55, %xmm12, %xmm4 # xmm4 = xmm4[1,1],xmm12[1,1]
movaps %xmm12, %xmm10
unpckhpd %xmm12, %xmm10 # xmm10 = xmm10[1],xmm12[1]
addps %xmm4, %xmm10
addps %xmm12, %xmm10
xorps %xmm12, %xmm12
movss %xmm10, %xmm12 # xmm12 = xmm10[0],xmm12[1,2,3]
movaps %xmm12, %xmm4
rsqrtss %xmm12, %xmm4
movaps %xmm4, %xmm11
movss 0x13ba6c9(%rip), %xmm13 # 0x1eec718
mulss %xmm13, %xmm11
movaps %xmm13, %xmm0
movaps %xmm10, %xmm13
movss 0x13bab1b(%rip), %xmm14 # 0x1eecb80
mulss %xmm14, %xmm13
mulss %xmm4, %xmm13
mulss %xmm4, %xmm4
mulss %xmm13, %xmm4
subss %xmm4, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps %xmm9, %xmm13
mulps %xmm8, %xmm13
movaps %xmm13, %xmm4
shufps $0x55, %xmm13, %xmm4 # xmm4 = xmm4[1,1],xmm13[1,1]
addss %xmm13, %xmm4
movhlps %xmm13, %xmm13 # xmm13 = xmm13[1,1]
addss %xmm4, %xmm13
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
mulps %xmm9, %xmm13
movaps %xmm9, %xmm4
mulps %xmm11, %xmm4
movaps %xmm10, %xmm9
shufps $0x0, %xmm10, %xmm9 # xmm9 = xmm9[0,0],xmm10[0,0]
mulps %xmm8, %xmm9
subps %xmm13, %xmm9
rcpss %xmm12, %xmm12
mulss %xmm12, %xmm10
movss 0x13bef22(%rip), %xmm13 # 0x1ef0ff8
movaps %xmm13, %xmm8
subss %xmm10, %xmm8
mulss %xmm12, %xmm8
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm9, %xmm8
mulps %xmm11, %xmm8
mulps %xmm7, %xmm7
movaps %xmm7, %xmm10
shufps $0x55, %xmm7, %xmm10 # xmm10 = xmm10[1,1],xmm7[1,1]
movaps %xmm7, %xmm9
unpckhpd %xmm7, %xmm9 # xmm9 = xmm9[1],xmm7[1]
addps %xmm10, %xmm9
addps %xmm7, %xmm9
xorps %xmm10, %xmm10
movss %xmm9, %xmm10 # xmm10 = xmm9[0],xmm10[1,2,3]
movaps %xmm10, %xmm11
rsqrtss %xmm10, %xmm11
movaps %xmm11, %xmm7
mulss %xmm0, %xmm7
movaps %xmm9, %xmm12
mulss %xmm14, %xmm12
mulss %xmm11, %xmm12
mulss %xmm11, %xmm11
mulss %xmm12, %xmm11
subss %xmm11, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movaps %xmm6, %xmm11
mulps %xmm5, %xmm11
movaps %xmm11, %xmm12
shufps $0x55, %xmm11, %xmm12 # xmm12 = xmm12[1,1],xmm11[1,1]
addss %xmm11, %xmm12
movhlps %xmm11, %xmm11 # xmm11 = xmm11[1,1]
addss %xmm12, %xmm11
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm6, %xmm11
mulps %xmm7, %xmm6
movaps %xmm9, %xmm12
shufps $0x0, %xmm9, %xmm12 # xmm12 = xmm12[0,0],xmm9[0,0]
mulps %xmm5, %xmm12
subps %xmm11, %xmm12
rcpss %xmm10, %xmm10
mulss %xmm10, %xmm9
movaps %xmm13, %xmm5
subss %xmm9, %xmm5
mulss %xmm10, %xmm5
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm12, %xmm5
mulps %xmm7, %xmm5
movaps 0x10(%rsp), %xmm0
movaps %xmm0, %xmm9
shufps $0xff, %xmm0, %xmm9 # xmm9 = xmm9[3,3],xmm0[3,3]
movaps %xmm9, %xmm7
mulps %xmm4, %xmm7
movaps %xmm0, %xmm10
subps %xmm7, %xmm10
movaps %xmm10, 0xb0(%rsp)
movaps %xmm7, %xmm10
movaps %xmm2, %xmm7
shufps $0xff, %xmm2, %xmm7 # xmm7 = xmm7[3,3],xmm2[3,3]
mulps %xmm4, %xmm7
mulps %xmm8, %xmm9
addps %xmm7, %xmm9
movaps %xmm2, %xmm4
subps %xmm9, %xmm4
movaps %xmm4, 0xa0(%rsp)
addps %xmm0, %xmm10
movaps %xmm10, 0xc0(%rsp)
addps %xmm2, %xmm9
movaps %xmm9, 0xd0(%rsp)
movaps %xmm3, %xmm1
shufps $0xff, %xmm3, %xmm3 # xmm3 = xmm3[3,3,3,3]
movaps %xmm3, %xmm0
mulps %xmm6, %xmm0
movaps %xmm1, %xmm4
subps %xmm0, %xmm4
movaps %xmm4, %xmm7
movaps %xmm4, 0x70(%rsp)
movaps %xmm15, %xmm4
shufps $0xff, %xmm15, %xmm4 # xmm4 = xmm4[3,3],xmm15[3,3]
mulps %xmm6, %xmm4
mulps %xmm5, %xmm3
addps %xmm4, %xmm3
movaps %xmm15, %xmm4
subps %xmm3, %xmm4
addps %xmm1, %xmm0
movaps %xmm0, 0x10(%rsp)
addps %xmm15, %xmm3
movaps 0x13bed2c(%rip), %xmm1 # 0x1ef0f80
mulps %xmm1, %xmm4
subps %xmm4, %xmm7
movaps %xmm7, 0x90(%rsp)
mulps %xmm1, %xmm3
movaps %xmm0, %xmm1
subps %xmm3, %xmm1
movaps %xmm1, 0x60(%rsp)
movq 0x38(%r9,%rdx), %r10
movq 0x48(%r9,%rdx), %r9
movq %r9, %r11
imulq %rax, %r11
movaps (%r10,%r11), %xmm8
movaps %xmm8, (%rsp)
movq %r9, %r11
imulq %rcx, %r11
movaps (%r10,%r11), %xmm2
movq %r9, %r11
imulq %r15, %r11
movaps (%r10,%r11), %xmm4
movq 0x48(%rsp), %rsi
imulq %rdi, %r9
movaps (%r10,%r9), %xmm6
movaps %xmm6, %xmm5
movaps 0x13ba414(%rip), %xmm9 # 0x1eec6d0
mulps %xmm9, %xmm5
movaps %xmm4, %xmm3
mulps 0x13b9746(%rip), %xmm3 # 0x1eeba10
movaps %xmm6, %xmm7
movaps 0x13ba40c(%rip), %xmm0 # 0x1eec6e0
mulps %xmm0, %xmm6
addps %xmm3, %xmm6
addps %xmm5, %xmm3
xorps %xmm1, %xmm1
mulps %xmm1, %xmm7
addps %xmm4, %xmm5
mulps %xmm0, %xmm4
addps %xmm7, %xmm4
movaps %xmm2, %xmm11
mulps %xmm1, %xmm11
addps %xmm11, %xmm4
movaps %xmm8, %xmm7
mulps %xmm0, %xmm7
movaps %xmm0, %xmm1
subps %xmm7, %xmm4
movq 0x38(%r8,%rdx), %r9
movq 0x48(%r8,%rdx), %rdx
imulq %rdx, %rax
imulq %rdx, %rcx
imulq %rdx, %r15
imulq %rdi, %rdx
movups (%r9,%rdx), %xmm8
movaps %xmm8, %xmm7
mulps %xmm9, %xmm7
movups (%r9,%r15), %xmm10
movaps %xmm10, %xmm12
xorps %xmm0, %xmm0
mulps %xmm0, %xmm12
movaps %xmm8, %xmm9
mulps %xmm1, %xmm8
addps %xmm12, %xmm8
addps %xmm7, %xmm12
mulps %xmm0, %xmm9
addps %xmm10, %xmm7
mulps %xmm1, %xmm10
addps %xmm9, %xmm10
movups (%r9,%rcx), %xmm13
movaps %xmm13, %xmm14
mulps %xmm0, %xmm14
addps %xmm14, %xmm10
movups (%r9,%rax), %xmm9
movaps %xmm9, %xmm15
mulps %xmm1, %xmm15
subps %xmm15, %xmm10
addps %xmm11, %xmm5
addps %xmm2, %xmm3
movaps (%rsp), %xmm0
movaps %xmm0, %xmm11
movaps 0x13ba33b(%rip), %xmm15 # 0x1eec6d0
mulps %xmm15, %xmm11
addps %xmm11, %xmm3
addps %xmm11, %xmm5
movaps %xmm5, 0x20(%rsp)
mulps %xmm1, %xmm2
subps %xmm2, %xmm6
movaps %xmm0, %xmm5
xorps %xmm0, %xmm0
mulps %xmm0, %xmm5
addps %xmm6, %xmm5
movaps %xmm5, %xmm6
addps %xmm14, %xmm7
addps %xmm13, %xmm12
movaps %xmm9, %xmm5
mulps %xmm15, %xmm5
addps %xmm5, %xmm12
addps %xmm5, %xmm7
mulps %xmm1, %xmm13
subps %xmm13, %xmm8
mulps %xmm0, %xmm9
addps %xmm8, %xmm9
movaps %xmm4, %xmm8
shufps $0xc9, %xmm4, %xmm8 # xmm8 = xmm8[1,2],xmm4[0,3]
movaps %xmm8, %xmm11
mulps %xmm12, %xmm11
shufps $0xc9, %xmm12, %xmm12 # xmm12 = xmm12[1,2,0,3]
mulps %xmm4, %xmm12
subps %xmm12, %xmm11
mulps %xmm10, %xmm8
shufps $0xc9, %xmm10, %xmm10 # xmm10 = xmm10[1,2,0,3]
mulps %xmm4, %xmm10
subps %xmm10, %xmm8
movaps %xmm6, %xmm1
movaps %xmm6, %xmm5
shufps $0xc9, %xmm6, %xmm5 # xmm5 = xmm5[1,2],xmm6[0,3]
movaps %xmm5, %xmm6
mulps %xmm7, %xmm6
shufps $0xc9, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3]
mulps %xmm1, %xmm7
subps %xmm7, %xmm6
mulps %xmm9, %xmm5
shufps $0xc9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
mulps %xmm1, %xmm9
subps %xmm9, %xmm5
movaps %xmm11, %xmm9
mulps %xmm11, %xmm11
movaps %xmm11, %xmm7
shufps $0x55, %xmm11, %xmm7 # xmm7 = xmm7[1,1],xmm11[1,1]
movaps %xmm11, %xmm10
unpckhpd %xmm11, %xmm10 # xmm10 = xmm10[1],xmm11[1]
addps %xmm7, %xmm10
addps %xmm11, %xmm10
xorps %xmm11, %xmm11
movss %xmm10, %xmm11 # xmm11 = xmm10[0],xmm11[1,2,3]
movaps %xmm11, %xmm7
rsqrtss %xmm11, %xmm7
movaps %xmm10, %xmm13
movss 0x13ba703(%rip), %xmm14 # 0x1eecb80
mulss %xmm14, %xmm13
mulss %xmm7, %xmm13
movaps %xmm7, %xmm12
mulss %xmm7, %xmm7
mulss %xmm13, %xmm7
movss 0x13ba27c(%rip), %xmm0 # 0x1eec718
mulss %xmm0, %xmm12
subss %xmm7, %xmm12
shufps $0xc9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
shufps $0xc9, %xmm8, %xmm8 # xmm8 = xmm8[1,2,0,3]
movaps %xmm9, %xmm7
mulps %xmm8, %xmm7
movaps %xmm10, %xmm13
shufps $0x0, %xmm10, %xmm13 # xmm13 = xmm13[0,0],xmm10[0,0]
mulps %xmm8, %xmm13
movaps %xmm7, %xmm8
shufps $0x55, %xmm7, %xmm8 # xmm8 = xmm8[1,1],xmm7[1,1]
addss %xmm7, %xmm8
movhlps %xmm7, %xmm7 # xmm7 = xmm7[1,1]
addss %xmm8, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm9, %xmm7
subps %xmm7, %xmm13
rcpss %xmm11, %xmm11
mulss %xmm11, %xmm10
movss 0x13beafe(%rip), %xmm15 # 0x1ef0ff8
movaps %xmm15, %xmm7
subss %xmm10, %xmm7
mulss %xmm11, %xmm7
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm13, %xmm7
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
movaps %xmm9, %xmm8
mulps %xmm12, %xmm8
mulps %xmm12, %xmm7
movaps %xmm6, %xmm9
mulps %xmm6, %xmm6
movaps %xmm6, %xmm11
shufps $0x55, %xmm6, %xmm11 # xmm11 = xmm11[1,1],xmm6[1,1]
movaps %xmm6, %xmm10
unpckhpd %xmm6, %xmm10 # xmm10 = xmm10[1],xmm6[1]
addps %xmm11, %xmm10
addps %xmm6, %xmm10
xorps %xmm6, %xmm6
movss %xmm10, %xmm6 # xmm6 = xmm10[0],xmm6[1,2,3]
movaps %xmm6, %xmm12
rsqrtss %xmm6, %xmm12
movaps %xmm10, %xmm13
mulss %xmm14, %xmm13
mulss %xmm12, %xmm13
movaps %xmm12, %xmm11
mulss %xmm12, %xmm12
mulss %xmm13, %xmm12
mulss %xmm0, %xmm11
subss %xmm12, %xmm11
shufps $0xc9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
shufps $0xc9, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
movaps %xmm9, %xmm12
mulps %xmm5, %xmm12
movaps %xmm10, %xmm13
shufps $0x0, %xmm10, %xmm13 # xmm13 = xmm13[0,0],xmm10[0,0]
mulps %xmm5, %xmm13
movaps %xmm12, %xmm5
shufps $0x55, %xmm12, %xmm5 # xmm5 = xmm5[1,1],xmm12[1,1]
addss %xmm12, %xmm5
movhlps %xmm12, %xmm12 # xmm12 = xmm12[1,1]
addss %xmm5, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm9, %xmm12
subps %xmm12, %xmm13
rcpss %xmm6, %xmm6
mulss %xmm6, %xmm10
movaps %xmm15, %xmm12
subss %xmm10, %xmm12
mulss %xmm6, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm13, %xmm12
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
movaps %xmm9, %xmm10
mulps %xmm11, %xmm10
mulps %xmm11, %xmm12
movaps %xmm3, %xmm5
shufps $0xff, %xmm3, %xmm5 # xmm5 = xmm5[3,3],xmm3[3,3]
movaps %xmm5, %xmm6
mulps %xmm8, %xmm6
movaps %xmm4, %xmm9
shufps $0xff, %xmm4, %xmm9 # xmm9 = xmm9[3,3],xmm4[3,3]
mulps %xmm8, %xmm9
mulps %xmm7, %xmm5
addps %xmm9, %xmm5
movaps %xmm3, %xmm7
subps %xmm6, %xmm7
addps %xmm3, %xmm6
movaps %xmm4, %xmm8
subps %xmm5, %xmm8
addps %xmm4, %xmm5
movaps 0x20(%rsp), %xmm0
movaps %xmm0, %xmm9
shufps $0xff, %xmm0, %xmm9 # xmm9 = xmm9[3,3],xmm0[3,3]
movaps %xmm9, %xmm3
mulps %xmm10, %xmm3
movaps %xmm1, %xmm4
shufps $0xff, %xmm1, %xmm4 # xmm4 = xmm4[3,3],xmm1[3,3]
mulps %xmm10, %xmm4
mulps %xmm12, %xmm9
addps %xmm4, %xmm9
movaps %xmm0, %xmm4
subps %xmm3, %xmm4
addps %xmm0, %xmm3
movaps %xmm1, %xmm10
subps %xmm9, %xmm10
addps %xmm1, %xmm9
movaps 0x13be919(%rip), %xmm0 # 0x1ef0f80
mulps %xmm0, %xmm10
movaps %xmm4, %xmm2
subps %xmm10, %xmm2
mulps %xmm0, %xmm9
movaps %xmm3, %xmm1
subps %xmm9, %xmm1
movaps 0xa0(%rsp), %xmm9
mulps %xmm0, %xmm9
movaps 0xb0(%rsp), %xmm11
addps %xmm11, %xmm9
movaps %xmm9, %xmm12
mulps %xmm0, %xmm8
addps %xmm7, %xmm8
movss 0x13ba068(%rip), %xmm9 # 0x1eec714
movaps 0x30(%rsp), %xmm10
subss %xmm10, %xmm9
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulps %xmm10, %xmm7
mulps %xmm9, %xmm11
addps %xmm7, %xmm11
movaps %xmm11, 0xb0(%rsp)
mulps %xmm10, %xmm8
mulps %xmm9, %xmm12
addps %xmm8, %xmm12
movaps %xmm12, 0xa0(%rsp)
movaps 0xd0(%rsp), %xmm7
mulps %xmm0, %xmm7
movaps 0xc0(%rsp), %xmm8
addps %xmm8, %xmm7
movaps %xmm7, %xmm13
mulps %xmm0, %xmm5
addps %xmm6, %xmm5
mulps %xmm10, %xmm2
mulps %xmm10, %xmm4
movaps 0x90(%rsp), %xmm7
mulps %xmm9, %xmm7
addps %xmm2, %xmm7
movaps %xmm7, %xmm15
movaps %xmm7, 0x90(%rsp)
movaps 0x70(%rsp), %xmm0
mulps %xmm9, %xmm0
addps %xmm4, %xmm0
movaps %xmm0, 0x70(%rsp)
mulps %xmm10, %xmm6
mulps %xmm10, %xmm5
mulps %xmm10, %xmm1
mulps %xmm3, %xmm10
movaps %xmm8, %xmm2
mulps %xmm9, %xmm2
addps %xmm6, %xmm2
movaps %xmm2, %xmm6
movaps %xmm2, 0xc0(%rsp)
mulps %xmm9, %xmm13
addps %xmm5, %xmm13
movaps %xmm13, 0xd0(%rsp)
movaps 0x60(%rsp), %xmm0
mulps %xmm9, %xmm0
addps %xmm1, %xmm0
movaps %xmm0, 0x60(%rsp)
mulps 0x10(%rsp), %xmm9
addps %xmm10, %xmm9
movaps (%rsi), %xmm2
movaps %xmm11, %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm14
shufps $0x0, %xmm4, %xmm14 # xmm14 = xmm14[0,0],xmm4[0,0]
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
movaps 0x10(%r12), %xmm0
movaps 0x20(%r12), %xmm1
movaps 0x30(%r12), %xmm3
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
mulps %xmm0, %xmm14
addps %xmm5, %xmm14
movaps %xmm12, %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm7
shufps $0x0, %xmm4, %xmm7 # xmm7 = xmm7[0,0],xmm4[0,0]
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
mulps %xmm0, %xmm7
addps %xmm5, %xmm7
movaps %xmm15, %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps %xmm4, %xmm8
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps %xmm0, %xmm8
addps %xmm5, %xmm8
movaps 0x70(%rsp), %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps %xmm4, %xmm10
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
shufps $0x0, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
mulps %xmm0, %xmm10
addps %xmm5, %xmm10
movaps %xmm6, %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps %xmm4, %xmm11
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
shufps $0x0, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
mulps %xmm0, %xmm11
addps %xmm5, %xmm11
movaps %xmm13, %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps %xmm4, %xmm12
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm0, %xmm12
addps %xmm5, %xmm12
movaps 0x60(%rsp), %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm5
shufps $0x55, %xmm4, %xmm5 # xmm5 = xmm5[1,1],xmm4[1,1]
movaps %xmm4, %xmm13
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
mulps %xmm0, %xmm13
addps %xmm5, %xmm13
movaps %xmm9, %xmm4
subps %xmm2, %xmm4
movaps %xmm4, %xmm5
movaps %xmm4, %xmm2
shufps $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
mulps %xmm3, %xmm4
shufps $0x55, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
mulps %xmm1, %xmm2
addps %xmm4, %xmm2
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm0, %xmm5
addps %xmm2, %xmm5
movaps %xmm14, %xmm15
movlhps %xmm11, %xmm15 # xmm15 = xmm15[0],xmm11[0]
movaps %xmm7, %xmm3
movlhps %xmm12, %xmm3 # xmm3 = xmm3[0],xmm12[0]
movaps %xmm8, %xmm4
movlhps %xmm13, %xmm4 # xmm4 = xmm4[0],xmm13[0]
movaps %xmm10, %xmm6
movlhps %xmm5, %xmm6 # xmm6 = xmm6[0],xmm5[0]
movaps %xmm15, %xmm0
minps %xmm3, %xmm0
movaps %xmm4, %xmm1
minps %xmm6, %xmm1
minps %xmm1, %xmm0
movaps %xmm15, %xmm1
maxps %xmm3, %xmm1
movaps %xmm4, %xmm2
maxps %xmm6, %xmm2
maxps %xmm2, %xmm1
movaps %xmm0, %xmm2
unpckhpd %xmm0, %xmm2 # xmm2 = xmm2[1],xmm0[1]
minps %xmm2, %xmm0
movaps %xmm1, %xmm2
unpckhpd %xmm1, %xmm2 # xmm2 = xmm2[1],xmm1[1]
maxps %xmm2, %xmm1
movaps 0x13b9d66(%rip), %xmm2 # 0x1eec6c0
andps %xmm2, %xmm0
andps %xmm2, %xmm1
movsd 0x13b9d88(%rip), %xmm2 # 0x1eec6f0
maxps %xmm1, %xmm0
movaps %xmm0, %xmm1
shufps $0x55, %xmm0, %xmm1 # xmm1 = xmm1[1,1],xmm0[1,1]
maxss %xmm0, %xmm1
leaq 0xf(%rbx), %r11
mulss 0x13bf536(%rip), %xmm1 # 0x1ef1eb8
movlhps %xmm14, %xmm14 # xmm14 = xmm14[0,0]
movaps %xmm14, 0x220(%rsp)
movlhps %xmm7, %xmm7 # xmm7 = xmm7[0,0]
movaps %xmm7, 0x210(%rsp)
movlhps %xmm8, %xmm8 # xmm8 = xmm8[0,0]
movaps %xmm8, 0x200(%rsp)
movlhps %xmm10, %xmm10 # xmm10 = xmm10[0,0]
movaps %xmm10, 0x1f0(%rsp)
movlhps %xmm11, %xmm11 # xmm11 = xmm11[0,0]
movaps %xmm11, 0x1e0(%rsp)
movlhps %xmm12, %xmm12 # xmm12 = xmm12[0,0]
movaps %xmm12, 0x1d0(%rsp)
movlhps %xmm13, %xmm13 # xmm13 = xmm13[0,0]
movaps %xmm13, 0x1c0(%rsp)
movlhps %xmm5, %xmm5 # xmm5 = xmm5[0,0]
movaps %xmm5, 0x1b0(%rsp)
movaps %xmm1, 0x30(%rsp)
shufps $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
movaps %xmm1, 0x2e0(%rsp)
xorps 0x13b9cd2(%rip), %xmm1 # 0x1eec6d0
movaps %xmm1, 0x2d0(%rsp)
movaps %xmm3, %xmm0
subps %xmm15, %xmm0
movaps %xmm0, 0x3a0(%rsp)
movaps %xmm4, %xmm0
subps %xmm3, %xmm0
movaps %xmm0, 0x390(%rsp)
movaps %xmm6, %xmm0
subps %xmm4, %xmm0
movaps %xmm0, 0x380(%rsp)
movaps 0xc0(%rsp), %xmm0
subps 0xb0(%rsp), %xmm0
movaps %xmm0, 0x2b0(%rsp)
movaps 0xd0(%rsp), %xmm0
subps 0xa0(%rsp), %xmm0
movaps %xmm0, 0x2a0(%rsp)
movaps 0x60(%rsp), %xmm0
subps 0x90(%rsp), %xmm0
movaps %xmm0, 0x290(%rsp)
movaps %xmm9, 0x170(%rsp)
subps 0x70(%rsp), %xmm9
movaps %xmm9, 0x280(%rsp)
xorl %r14d, %r14d
movaps %xmm2, 0x10(%rsp)
movaps %xmm2, 0x20(%rsp)
movaps %xmm15, 0x120(%rsp)
movaps %xmm3, 0x110(%rsp)
movaps %xmm4, 0x100(%rsp)
movaps %xmm6, 0x370(%rsp)
movaps 0x10(%rsp), %xmm0
unpcklps %xmm0, %xmm0 # xmm0 = xmm0[0,0,1,1]
movaps 0x13b9f44(%rip), %xmm15 # 0x1eeca10
movaps %xmm15, %xmm14
subps %xmm0, %xmm14
movaps 0x1e0(%rsp), %xmm1
mulps %xmm0, %xmm1
movaps 0x1d0(%rsp), %xmm2
mulps %xmm0, %xmm2
movaps 0x1c0(%rsp), %xmm3
mulps %xmm0, %xmm3
mulps 0x1b0(%rsp), %xmm0
movaps 0x220(%rsp), %xmm8
mulps %xmm14, %xmm8
addps %xmm1, %xmm8
movaps 0x210(%rsp), %xmm10
mulps %xmm14, %xmm10
addps %xmm2, %xmm10
movaps 0x200(%rsp), %xmm11
mulps %xmm14, %xmm11
addps %xmm3, %xmm11
mulps 0x1f0(%rsp), %xmm14
addps %xmm0, %xmm14
movaps 0x20(%rsp), %xmm6
movaps %xmm6, %xmm12
shufps $0x55, %xmm6, %xmm12 # xmm12 = xmm12[1,1],xmm6[1,1]
movaps %xmm12, %xmm0
subss %xmm6, %xmm0
mulss 0x13bf355(%rip), %xmm0 # 0x1ef1eb0
movaps %xmm0, %xmm13
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
subps %xmm6, %xmm12
movaps %xmm8, %xmm2
shufps $0x0, %xmm8, %xmm2 # xmm2 = xmm2[0,0],xmm8[0,0]
movaps %xmm8, %xmm4
shufps $0x55, %xmm8, %xmm4 # xmm4 = xmm4[1,1],xmm8[1,1]
movaps %xmm10, %xmm3
shufps $0x0, %xmm10, %xmm3 # xmm3 = xmm3[0,0],xmm10[0,0]
movaps %xmm10, %xmm9
shufps $0x55, %xmm10, %xmm9 # xmm9 = xmm9[1,1],xmm10[1,1]
movaps %xmm11, %xmm5
shufps $0x0, %xmm11, %xmm5 # xmm5 = xmm5[0,0],xmm11[0,0]
movaps %xmm11, %xmm1
shufps $0x55, %xmm11, %xmm1 # xmm1 = xmm1[1,1],xmm11[1,1]
movaps %xmm14, %xmm7
shufps $0x0, %xmm14, %xmm7 # xmm7 = xmm7[0,0],xmm14[0,0]
movaps %xmm14, %xmm0
shufps $0x55, %xmm14, %xmm0 # xmm0 = xmm0[1,1],xmm14[1,1]
shufps $0x0, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
movaps %xmm13, 0xf0(%rsp)
mulps 0x13bf1eb(%rip), %xmm12 # 0x1ef1db0
addps %xmm6, %xmm12
movaps %xmm15, %xmm13
subps %xmm12, %xmm13
movaps %xmm3, %xmm15
mulps %xmm12, %xmm15
mulps %xmm13, %xmm2
addps %xmm15, %xmm2
movaps %xmm9, %xmm15
mulps %xmm12, %xmm15
mulps %xmm13, %xmm4
addps %xmm15, %xmm4
movaps %xmm5, %xmm15
mulps %xmm12, %xmm15
mulps %xmm13, %xmm3
addps %xmm15, %xmm3
movaps %xmm1, %xmm15
mulps %xmm12, %xmm15
mulps %xmm13, %xmm9
addps %xmm15, %xmm9
mulps %xmm12, %xmm7
mulps %xmm12, %xmm0
mulps %xmm13, %xmm5
addps %xmm7, %xmm5
mulps %xmm13, %xmm1
addps %xmm0, %xmm1
movaps %xmm12, %xmm0
mulps %xmm3, %xmm0
movaps %xmm12, %xmm7
mulps %xmm9, %xmm7
mulps %xmm13, %xmm2
addps %xmm0, %xmm2
mulps %xmm13, %xmm4
addps %xmm7, %xmm4
mulps %xmm12, %xmm5
mulps %xmm12, %xmm1
mulps %xmm13, %xmm3
addps %xmm5, %xmm3
mulps %xmm13, %xmm9
addps %xmm1, %xmm9
movaps %xmm12, %xmm0
mulps %xmm3, %xmm0
movaps %xmm12, %xmm5
mulps %xmm9, %xmm5
movaps %xmm13, %xmm1
mulps %xmm2, %xmm1
addps %xmm0, %xmm1
movaps %xmm13, %xmm6
mulps %xmm4, %xmm6
addps %xmm5, %xmm6
subps %xmm2, %xmm3
subps %xmm4, %xmm9
movaps 0x13be2d4(%rip), %xmm4 # 0x1ef0f60
mulps %xmm4, %xmm3
mulps %xmm4, %xmm9
movaps 0xf0(%rsp), %xmm7
mulps %xmm7, %xmm3
mulps %xmm7, %xmm9
xorps %xmm0, %xmm0
shufps $0x30, %xmm1, %xmm0 # xmm0 = xmm0[0,0],xmm1[3,0]
movaps %xmm1, %xmm2
movaps %xmm1, 0x180(%rsp)
shufps $0x29, %xmm0, %xmm2 # xmm2 = xmm2[1,2],xmm0[2,0]
movaps %xmm2, %xmm4
xorps %xmm0, %xmm0
shufps $0x30, %xmm6, %xmm0 # xmm0 = xmm0[0,0],xmm6[3,0]
movaps %xmm6, %xmm2
movaps %xmm6, %xmm15
movaps %xmm6, 0x190(%rsp)
shufps $0x29, %xmm0, %xmm2 # xmm2 = xmm2[1,2],xmm0[2,0]
movaps %xmm2, %xmm5
xorps %xmm0, %xmm0
shufps $0x30, %xmm3, %xmm0 # xmm0 = xmm0[0,0],xmm3[3,0]
xorps %xmm2, %xmm2
shufps $0x30, %xmm9, %xmm2 # xmm2 = xmm2[0,0],xmm9[3,0]
movaps %xmm1, %xmm6
addps %xmm3, %xmm6
movaps %xmm6, 0x80(%rsp)
shufps $0x29, %xmm0, %xmm3 # xmm3 = xmm3[1,2],xmm0[2,0]
movaps %xmm15, %xmm0
addps %xmm9, %xmm0
movaps %xmm0, (%rsp)
shufps $0x29, %xmm2, %xmm9 # xmm9 = xmm9[1,2],xmm2[2,0]
movaps %xmm4, %xmm0
movaps %xmm4, %xmm2
subps %xmm3, %xmm0
movaps %xmm0, 0x1a0(%rsp)
movaps %xmm5, %xmm3
movaps %xmm5, %xmm4
subps %xmm9, %xmm3
movaps %xmm8, %xmm9
shufps $0xaa, %xmm8, %xmm9 # xmm9 = xmm9[2,2],xmm8[2,2]
movaps %xmm10, %xmm5
shufps $0xaa, %xmm10, %xmm5 # xmm5 = xmm5[2,2],xmm10[2,2]
movaps %xmm5, %xmm0
mulps %xmm12, %xmm0
mulps %xmm13, %xmm9
addps %xmm0, %xmm9
shufps $0xff, %xmm8, %xmm8 # xmm8 = xmm8[3,3,3,3]
shufps $0xff, %xmm10, %xmm10 # xmm10 = xmm10[3,3,3,3]
movaps %xmm10, %xmm0
mulps %xmm12, %xmm0
mulps %xmm13, %xmm8
addps %xmm0, %xmm8
movaps %xmm11, %xmm15
shufps $0xaa, %xmm11, %xmm15 # xmm15 = xmm15[2,2],xmm11[2,2]
movaps %xmm15, %xmm0
mulps %xmm12, %xmm0
mulps %xmm13, %xmm5
addps %xmm0, %xmm5
shufps $0xff, %xmm11, %xmm11 # xmm11 = xmm11[3,3,3,3]
movaps %xmm11, %xmm0
mulps %xmm12, %xmm0
mulps %xmm13, %xmm10
addps %xmm0, %xmm10
movaps %xmm14, %xmm0
shufps $0xaa, %xmm14, %xmm0 # xmm0 = xmm0[2,2],xmm14[2,2]
mulps %xmm12, %xmm0
mulps %xmm13, %xmm15
addps %xmm0, %xmm15
shufps $0xff, %xmm14, %xmm14 # xmm14 = xmm14[3,3,3,3]
mulps %xmm12, %xmm14
mulps %xmm13, %xmm11
addps %xmm14, %xmm11
movaps %xmm12, %xmm0
mulps %xmm5, %xmm0
mulps %xmm13, %xmm9
addps %xmm0, %xmm9
movaps %xmm12, %xmm0
mulps %xmm10, %xmm0
mulps %xmm13, %xmm8
addps %xmm0, %xmm8
mulps %xmm12, %xmm15
mulps %xmm13, %xmm5
addps %xmm15, %xmm5
mulps %xmm12, %xmm11
mulps %xmm13, %xmm10
addps %xmm11, %xmm10
movaps %xmm12, %xmm0
mulps %xmm5, %xmm0
movaps %xmm13, %xmm6
mulps %xmm9, %xmm6
addps %xmm0, %xmm6
mulps %xmm10, %xmm12
mulps %xmm8, %xmm13
addps %xmm12, %xmm13
subps %xmm9, %xmm5
subps %xmm8, %xmm10
movaps 0x13be149(%rip), %xmm0 # 0x1ef0f60
mulps %xmm0, %xmm5
mulps %xmm0, %xmm10
mulps %xmm7, %xmm5
mulps %xmm7, %xmm10
xorps %xmm0, %xmm0
shufps $0x30, %xmm6, %xmm0 # xmm0 = xmm0[0,0],xmm6[3,0]
movaps %xmm6, %xmm1
shufps $0x29, %xmm0, %xmm1 # xmm1 = xmm1[1,2],xmm0[2,0]
xorps %xmm0, %xmm0
shufps $0x30, %xmm13, %xmm0 # xmm0 = xmm0[0,0],xmm13[3,0]
movaps %xmm13, %xmm12
shufps $0x29, %xmm0, %xmm12 # xmm12 = xmm12[1,2],xmm0[2,0]
xorps %xmm0, %xmm0
shufps $0x30, %xmm5, %xmm0 # xmm0 = xmm0[0,0],xmm5[3,0]
movaps %xmm6, %xmm7
addps %xmm5, %xmm7
movaps %xmm7, 0xf0(%rsp)
shufps $0x29, %xmm0, %xmm5 # xmm5 = xmm5[1,2],xmm0[2,0]
xorps %xmm0, %xmm0
shufps $0x30, %xmm10, %xmm0 # xmm0 = xmm0[0,0],xmm10[3,0]
movaps %xmm13, %xmm11
addps %xmm10, %xmm11
shufps $0x29, %xmm0, %xmm10 # xmm10 = xmm10[1,2],xmm0[2,0]
movaps %xmm1, %xmm7
subps %xmm5, %xmm7
movaps %xmm12, %xmm14
subps %xmm10, %xmm14
movaps %xmm6, %xmm0
movaps 0x180(%rsp), %xmm15
subps %xmm15, %xmm0
movaps %xmm1, %xmm10
movaps %xmm2, %xmm8
subps %xmm2, %xmm10
addps %xmm0, %xmm10
movaps %xmm13, %xmm0
movaps 0x190(%rsp), %xmm5
subps %xmm5, %xmm0
movaps %xmm12, %xmm9
subps %xmm4, %xmm9
addps %xmm0, %xmm9
movaps %xmm15, %xmm0
mulps %xmm10, %xmm5
mulps %xmm9, %xmm0
subps %xmm0, %xmm5
movaps (%rsp), %xmm2
mulps %xmm10, %xmm2
movaps 0x80(%rsp), %xmm0
mulps %xmm9, %xmm0
subps %xmm0, %xmm2
movaps %xmm3, 0x350(%rsp)
mulps %xmm10, %xmm3
movaps %xmm9, %xmm0
mulps 0x1a0(%rsp), %xmm0
subps %xmm0, %xmm3
movaps %xmm10, %xmm15
movaps %xmm4, 0x360(%rsp)
mulps %xmm4, %xmm15
movaps %xmm9, %xmm0
movaps %xmm8, 0x340(%rsp)
mulps %xmm8, %xmm0
subps %xmm0, %xmm15
movaps %xmm13, %xmm0
mulps %xmm10, %xmm0
movaps %xmm6, %xmm4
mulps %xmm9, %xmm6
subps %xmm6, %xmm0
movaps %xmm11, 0x320(%rsp)
mulps %xmm10, %xmm11
movaps 0xf0(%rsp), %xmm8
mulps %xmm9, %xmm8
subps %xmm8, %xmm11
movaps %xmm14, 0x300(%rsp)
movaps %xmm14, %xmm8
mulps %xmm10, %xmm8
movaps %xmm9, %xmm14
movaps %xmm7, 0x2f0(%rsp)
mulps %xmm7, %xmm14
subps %xmm14, %xmm8
movaps %xmm12, 0x310(%rsp)
mulps %xmm12, %xmm10
movaps %xmm1, 0x330(%rsp)
mulps %xmm1, %xmm9
subps %xmm9, %xmm10
movaps %xmm5, %xmm14
minps %xmm2, %xmm14
maxps %xmm2, %xmm5
movaps %xmm3, %xmm2
minps %xmm15, %xmm2
minps %xmm2, %xmm14
maxps %xmm15, %xmm3
maxps %xmm3, %xmm5
movaps %xmm0, %xmm2
minps %xmm11, %xmm2
maxps %xmm11, %xmm0
movaps %xmm8, %xmm3
minps %xmm10, %xmm3
minps %xmm3, %xmm2
minps %xmm2, %xmm14
maxps %xmm10, %xmm8
maxps %xmm8, %xmm0
maxps %xmm0, %xmm5
movaps 0x2e0(%rsp), %xmm8
cmpleps %xmm8, %xmm14
movaps 0x2d0(%rsp), %xmm9
cmpnltps %xmm9, %xmm5
andps %xmm14, %xmm5
andps 0x2c0(%rsp), %xmm5
movmskps %xmm5, %ecx
movl $0x0, %eax
testl %ecx, %ecx
je 0xb33151
movaps 0x340(%rsp), %xmm11
movaps %xmm11, %xmm0
movaps 0x180(%rsp), %xmm1
subps %xmm1, %xmm0
movaps 0x330(%rsp), %xmm6
movaps %xmm6, %xmm10
subps %xmm4, %xmm10
addps %xmm0, %xmm10
movaps 0x360(%rsp), %xmm3
movaps %xmm3, %xmm2
movaps 0x190(%rsp), %xmm15
subps %xmm15, %xmm2
movaps 0x310(%rsp), %xmm12
movaps %xmm12, %xmm0
subps %xmm13, %xmm0
addps %xmm2, %xmm0
mulps %xmm10, %xmm15
mulps %xmm0, %xmm1
subps %xmm1, %xmm15
movaps (%rsp), %xmm2
mulps %xmm10, %xmm2
movaps 0x80(%rsp), %xmm7
mulps %xmm0, %xmm7
subps %xmm7, %xmm2
movaps 0x350(%rsp), %xmm7
mulps %xmm10, %xmm7
movaps 0x1a0(%rsp), %xmm14
mulps %xmm0, %xmm14
subps %xmm14, %xmm7
mulps %xmm10, %xmm3
mulps %xmm0, %xmm11
subps %xmm11, %xmm3
mulps %xmm10, %xmm13
mulps %xmm0, %xmm4
subps %xmm4, %xmm13
movaps 0x320(%rsp), %xmm4
mulps %xmm10, %xmm4
movaps 0xf0(%rsp), %xmm1
mulps %xmm0, %xmm1
subps %xmm1, %xmm4
movaps 0x300(%rsp), %xmm11
mulps %xmm10, %xmm11
movaps 0x2f0(%rsp), %xmm1
mulps %xmm0, %xmm1
subps %xmm1, %xmm11
mulps %xmm12, %xmm10
mulps %xmm6, %xmm0
subps %xmm0, %xmm10
movaps %xmm15, %xmm0
minps %xmm2, %xmm0
maxps %xmm2, %xmm15
movaps %xmm7, %xmm2
minps %xmm3, %xmm2
minps %xmm2, %xmm0
maxps %xmm3, %xmm7
maxps %xmm7, %xmm15
movaps %xmm13, %xmm2
minps %xmm4, %xmm2
maxps %xmm4, %xmm13
movaps %xmm11, %xmm3
minps %xmm10, %xmm3
minps %xmm3, %xmm2
minps %xmm2, %xmm0
maxps %xmm10, %xmm11
maxps %xmm11, %xmm13
maxps %xmm13, %xmm15
cmpleps %xmm8, %xmm0
cmpnltps %xmm9, %xmm15
andps %xmm0, %xmm15
andps %xmm5, %xmm15
movmskps %xmm15, %eax
testl %eax, %eax
je 0xb3317c
movl %r14d, %ecx
movl %eax, 0x230(%rsp,%rcx,4)
movaps 0x20(%rsp), %xmm0
movlps %xmm0, 0x3b0(%rsp,%rcx,8)
movaps 0x10(%rsp), %xmm0
movlps %xmm0, 0x3d0(%rsp,%rcx,8)
incl %r14d
testl %r14d, %r14d
je 0xb34295
leal -0x1(%r14), %eax
movl 0x230(%rsp,%rax,4), %edx
movsd 0x3d0(%rsp,%rax,8), %xmm3
bsfq %rdx, %rcx
leal -0x1(%rdx), %edi
andl %edx, %edi
movl %edi, 0x230(%rsp,%rax,4)
cmovel %eax, %r14d
testq %rcx, %rcx
js 0xb331bc
xorps %xmm0, %xmm0
cvtsi2ss %rcx, %xmm0
jmp 0xb331d6
movq %rcx, %rdx
shrq %rdx
movl %ecx, %edi
andl $0x1, %edi
orq %rdx, %rdi
xorps %xmm0, %xmm0
cvtsi2ss %rdi, %xmm0
addss %xmm0, %xmm0
incq %rcx
js 0xb331e5
xorps %xmm1, %xmm1
cvtsi2ss %rcx, %xmm1
jmp 0xb331fd
movq %rcx, %rdx
shrq %rdx
andl $0x1, %ecx
orq %rdx, %rcx
xorps %xmm1, %xmm1
cvtsi2ss %rcx, %xmm1
addss %xmm1, %xmm1
unpcklps %xmm1, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
mulps 0x13bec39(%rip), %xmm0 # 0x1ef1e40
movaps 0x13b9822(%rip), %xmm1 # 0x1eeca30
subps %xmm0, %xmm1
movss 0x3b0(%rsp,%rax,8), %xmm9
movss 0x3b4(%rsp,%rax,8), %xmm2
shufps $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
mulps %xmm0, %xmm2
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulps %xmm1, %xmm9
addps %xmm2, %xmm9
movaps %xmm9, %xmm2
shufps $0x55, %xmm9, %xmm2 # xmm2 = xmm2[1,1],xmm9[1,1]
movaps %xmm2, %xmm1
subss %xmm9, %xmm1
movss 0x13bddaf(%rip), %xmm0 # 0x1ef1000
ucomiss %xmm1, %xmm0
movaps %xmm3, 0x10(%rsp)
movaps %xmm9, 0x20(%rsp)
jbe 0xb32abc
movaps %xmm3, %xmm0
unpcklps %xmm3, %xmm0 # xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
movaps 0x13b979e(%rip), %xmm4 # 0x1eeca10
subps %xmm0, %xmm4
movaps 0x1e0(%rsp), %xmm3
mulps %xmm0, %xmm3
movaps 0x1d0(%rsp), %xmm7
mulps %xmm0, %xmm7
movaps 0x1c0(%rsp), %xmm8
mulps %xmm0, %xmm8
mulps 0x1b0(%rsp), %xmm0
movaps 0x220(%rsp), %xmm5
mulps %xmm4, %xmm5
addps %xmm3, %xmm5
movaps 0x210(%rsp), %xmm6
mulps %xmm4, %xmm6
addps %xmm7, %xmm6
movaps 0x200(%rsp), %xmm7
mulps %xmm4, %xmm7
addps %xmm8, %xmm7
mulps 0x1f0(%rsp), %xmm4
addps %xmm0, %xmm4
movaps %xmm9, %xmm0
shufps $0x0, %xmm9, %xmm0 # xmm0 = xmm0[0,0],xmm9[0,0]
subps %xmm7, %xmm4
movaps %xmm0, %xmm3
mulps %xmm4, %xmm3
addps %xmm7, %xmm3
mulps %xmm2, %xmm4
addps %xmm7, %xmm4
subps %xmm6, %xmm7
movaps %xmm0, %xmm8
mulps %xmm7, %xmm8
addps %xmm6, %xmm8
mulps %xmm2, %xmm7
addps %xmm6, %xmm7
subps %xmm5, %xmm6
movaps %xmm0, %xmm9
mulps %xmm6, %xmm9
addps %xmm5, %xmm9
subps %xmm8, %xmm3
mulps %xmm0, %xmm3
addps %xmm8, %xmm3
subps %xmm9, %xmm8
mulps %xmm0, %xmm8
addps %xmm9, %xmm8
subps %xmm8, %xmm3
mulps %xmm3, %xmm0
addps %xmm8, %xmm0
movaps 0x13bdc21(%rip), %xmm8 # 0x1ef0f60
mulps %xmm8, %xmm3
mulps %xmm2, %xmm6
addps %xmm5, %xmm6
subps %xmm7, %xmm4
mulps %xmm2, %xmm4
addps %xmm7, %xmm4
subps %xmm6, %xmm7
mulps %xmm2, %xmm7
addps %xmm6, %xmm7
subps %xmm7, %xmm4
mulps %xmm4, %xmm2
addps %xmm7, %xmm2
mulps %xmm8, %xmm4
movaps %xmm1, %xmm5
mulss 0x13beb49(%rip), %xmm5 # 0x1ef1ebc
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
mulps %xmm5, %xmm3
addps %xmm0, %xmm3
mulps %xmm4, %xmm5
movaps %xmm2, %xmm4
subps %xmm5, %xmm4
movaps %xmm0, %xmm14
unpckhpd %xmm0, %xmm14 # xmm14 = xmm14[1],xmm0[1]
movaps %xmm2, %xmm6
unpckhpd %xmm2, %xmm6 # xmm6 = xmm6[1],xmm2[1]
movaps %xmm14, %xmm5
subps %xmm0, %xmm5
movaps %xmm6, %xmm8
subps %xmm2, %xmm8
addps %xmm5, %xmm8
movaps %xmm0, %xmm5
shufps $0xf5, %xmm0, %xmm5 # xmm5 = xmm5[1,1],xmm0[3,3]
movaps %xmm3, %xmm9
shufps $0xf5, %xmm3, %xmm9 # xmm9 = xmm9[1,1],xmm3[3,3]
movaps %xmm4, %xmm10
shufps $0xf5, %xmm4, %xmm10 # xmm10 = xmm10[1,1],xmm4[3,3]
movaps %xmm2, %xmm11
shufps $0xf5, %xmm2, %xmm11 # xmm11 = xmm11[1,1],xmm2[3,3]
movaps %xmm8, %xmm7
shufps $0x0, %xmm8, %xmm7 # xmm7 = xmm7[0,0],xmm8[0,0]
shufps $0x55, %xmm8, %xmm8 # xmm8 = xmm8[1,1,1,1]
mulps %xmm8, %xmm5
mulps %xmm8, %xmm9
mulps %xmm8, %xmm10
mulps %xmm11, %xmm8
movaps %xmm0, %xmm12
mulps %xmm7, %xmm12
addps %xmm5, %xmm12
movaps %xmm3, %xmm11
mulps %xmm7, %xmm11
addps %xmm9, %xmm11
movaps %xmm4, %xmm5
mulps %xmm7, %xmm5
addps %xmm10, %xmm5
mulps %xmm2, %xmm7
addps %xmm8, %xmm7
movaps %xmm11, %xmm8
movaps %xmm12, %xmm10
minss %xmm11, %xmm10
maxss %xmm12, %xmm11
movhlps %xmm12, %xmm12 # xmm12 = xmm12[1,1]
movhlps %xmm8, %xmm8 # xmm8 = xmm8[1,1]
movaps %xmm5, %xmm9
minss %xmm7, %xmm9
minss %xmm9, %xmm10
movaps %xmm7, %xmm9
maxss %xmm5, %xmm7
movhlps %xmm5, %xmm5 # xmm5 = xmm5[1,1]
movhlps %xmm9, %xmm9 # xmm9 = xmm9[1,1]
maxss %xmm11, %xmm7
movaps %xmm12, %xmm13
minss %xmm8, %xmm13
maxss %xmm12, %xmm8
movaps %xmm5, %xmm11
minss %xmm9, %xmm11
minss %xmm11, %xmm13
maxss %xmm5, %xmm9
maxss %xmm8, %xmm9
movss 0x13be5d0(%rip), %xmm5 # 0x1ef1a4c
ucomiss %xmm1, %xmm5
seta %cl
cmpl $0x4, %r14d
setae %al
movss 0x13bd547(%rip), %xmm5 # 0x1ef09d8
ucomiss %xmm10, %xmm5
jbe 0xb334a1
ucomiss 0x13bea21(%rip), %xmm9 # 0x1ef1ec0
ja 0xb334eb
movss 0x13bea16(%rip), %xmm8 # 0x1ef1ec0
ucomiss %xmm8, %xmm9
setbe %dl
movss 0x13bd51f(%rip), %xmm5 # 0x1ef09d8
ucomiss %xmm10, %xmm5
setbe %dil
ucomiss %xmm13, %xmm5
setbe %r10b
ucomiss %xmm8, %xmm7
setbe %r8b
movl %r8d, %r9d
orb %r10b, %r9b
cmpb $0x1, %r9b
jne 0xb334eb
orb %r8b, %dil
je 0xb334eb
orb %dl, %r10b
jne 0xb340f8
movaps %xmm14, 0x80(%rsp)
movaps %xmm6, (%rsp)
orb %cl, %al
xorps %xmm5, %xmm5
movaps %xmm10, %xmm15
cmpltss %xmm5, %xmm15
movaps %xmm15, %xmm8
movss 0x13bd4b9(%rip), %xmm6 # 0x1ef09cc
andps %xmm6, %xmm8
movss 0x13b91f4(%rip), %xmm11 # 0x1eec714
andnps %xmm11, %xmm15
orps %xmm8, %xmm15
movaps %xmm7, %xmm14
cmpltss %xmm5, %xmm14
movaps %xmm14, %xmm8
andps %xmm6, %xmm8
andnps %xmm11, %xmm14
orps %xmm8, %xmm14
ucomiss %xmm14, %xmm15
xorps %xmm8, %xmm8
jne 0xb33557
jp 0xb33557
movss 0x13b84c9(%rip), %xmm8 # 0x1eeba20
xorps %xmm12, %xmm12
jne 0xb33568
jp 0xb33568
movss 0x13b961c(%rip), %xmm12 # 0x1eecb84
movaps %xmm13, %xmm11
cmpltss %xmm5, %xmm11
movaps %xmm11, %xmm5
movss 0x13bd44e(%rip), %xmm6 # 0x1ef09cc
andps %xmm6, %xmm5
movss 0x13b918b(%rip), %xmm6 # 0x1eec714
andnps %xmm6, %xmm11
orps %xmm5, %xmm11
ucomiss %xmm11, %xmm15
jne 0xb33599
jnp 0xb335e6
ucomiss %xmm10, %xmm13
movss 0x13b916e(%rip), %xmm15 # 0x1eec714
jne 0xb335f5
jp 0xb335f5
xorps %xmm13, %xmm13
ucomiss %xmm13, %xmm10
movss 0x13b8466(%rip), %xmm5 # 0x1eeba20
jne 0xb335c9
movss 0x13b845c(%rip), %xmm5 # 0x1eeba20
jp 0xb335c9
xorps %xmm5, %xmm5
cmpeqss %xmm13, %xmm10
movd %xmm10, %ecx
andl $0x1, %ecx
leaq 0x13be8e6(%rip), %rdx # 0x1ef1ec4
movss (%rdx,%rcx,4), %xmm10
jmp 0xb33622
xorps %xmm13, %xmm13
movss 0x13b9121(%rip), %xmm15 # 0x1eec714
jmp 0xb33630
subss %xmm10, %xmm13
xorps 0x13b90ce(%rip), %xmm10 # 0x1eec6d0
divss %xmm13, %xmm10
movaps %xmm15, %xmm5
subss %xmm10, %xmm5
xorps %xmm13, %xmm13
mulss %xmm13, %xmm5
addss %xmm10, %xmm5
movaps %xmm5, %xmm10
minss %xmm5, %xmm8
maxss %xmm12, %xmm10
movaps %xmm10, %xmm12
xorl %ecx, %ecx
ucomiss %xmm9, %xmm13
seta %cl
leaq 0x13be88c(%rip), %rdx # 0x1ef1ecc
movss (%rdx,%rcx,4), %xmm10
ucomiss %xmm10, %xmm14
jne 0xb3364e
jnp 0xb33690
ucomiss %xmm7, %xmm9
jne 0xb3369f
jp 0xb3369f
xorps %xmm9, %xmm9
ucomiss %xmm9, %xmm7
movss 0x13b83ba(%rip), %xmm5 # 0x1eeba20
jne 0xb33675
movss 0x13b83b0(%rip), %xmm5 # 0x1eeba20
jp 0xb33675
xorps %xmm5, %xmm5
cmpeqss %xmm9, %xmm7
movd %xmm7, %ecx
andl $0x1, %ecx
leaq 0x13be83b(%rip), %rdx # 0x1ef1ec4
movss (%rdx,%rcx,4), %xmm7
jmp 0xb336c4
unpcklps %xmm12, %xmm8 # xmm8 = xmm8[0],xmm12[0],xmm8[1],xmm12[1]
movsd 0x13b9053(%rip), %xmm14 # 0x1eec6f0
jmp 0xb336db
subss %xmm7, %xmm9
xorps 0x13b9025(%rip), %xmm7 # 0x1eec6d0
divss %xmm9, %xmm7
movaps %xmm15, %xmm5
subss %xmm7, %xmm5
mulss %xmm13, %xmm5
addss %xmm7, %xmm5
movaps %xmm5, %xmm7
movsd 0x13b9023(%rip), %xmm14 # 0x1eec6f0
minss %xmm5, %xmm8
maxss %xmm12, %xmm7
unpcklps %xmm7, %xmm8 # xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
ucomiss %xmm10, %xmm11
jne 0xb336e3
jnp 0xb3370e
movaps %xmm8, %xmm5
cmpltps 0x13b9341(%rip), %xmm5 # 0x1eeca30
movaps %xmm6, %xmm7
movlhps %xmm8, %xmm7 # xmm7 = xmm7[0],xmm8[0]
shufps $0xe2, %xmm8, %xmm7 # xmm7 = xmm7[2,0],xmm8[2,3]
movss %xmm6, %xmm8 # xmm8 = xmm6[0],xmm8[1,2,3]
andps %xmm5, %xmm7
andnps %xmm8, %xmm5
orps %xmm7, %xmm5
movaps %xmm5, %xmm8
movaps %xmm8, %xmm9
cmpltps %xmm14, %xmm9
movaps %xmm8, %xmm5
movss %xmm13, %xmm5 # xmm5 = xmm13[0],xmm5[1,2,3]
movaps %xmm6, %xmm7
movlhps %xmm8, %xmm7 # xmm7 = xmm7[0],xmm8[0]
shufps $0xe2, %xmm8, %xmm7 # xmm7 = xmm7[2,0],xmm8[2,3]
andps %xmm9, %xmm5
andnps %xmm7, %xmm9
orps %xmm5, %xmm9
pshufd $0x55, %xmm9, %xmm5 # xmm5 = xmm9[1,1,1,1]
movb $0x1, %r13b
ucomiss %xmm5, %xmm9
ja 0xb340e5
addps 0x13be6fd(%rip), %xmm9 # 0x1ef1e50
movlhps %xmm0, %xmm0 # xmm0 = xmm0[0,0]
movaps %xmm3, %xmm7
movlhps %xmm3, %xmm7 # xmm7 = xmm7[0],xmm3[0]
movaps %xmm4, %xmm8
movlhps %xmm4, %xmm8 # xmm8 = xmm8[0],xmm4[0]
movlhps %xmm2, %xmm2 # xmm2 = xmm2[0,0]
movhlps %xmm3, %xmm3 # xmm3 = xmm3[1,1]
movhlps %xmm4, %xmm4 # xmm4 = xmm4[1,1]
movaps %xmm9, %xmm10
cmpltps %xmm14, %xmm10
movaps %xmm9, %xmm5
movss %xmm13, %xmm5 # xmm5 = xmm13[0],xmm5[1,2,3]
movaps %xmm15, %xmm11
movlhps %xmm9, %xmm11 # xmm11 = xmm11[0],xmm9[0]
shufps $0xe2, %xmm9, %xmm11 # xmm11 = xmm11[2,0],xmm9[2,3]
andps %xmm10, %xmm5
andnps %xmm11, %xmm10
orps %xmm5, %xmm10
pshufd $0x50, %xmm10, %xmm5 # xmm5 = xmm10[0,0,1,1]
movaps 0x13b926a(%rip), %xmm9 # 0x1eeca10
subps %xmm5, %xmm9
movaps 0x80(%rsp), %xmm12
mulps %xmm5, %xmm12
mulps %xmm5, %xmm3
mulps %xmm5, %xmm4
movaps (%rsp), %xmm11
mulps %xmm5, %xmm11
mulps %xmm9, %xmm0
addps %xmm12, %xmm0
mulps %xmm9, %xmm7
addps %xmm3, %xmm7
mulps %xmm9, %xmm8
addps %xmm4, %xmm8
mulps %xmm2, %xmm9
addps %xmm11, %xmm9
movaps 0x13b9244(%rip), %xmm2 # 0x1eeca30
subps %xmm10, %xmm2
movaps 0x10(%rsp), %xmm12
movaps %xmm12, %xmm3
shufps $0x55, %xmm12, %xmm3 # xmm3 = xmm3[1,1],xmm12[1,1]
mulps %xmm10, %xmm3
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
mulps %xmm2, %xmm12
addps %xmm3, %xmm12
movaps %xmm12, %xmm2
shufps $0x55, %xmm12, %xmm2 # xmm2 = xmm2[1,1],xmm12[1,1]
movaps %xmm15, %xmm4
divss %xmm1, %xmm4
movaps %xmm9, %xmm1
unpckhpd %xmm9, %xmm1 # xmm1 = xmm1[1],xmm9[1]
subps %xmm9, %xmm1
subps %xmm8, %xmm9
movaps %xmm8, %xmm3
unpckhpd %xmm8, %xmm3 # xmm3 = xmm3[1],xmm8[1]
subps %xmm8, %xmm3
movaps %xmm8, %xmm5
subps %xmm7, %xmm5
movaps %xmm7, %xmm10
unpckhpd %xmm7, %xmm10 # xmm10 = xmm10[1],xmm7[1]
subps %xmm7, %xmm10
subps %xmm0, %xmm7
movaps 0x13bd702(%rip), %xmm11 # 0x1ef0f60
mulps %xmm11, %xmm7
mulps %xmm11, %xmm5
mulps %xmm11, %xmm9
movaps %xmm5, %xmm6
minps %xmm9, %xmm6
maxps %xmm9, %xmm5
movaps %xmm7, %xmm8
minps %xmm6, %xmm8
maxps %xmm5, %xmm7
movaps %xmm8, %xmm5
unpckhpd %xmm8, %xmm5 # xmm5 = xmm5[1],xmm8[1]
movaps %xmm7, %xmm6
unpckhpd %xmm7, %xmm6 # xmm6 = xmm6[1],xmm7[1]
minps %xmm5, %xmm8
maxps %xmm6, %xmm7
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps %xmm4, %xmm8
mulps %xmm7, %xmm4
subss %xmm12, %xmm2
movaps %xmm15, %xmm7
divss %xmm2, %xmm7
movaps %xmm0, %xmm2
unpckhpd %xmm0, %xmm2 # xmm2 = xmm2[1],xmm0[1]
subps %xmm0, %xmm2
movaps %xmm2, %xmm6
minps %xmm10, %xmm6
maxps %xmm10, %xmm2
movaps %xmm3, %xmm0
minps %xmm1, %xmm0
minps %xmm0, %xmm6
maxps %xmm1, %xmm3
maxps %xmm3, %xmm2
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps %xmm7, %xmm6
mulps %xmm2, %xmm7
movaps 0x20(%rsp), %xmm0
movaps %xmm0, %xmm1
unpcklps %xmm12, %xmm1 # xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1]
movaps %xmm1, (%rsp)
movaps %xmm12, 0x10(%rsp)
movaps %xmm12, %xmm5
shufps $0x11, %xmm0, %xmm5 # xmm5 = xmm5[1,0],xmm0[1,0]
shufps $0xe2, %xmm0, %xmm5 # xmm5 = xmm5[2,0],xmm0[2,3]
movaps %xmm1, %xmm0
addps %xmm5, %xmm0
mulps 0x13be554(%rip), %xmm0 # 0x1ef1e60
movaps %xmm0, %xmm10
shufps $0x0, %xmm0, %xmm10 # xmm10 = xmm10[0,0],xmm0[0,0]
movaps 0x3a0(%rsp), %xmm1
mulps %xmm10, %xmm1
addps 0x120(%rsp), %xmm1
movaps 0x390(%rsp), %xmm2
mulps %xmm10, %xmm2
addps 0x110(%rsp), %xmm2
movaps 0x380(%rsp), %xmm14
mulps %xmm10, %xmm14
addps 0x100(%rsp), %xmm14
subps %xmm2, %xmm14
mulps %xmm10, %xmm14
addps %xmm2, %xmm14
subps %xmm1, %xmm2
mulps %xmm10, %xmm2
addps %xmm1, %xmm2
subps %xmm2, %xmm14
mulps %xmm14, %xmm10
addps %xmm2, %xmm10
mulps %xmm11, %xmm14
movaps %xmm10, %xmm9
movlhps %xmm10, %xmm9 # xmm9 = xmm9[0],xmm10[0]
movhlps %xmm10, %xmm10 # xmm10 = xmm10[1,1]
movaps %xmm0, %xmm11
shufps $0x55, %xmm0, %xmm11 # xmm11 = xmm11[1,1],xmm0[1,1]
subps %xmm9, %xmm10
movaps %xmm14, %xmm1
movlhps %xmm14, %xmm1 # xmm1 = xmm1[0],xmm14[0]
movhlps %xmm14, %xmm14 # xmm14 = xmm14[1,1]
subps %xmm1, %xmm14
mulps %xmm11, %xmm14
addps %xmm1, %xmm14
movaps %xmm10, %xmm1
movaps 0x13b8d1b(%rip), %xmm3 # 0x1eec6d0
xorps %xmm3, %xmm1
movaps %xmm10, %xmm12
shufps $0x55, %xmm10, %xmm12 # xmm12 = xmm12[1,1],xmm10[1,1]
movaps %xmm12, %xmm2
movlhps %xmm14, %xmm1 # xmm1 = xmm1[0],xmm14[0]
shufps $0x8, %xmm14, %xmm1 # xmm1 = xmm1[0,2],xmm14[0,0]
mulss %xmm14, %xmm12
shufps $0x55, %xmm14, %xmm14 # xmm14 = xmm14[1,1,1,1]
movaps %xmm14, %xmm15
mulss %xmm10, %xmm14
movaps %xmm10, %xmm13
mulps %xmm11, %xmm13
movaps (%rsp), %xmm10
subps %xmm0, %xmm10
movaps %xmm5, %xmm11
subps %xmm0, %xmm11
shufps $0x54, %xmm0, %xmm0 # xmm0 = xmm0[0,1,1,1]
addps %xmm9, %xmm13
xorps %xmm3, %xmm15
unpcklps %xmm15, %xmm2 # xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1]
shufps $0x4, %xmm15, %xmm2 # xmm2 = xmm2[0,1],xmm15[0,0]
subss %xmm14, %xmm12
shufps $0x0, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
divps %xmm12, %xmm2
divps %xmm12, %xmm1
movaps %xmm13, %xmm9
shufps $0x0, %xmm13, %xmm9 # xmm9 = xmm9[0,0],xmm13[0,0]
mulps %xmm2, %xmm9
shufps $0x55, %xmm13, %xmm13 # xmm13 = xmm13[1,1,1,1]
mulps %xmm1, %xmm13
addps %xmm9, %xmm13
subps %xmm13, %xmm0
movaps %xmm2, %xmm9
shufps $0x55, %xmm2, %xmm9 # xmm9 = xmm9[1,1],xmm2[1,1]
movaps %xmm8, %xmm12
unpcklps %xmm6, %xmm12 # xmm12 = xmm12[0],xmm6[0],xmm12[1],xmm6[1]
movaps %xmm9, %xmm14
mulps %xmm12, %xmm14
movaps %xmm4, %xmm13
unpcklps %xmm7, %xmm13 # xmm13 = xmm13[0],xmm7[0],xmm13[1],xmm7[1]
mulps %xmm13, %xmm9
movaps %xmm14, %xmm15
minps %xmm9, %xmm15
maxps %xmm14, %xmm9
movaps %xmm1, %xmm14
shufps $0x55, %xmm1, %xmm14 # xmm14 = xmm14[1,1],xmm1[1,1]
shufps $0x11, %xmm8, %xmm6 # xmm6 = xmm6[1,0],xmm8[1,0]
shufps $0xe2, %xmm8, %xmm6 # xmm6 = xmm6[2,0],xmm8[2,3]
movaps %xmm14, %xmm3
mulps %xmm6, %xmm3
shufps $0x11, %xmm4, %xmm7 # xmm7 = xmm7[1,0],xmm4[1,0]
shufps $0xe2, %xmm4, %xmm7 # xmm7 = xmm7[2,0],xmm4[2,3]
mulps %xmm7, %xmm14
movaps %xmm3, %xmm8
minps %xmm14, %xmm8
addps %xmm15, %xmm8
maxps %xmm3, %xmm14
addps %xmm9, %xmm14
movsd 0x13b8c3e(%rip), %xmm4 # 0x1eec6f0
movaps %xmm4, %xmm3
subps %xmm14, %xmm3
subps %xmm8, %xmm4
movaps %xmm10, %xmm9
mulps %xmm3, %xmm9
movaps %xmm2, %xmm8
shufps $0x0, %xmm2, %xmm8 # xmm8 = xmm8[0,0],xmm2[0,0]
mulps %xmm8, %xmm12
mulps %xmm13, %xmm8
movaps %xmm12, %xmm13
minps %xmm8, %xmm13
maxps %xmm12, %xmm8
movaps %xmm1, %xmm14
shufps $0x0, %xmm1, %xmm14 # xmm14 = xmm14[0,0],xmm1[0,0]
mulps %xmm14, %xmm6
mulps %xmm7, %xmm14
movaps %xmm6, %xmm15
minps %xmm14, %xmm15
addps %xmm13, %xmm15
movaps %xmm10, %xmm12
mulps %xmm4, %xmm12
mulps %xmm11, %xmm3
mulps %xmm11, %xmm4
maxps %xmm6, %xmm14
addps %xmm8, %xmm14
movss 0x13b8bf5(%rip), %xmm7 # 0x1eec714
movaps %xmm7, %xmm6
subps %xmm14, %xmm6
subps %xmm15, %xmm7
movaps %xmm10, %xmm8
mulps %xmm6, %xmm8
mulps %xmm7, %xmm10
mulps %xmm11, %xmm6
mulps %xmm11, %xmm7
movaps %xmm8, %xmm11
minps %xmm10, %xmm11
movaps %xmm6, %xmm13
minps %xmm7, %xmm13
minps %xmm13, %xmm11
maxps %xmm8, %xmm10
maxps %xmm6, %xmm7
maxps %xmm10, %xmm7
movaps %xmm9, %xmm6
minps %xmm12, %xmm6
movaps %xmm3, %xmm8
minps %xmm4, %xmm8
minps %xmm8, %xmm6
maxps %xmm9, %xmm12
maxps %xmm3, %xmm4
maxps %xmm12, %xmm4
movaps %xmm11, %xmm3
unpcklps %xmm6, %xmm3 # xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
shufps $0x11, %xmm11, %xmm6 # xmm6 = xmm6[1,0],xmm11[1,0]
shufps $0xe2, %xmm11, %xmm6 # xmm6 = xmm6[2,0],xmm11[2,3]
addps %xmm3, %xmm6
movaps %xmm7, %xmm3
unpcklps %xmm4, %xmm3 # xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
shufps $0x11, %xmm7, %xmm4 # xmm4 = xmm4[1,0],xmm7[1,0]
shufps $0xe2, %xmm7, %xmm4 # xmm4 = xmm4[2,0],xmm7[2,3]
addps %xmm3, %xmm4
addps %xmm0, %xmm6
addps %xmm0, %xmm4
movaps (%rsp), %xmm7
maxps %xmm6, %xmm7
movaps %xmm4, %xmm3
minps %xmm5, %xmm3
cmpltps %xmm7, %xmm3
unpcklps %xmm3, %xmm3 # xmm3 = xmm3[0,0,1,1]
movmskpd %xmm3, %ecx
testl %ecx, %ecx
jne 0xb340e5
ucomiss %xmm4, %xmm5
seta %dl
xorl %ecx, %ecx
ucomiss 0x20(%rsp), %xmm6
movaps 0x13b8ae4(%rip), %xmm8 # 0x1eec6c0
movaps 0x370(%rsp), %xmm12
jbe 0xb33c35
testb %dl, %dl
xorps %xmm9, %xmm9
movss 0x13b8b1e(%rip), %xmm13 # 0x1eec714
movss 0x13bd3ed(%rip), %xmm14 # 0x1ef0fec
movaps 0x120(%rsp), %xmm15
movaps 0x110(%rsp), %xmm10
movaps 0x100(%rsp), %xmm11
je 0xb33c66
shufps $0x55, %xmm6, %xmm6 # xmm6 = xmm6[1,1,1,1]
ucomiss 0x10(%rsp), %xmm6
seta %dl
cmpltps %xmm5, %xmm4
pextrw $0x2, %xmm4, %ecx
andb %dl, %cl
jmp 0xb33c66
xorps %xmm9, %xmm9
movss 0x13b8ad2(%rip), %xmm13 # 0x1eec714
movss 0x13bd3a1(%rip), %xmm14 # 0x1ef0fec
movaps 0x120(%rsp), %xmm15
movaps 0x110(%rsp), %xmm10
movaps 0x100(%rsp), %xmm11
orb %cl, %al
cmpb $0x1, %al
jne 0xb340f3
movl $0xc8, %eax
movaps %xmm13, %xmm4
subss %xmm0, %xmm4
movaps %xmm4, %xmm3
movaps %xmm0, %xmm5
mulss %xmm0, %xmm5
movaps %xmm0, %xmm6
mulss %xmm5, %xmm6
mulss %xmm14, %xmm5
mulss %xmm4, %xmm5
mulss %xmm4, %xmm4
mulss %xmm4, %xmm3
movaps %xmm0, %xmm7
mulss %xmm14, %xmm7
mulss %xmm4, %xmm7
shufps $0x0, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
shufps $0x0, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps %xmm12, %xmm6
mulps %xmm11, %xmm5
addps %xmm6, %xmm5
mulps %xmm10, %xmm7
addps %xmm5, %xmm7
mulps %xmm15, %xmm3
addps %xmm7, %xmm3
movaps %xmm3, %xmm4
movlhps %xmm3, %xmm4 # xmm4 = xmm4[0],xmm3[0]
movhlps %xmm3, %xmm3 # xmm3 = xmm3[1,1]
movaps %xmm0, %xmm5
shufps $0x55, %xmm0, %xmm5 # xmm5 = xmm5[1,1],xmm0[1,1]
subps %xmm4, %xmm3
mulps %xmm5, %xmm3
addps %xmm4, %xmm3
movaps %xmm3, %xmm4
shufps $0x0, %xmm3, %xmm4 # xmm4 = xmm4[0,0],xmm3[0,0]
mulps %xmm2, %xmm4
movaps %xmm3, %xmm5
shufps $0x55, %xmm3, %xmm5 # xmm5 = xmm5[1,1],xmm3[1,1]
mulps %xmm1, %xmm5
addps %xmm4, %xmm5
subps %xmm5, %xmm0
andps %xmm8, %xmm3
pshufd $0x55, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
maxss %xmm3, %xmm4
movaps 0x30(%rsp), %xmm3
ucomiss %xmm4, %xmm3
ja 0xb33d2c
decq %rax
jne 0xb33c75
jmp 0xb340e5
ucomiss %xmm9, %xmm0
jb 0xb340e5
ucomiss %xmm0, %xmm13
jb 0xb340e5
movaps %xmm0, %xmm2
shufps $0x55, %xmm0, %xmm2 # xmm2 = xmm2[1,1],xmm0[1,1]
ucomiss %xmm9, %xmm2
jb 0xb340e5
ucomiss %xmm2, %xmm13
jb 0xb340e5
movss 0x18(%r12), %xmm4
movss 0x28(%r12), %xmm1
unpcklps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
movss 0x38(%r12), %xmm1
movlhps %xmm1, %xmm4 # xmm4 = xmm4[0],xmm1[0]
movaps (%rsi), %xmm6
movaps 0xb0(%rsp), %xmm1
subps %xmm6, %xmm1
mulps %xmm4, %xmm1
movaps %xmm1, %xmm3
shufps $0x55, %xmm1, %xmm3 # xmm3 = xmm3[1,1],xmm1[1,1]
addss %xmm1, %xmm3
movhlps %xmm1, %xmm1 # xmm1 = xmm1[1,1]
addss %xmm3, %xmm1
movaps 0xa0(%rsp), %xmm3
subps %xmm6, %xmm3
mulps %xmm4, %xmm3
movaps %xmm3, %xmm5
shufps $0x55, %xmm3, %xmm5 # xmm5 = xmm5[1,1],xmm3[1,1]
addss %xmm3, %xmm5
movhlps %xmm3, %xmm3 # xmm3 = xmm3[1,1]
addss %xmm5, %xmm3
movaps 0x90(%rsp), %xmm5
subps %xmm6, %xmm5
mulps %xmm4, %xmm5
movaps %xmm5, %xmm7
shufps $0x55, %xmm5, %xmm7 # xmm7 = xmm7[1,1],xmm5[1,1]
addss %xmm5, %xmm7
movhlps %xmm5, %xmm5 # xmm5 = xmm5[1,1]
addss %xmm7, %xmm5
movaps 0x70(%rsp), %xmm7
subps %xmm6, %xmm7
mulps %xmm4, %xmm7
movaps %xmm7, %xmm8
shufps $0x55, %xmm7, %xmm8 # xmm8 = xmm8[1,1],xmm7[1,1]
addss %xmm7, %xmm8
movhlps %xmm7, %xmm7 # xmm7 = xmm7[1,1]
addss %xmm8, %xmm7
movaps 0xc0(%rsp), %xmm8
subps %xmm6, %xmm8
mulps %xmm4, %xmm8
movaps %xmm8, %xmm9
shufps $0x55, %xmm8, %xmm9 # xmm9 = xmm9[1,1],xmm8[1,1]
addss %xmm8, %xmm9
movhlps %xmm8, %xmm8 # xmm8 = xmm8[1,1]
addss %xmm9, %xmm8
movaps 0xd0(%rsp), %xmm9
subps %xmm6, %xmm9
mulps %xmm4, %xmm9
movaps %xmm9, %xmm10
shufps $0x55, %xmm9, %xmm10 # xmm10 = xmm10[1,1],xmm9[1,1]
addss %xmm9, %xmm10
movhlps %xmm9, %xmm9 # xmm9 = xmm9[1,1]
addss %xmm10, %xmm9
movaps 0x60(%rsp), %xmm11
subps %xmm6, %xmm11
mulps %xmm4, %xmm11
movaps %xmm11, %xmm10
shufps $0x55, %xmm11, %xmm10 # xmm10 = xmm10[1,1],xmm11[1,1]
addss %xmm11, %xmm10
movhlps %xmm11, %xmm11 # xmm11 = xmm11[1,1]
addss %xmm10, %xmm11
movaps 0x170(%rsp), %xmm12
subps %xmm6, %xmm12
mulps %xmm4, %xmm12
movaps %xmm12, %xmm4
shufps $0x55, %xmm12, %xmm4 # xmm4 = xmm4[1,1],xmm12[1,1]
addss %xmm12, %xmm4
movhlps %xmm12, %xmm12 # xmm12 = xmm12[1,1]
addss %xmm4, %xmm12
movaps %xmm13, %xmm10
subss %xmm2, %xmm10
mulss %xmm2, %xmm8
mulss %xmm2, %xmm9
mulss %xmm2, %xmm11
mulss %xmm2, %xmm12
mulss %xmm10, %xmm1
addss %xmm8, %xmm1
mulss %xmm10, %xmm3
addss %xmm9, %xmm3
mulss %xmm10, %xmm5
addss %xmm11, %xmm5
mulss %xmm7, %xmm10
addss %xmm12, %xmm10
movaps %xmm13, %xmm9
subss %xmm0, %xmm9
movaps %xmm9, %xmm7
mulss %xmm9, %xmm7
movaps %xmm9, %xmm4
mulss %xmm7, %xmm4
movaps %xmm0, %xmm6
mulss %xmm14, %xmm6
mulss %xmm7, %xmm6
movaps %xmm0, %xmm7
mulps %xmm0, %xmm7
movaps %xmm0, %xmm8
mulps %xmm7, %xmm8
mulss %xmm14, %xmm7
mulss %xmm9, %xmm7
mulss %xmm8, %xmm10
mulss %xmm7, %xmm5
addss %xmm10, %xmm5
mulss %xmm6, %xmm3
addss %xmm5, %xmm3
mulss %xmm4, %xmm1
addss %xmm3, %xmm1
ucomiss 0xc(%rsi), %xmm1
jb 0xb340e5
movss 0x20(%rsi), %xmm13
ucomiss %xmm1, %xmm13
jb 0xb340e5
movaps 0x13b8ab7(%rip), %xmm5 # 0x1eeca10
subps %xmm2, %xmm5
movaps 0xc0(%rsp), %xmm3
mulps %xmm2, %xmm3
movaps 0xd0(%rsp), %xmm10
mulps %xmm2, %xmm10
movaps 0x60(%rsp), %xmm11
mulps %xmm2, %xmm11
movaps 0xb0(%rsp), %xmm12
mulps %xmm5, %xmm12
addps %xmm3, %xmm12
movaps 0xa0(%rsp), %xmm3
mulps %xmm5, %xmm3
addps %xmm10, %xmm3
movaps 0x90(%rsp), %xmm10
mulps %xmm5, %xmm10
addps %xmm11, %xmm10
mulps 0x170(%rsp), %xmm2
mulps 0x70(%rsp), %xmm5
addps %xmm2, %xmm5
subps %xmm10, %xmm5
subps %xmm3, %xmm10
subps %xmm12, %xmm3
movaps %xmm0, %xmm2
shufps $0x0, %xmm0, %xmm2 # xmm2 = xmm2[0,0],xmm0[0,0]
movaps %xmm2, %xmm11
mulps %xmm10, %xmm11
shufps $0x0, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
mulps %xmm9, %xmm3
addps %xmm11, %xmm3
mulps %xmm2, %xmm5
mulps %xmm9, %xmm10
addps %xmm5, %xmm10
mulps %xmm2, %xmm10
mulps %xmm9, %xmm3
addps %xmm10, %xmm3
mulps 0x13bcf5b(%rip), %xmm3 # 0x1ef0f60
movq (%rbp), %rax
movq 0x1e8(%rax), %rax
movq %rbp, %rcx
movq 0x50(%rsp), %rdx
movq (%rax,%rdx,8), %rbp
movl 0x24(%rsi), %eax
testl %eax, 0x34(%rbp)
je 0xb340e2
movq %rbx, %r15
movq %r12, %rbx
shufps $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
mulps 0x280(%rsp), %xmm8
shufps $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
mulps 0x290(%rsp), %xmm7
addps %xmm8, %xmm7
shufps $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
mulps 0x2a0(%rsp), %xmm6
addps %xmm7, %xmm6
shufps $0x0, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
mulps 0x2b0(%rsp), %xmm4
addps %xmm6, %xmm4
movaps %xmm4, %xmm2
shufps $0xc9, %xmm4, %xmm2 # xmm2 = xmm2[1,2],xmm4[0,3]
movaps %xmm3, %xmm5
shufps $0xc9, %xmm3, %xmm5 # xmm5 = xmm5[1,2],xmm3[0,3]
mulps %xmm4, %xmm5
mulps %xmm3, %xmm2
subps %xmm5, %xmm2
movq %rcx, %r12
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0xb340fd
cmpq $0x0, 0x40(%rbp)
jne 0xb340fd
movss %xmm1, 0x20(%rsi)
movaps %xmm2, %xmm1
shufps $0xe9, %xmm2, %xmm1 # xmm1 = xmm1[1,2],xmm2[2,3]
movlps %xmm1, 0x30(%rsi)
movss %xmm2, 0x38(%rsi)
movlps %xmm0, 0x3c(%rsi)
movq 0xe8(%rsp), %rax
movl %eax, 0x44(%rsi)
movq 0x50(%rsp), %rax
movl %eax, 0x48(%rsi)
movq %r12, %rbp
movq 0x8(%r12), %rax
movl (%rax), %eax
movl %eax, 0x4c(%rsi)
movq 0x8(%r12), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%rsi)
movq %rbx, %r12
movq %r15, %rbx
jmp 0xb340e5
movq %rcx, %rbp
testb %r13b, %r13b
jne 0xb3317c
jmp 0xb32abc
xorl %r13d, %r13d
jmp 0xb340e5
movb $0x1, %r13b
jmp 0xb340e5
movq 0x8(%r12), %rax
movaps %xmm2, %xmm3
shufps $0xe9, %xmm2, %xmm3 # xmm3 = xmm3[1,2],xmm2[2,3]
movlps %xmm3, 0x240(%rsp)
movss %xmm2, 0x248(%rsp)
movlps %xmm0, 0x24c(%rsp)
movq 0xe8(%rsp), %rcx
movl %ecx, 0x254(%rsp)
movq 0x50(%rsp), %rcx
movl %ecx, 0x258(%rsp)
movl (%rax), %ecx
movl %ecx, 0x25c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x260(%rsp)
movss %xmm1, 0x20(%rsi)
movl $0xffffffff, 0x5c(%rsp) # imm = 0xFFFFFFFF
leaq 0x5c(%rsp), %rax
movq %rax, 0x140(%rsp)
movq 0x18(%rbp), %rax
movq %rax, 0x148(%rsp)
movq 0x8(%r12), %rax
movq %rax, 0x150(%rsp)
movq %rsi, 0x158(%rsp)
leaq 0x240(%rsp), %rax
movq %rax, 0x160(%rsp)
movl $0x1, 0x168(%rsp)
movq 0x40(%rbp), %rax
testq %rax, %rax
movss %xmm13, (%rsp)
je 0xb341eb
leaq 0x140(%rsp), %rdi
movq %r11, 0x80(%rsp)
callq *%rax
movss (%rsp), %xmm13
movq 0x80(%rsp), %r11
movq 0x48(%rsp), %rsi
movq 0x140(%rsp), %rax
cmpl $0x0, (%rax)
je 0xb34287
movq 0x10(%r12), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0xb3422c
testb $0x2, (%rcx)
jne 0xb34204
testb $0x40, 0x3e(%rbp)
je 0xb3421f
leaq 0x140(%rsp), %rdi
movq %r11, %rbp
callq *%rax
movss (%rsp), %xmm13
movq %rbp, %r11
movq 0x48(%rsp), %rsi
movq 0x140(%rsp), %rax
cmpl $0x0, (%rax)
je 0xb34287
movq 0x158(%rsp), %rax
movq 0x160(%rsp), %rcx
movss (%rcx), %xmm0
movss %xmm0, 0x30(%rax)
movss 0x4(%rcx), %xmm0
movss %xmm0, 0x34(%rax)
movss 0x8(%rcx), %xmm0
movss %xmm0, 0x38(%rax)
movss 0xc(%rcx), %xmm0
movss %xmm0, 0x3c(%rax)
movss 0x10(%rcx), %xmm0
movss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0xb3428d
movss %xmm13, 0x20(%rsi)
movq %r12, %rbp
jmp 0xb340da
movaps 0x20(%rsi), %xmm0
shufps $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
movaps 0x270(%rsp), %xmm1
cmpleps %xmm0, %xmm1
movmskps %xmm1, %eax
andl %r11d, %ebx
andl %eax, %ebx
jne 0xb31d4c
addq $0x3f8, %rsp # imm = 0x3F8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_mb_intersector.h
|
embree::sse2::createPrimRefArrayMBlur(embree::Scene*, embree::Geometry::GTypeMask, unsigned long, embree::vector_t<embree::PrimRef, embree::aligned_monitored_allocator<embree::PrimRef, 32ul>>&, embree::BuildProgressMonitor&, unsigned long)
|
/// Builds the PrimRef array for one motion-blur time step over all geometries in
/// 'scene' matching 'types', writing into 'prims' and returning the merged PrimInfo
/// (bounds + primitive count) of everything written.
///
/// Parameters:
///   scene           - scene whose geometries are iterated (via Scene::Iterator2)
///   types           - geometry type mask used to filter the iteration
///   numPrimRefs     - expected number of primrefs; used to detect whether any
///                     primitives were skipped in the first pass
///   prims           - output array the per-mesh createPrimRefArrayMB calls fill
///   progressMonitor - invoked with 0 before each parallel pass
///   itime           - motion-blur time step forwarded to createPrimRefArrayMB
///
/// Strategy: a two-phase parallel prefix-sum. The first pass (prefix_sum0) writes
/// each mesh's primrefs at the offset 'k' that assumes every primitive is valid.
/// If fewer primrefs were produced than expected (some primitives filtered out),
/// a second pass (prefix_sum1) re-runs with 'base.size()' — the exact running
/// count from the first pass — as the write offset, producing a dense array.
PrimInfo createPrimRefArrayMBlur(Scene* scene, Geometry::GTypeMask types, const size_t numPrimRefs, mvector<PrimRef>& prims, BuildProgressMonitor& progressMonitor, size_t itime)
{
ParallelForForPrefixSumState<PrimInfo> pstate;
Scene::Iterator2 iter(scene,types,true);
/* first try: assume no primitive gets filtered, write at precomputed offset k */
progressMonitor(0);
pstate.init(iter,size_t(1024));
PrimInfo pinfo = parallel_for_for_prefix_sum0( pstate, iter, PrimInfo(empty), [&](Geometry* mesh, const range<size_t>& r, size_t k, size_t geomID) -> PrimInfo {
return mesh->createPrimRefArrayMB(prims,itime,r,k,(unsigned)geomID);
}, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
/* if we need to filter out geometry, run again */
/* (count mismatch means some primitives were invalid; the second pass uses the
   exact prefix-sum base from pass one so the output array stays dense) */
if (pinfo.size() != numPrimRefs)
{
progressMonitor(0);
pinfo = parallel_for_for_prefix_sum1( pstate, iter, PrimInfo(empty), [&](Geometry* mesh, const range<size_t>& r, size_t k, size_t geomID, const PrimInfo& base) -> PrimInfo {
return mesh->createPrimRefArrayMB(prims,itime,r,base.size(),(unsigned)geomID);
}, [](const PrimInfo& a, const PrimInfo& b) -> PrimInfo { return PrimInfo::merge(a,b); });
}
return pinfo;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x2db8, %rsp # imm = 0x2DB8
movq %r8, 0x40(%rsp)
movq %rcx, 0x58(%rsp)
movq %rdi, %rbx
movq $0x0, 0x5a0(%rsp)
movq %rsi, 0x28(%rsp)
movq %rdx, 0x30(%rsp)
movb $0x1, 0x38(%rsp)
movq (%r9), %rax
xorl %r13d, %r13d
movq %r9, 0x50(%rsp)
movq %r9, %rdi
xorl %esi, %esi
callq *(%rax)
movq 0x28(%rsp), %rcx
movq 0x1d8(%rcx), %rax
testq %rax, %rax
je 0xb3db68
movq 0x1e8(%rcx), %rdx
movq 0x30(%rsp), %rsi
movb 0x38(%rsp), %dil
xorl %r13d, %r13d
xorl %r8d, %r8d
movq (%rdx,%r8,8), %r9
testq %r9, %r9
je 0xb3db5b
movl 0x3c(%r9), %ecx
btl $0x15, %ecx
jae 0xb3db5b
movl $0x1, %r10d
shll %cl, %r10d
movslq %r10d, %rcx
testq %rcx, %rsi
je 0xb3db5b
cmpl $0x1, 0x24(%r9)
setne %cl
cmpb %cl, %dil
jne 0xb3db5b
movl 0x20(%r9), %ecx
jmp 0xb3db5d
xorl %ecx, %ecx
addq %rcx, %r13
incq %r8
cmpq %r8, %rax
jne 0xb3db24
movq %r13, 0x5a8(%rsp)
xorl %r15d, %r15d
xorl %edi, %edi
callq 0x6ab80
cltq
leaq 0x3ff(%r13), %rcx
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r14d
cmovbq %rcx, %r14
cmpq $0x1, %r14
adcq %r15, %r14
movq %r14, 0x5a0(%rsp)
movq %r15, 0x1a0(%rsp)
movq %r15, 0x3a0(%rsp)
cmpq $0x2, %rcx
jb 0xb3dcb2
xorl %esi, %esi
movq %r13, %rax
xorl %edx, %edx
divq %r14
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %r8
movl $0x1, %r9d
movb 0x38(%rsp), %r10b
xorl %r11d, %r11d
movq 0x1e8(%rdi), %rcx
movq (%rcx,%r11,8), %rdx
testq %rdx, %rdx
je 0xb3dc21
movl 0x3c(%rdx), %ecx
btl $0x15, %ecx
jae 0xb3dc21
movl $0x1, %ebp
shll %cl, %ebp
movslq %ebp, %rcx
testq %rcx, %r8
je 0xb3dc21
cmpl $0x1, 0x24(%rdx)
setne %cl
cmpb %cl, %r10b
jne 0xb3dc21
movl 0x20(%rdx), %r14d
jmp 0xb3dc24
xorl %r14d, %r14d
testq %r14, %r14
je 0xb3dc8e
leaq 0x1(%r9), %rcx
imulq %r13, %rcx
xorl %edx, %edx
xorl %r15d, %r15d
movq %rsi, %r12
movq %rax, %rsi
leaq (%r12,%r14), %rax
addq %rdx, %rax
cmpq %rsi, %rax
jb 0xb3dc92
cmpq 0x5a0(%rsp), %r9
jae 0xb3dc92
movq %r11, 0x1a0(%rsp,%r9,8)
movq %rsi, %rax
subq %r12, %rax
addq %rax, %r15
movq %r15, 0x3a0(%rsp,%r9,8)
incq %r9
movq %rcx, %rax
xorl %edx, %edx
divq 0x5a0(%rsp)
movq %r15, %rdx
negq %rdx
addq %r13, %rcx
movq %rsi, %r12
cmpq %r14, %r15
jb 0xb3dc39
jmp 0xb3dc98
xorl %edx, %edx
jmp 0xb3dc98
movq %rsi, %rax
movq %r12, %rsi
addq %r14, %rsi
addq %rdx, %rsi
incq %r11
movq 0x5a0(%rsp), %r14
cmpq %r14, %r9
jb 0xb3dbe7
movaps 0x13add37(%rip), %xmm0 # 0x1eeb9f0
leaq 0x80(%rsp), %rbp
movaps %xmm0, (%rbp)
movaps 0x13add34(%rip), %xmm1 # 0x1eeba00
movaps %xmm1, 0x10(%rbp)
movaps %xmm0, 0x20(%rbp)
movaps %xmm1, 0x30(%rbp)
xorps %xmm0, %xmm0
movaps %xmm0, 0x40(%rbp)
leaq 0x70(%rsp), %r15
movq 0x40(%rsp), %rax
movq %rax, (%r15)
leaq 0x2df0(%rsp), %rax
movq %rax, 0x8(%r15)
leaq 0x28(%rsp), %rcx
leaq 0x48(%rsp), %r13
movq %rcx, (%r13)
leaq 0x60(%rsp), %rax
movq %r15, (%rax)
movq %rcx, 0x8(%rax)
leaq 0x10(%rsp), %rcx
movq %r14, (%rcx)
leaq 0x1a0(%rsp), %rdx
leaq 0xd0(%rsp), %r12
movq %rdx, (%r12)
movq %rcx, 0x8(%r12)
movq %rbp, 0x10(%r12)
movq %r13, 0x18(%r12)
leaq 0xf(%rsp), %rcx
movq %rcx, 0x20(%r12)
movq %rax, 0x28(%r12)
leaq 0x120(%rsp), %rdi
movw $0x401, 0xc(%rdi) # imm = 0x401
movups %xmm0, 0x20(%rdi)
movq $0x8, 0x40(%rdi)
callq 0x6a660
movq %r12, 0x18(%rsp)
testq %r14, %r14
je 0xb3de53
leaq 0x20(%rsp), %rdi
movq $0x0, (%rdi)
movl $0xc0, %esi
callq 0x6ac50
movq %rax, %r13
xorps %xmm0, %xmm0
movups %xmm0, 0x28(%rax)
movups %xmm0, 0x18(%rax)
movups %xmm0, 0x8(%rax)
xorl %eax, %eax
movq %rax, 0x38(%r13)
leaq 0x15c91ca(%rip), %rcx # 0x2106f78
movq %rcx, (%r13)
movq %r14, 0x40(%r13)
movq %rax, 0x48(%r13)
movl $0x1, %r14d
movq %r14, 0x50(%r13)
leaq 0x18(%rsp), %rcx
movq %rcx, 0x58(%r13)
movq %rax, 0x60(%r13)
movq %r14, 0x68(%r13)
xorl %edi, %edi
callq 0x6ab80
cltq
movabsq $0x3fffffffffffffff, %rcx # imm = 0x3FFFFFFFFFFFFFFF
andq %rax, %rcx
movl $0x0, 0x80(%r13)
movb $0x5, 0x84(%r13)
addq %rcx, %rcx
movq %rcx, 0x78(%r13)
movq 0x20(%rsp), %rax
movq %rax, 0x88(%r13)
leaq 0x110(%rsp), %rdx
movq $0x0, -0x10(%rdx)
movl $0x1, -0x8(%rdx)
movq %r14, (%rdx)
movq %r14, 0x8(%rdx)
leaq 0x100(%rsp), %rax
movq %rax, 0x70(%r13)
leaq 0x120(%rsp), %rcx
movq %r13, %rdi
movq %rcx, %rsi
callq 0x6a550
leaq 0x48(%rsp), %r13
leaq 0x120(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0xb3e1b4
leaq 0x120(%rsp), %rdi
callq 0x6aab0
leaq 0xc0(%rsp), %rax
movaps -0x40(%rax), %xmm0
movaps %xmm0, (%rbx)
movaps -0x30(%rax), %xmm1
movaps %xmm1, 0x10(%rbx)
movaps -0x20(%rax), %xmm2
movaps %xmm2, 0x20(%rbx)
movaps -0x10(%rax), %xmm3
movaps %xmm3, 0x30(%rbx)
movdqa (%rax), %xmm4
movdqa %xmm4, 0x40(%rbx)
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0xb3df06
leaq 0x40(%rbx), %rcx
movdqa (%rcx), %xmm4
leaq 0x19f0(%rsp), %rdx
movaps %xmm0, -0x40(%rdx)
movaps %xmm1, -0x30(%rdx)
movaps %xmm2, -0x20(%rdx)
movaps %xmm3, -0x10(%rdx)
movaps (%rcx), %xmm5
movaps %xmm5, (%rdx)
minps -0x1440(%rdx), %xmm0
maxps -0x1430(%rdx), %xmm1
minps -0x1420(%rdx), %xmm2
maxps -0x1410(%rdx), %xmm3
paddq -0x1400(%rdx), %xmm4
movdqa %xmm4, (%rcx)
addq $0x50, %rdx
decq %rax
jne 0xb3debf
movaps %xmm0, (%rbx)
movaps %xmm1, 0x10(%rbx)
movaps %xmm2, 0x20(%rbx)
movaps %xmm3, 0x30(%rbx)
movq 0x48(%rbx), %rax
subq 0x40(%rbx), %rax
cmpq 0x58(%rsp), %rax
je 0xb3e19f
movq 0x50(%rsp), %rdi
movq (%rdi), %rax
xorl %esi, %esi
callq *(%rax)
movaps 0x13adab5(%rip), %xmm0 # 0x1eeb9f0
movaps %xmm0, 0x80(%rsp)
movaps 0x13adab6(%rip), %xmm1 # 0x1eeba00
movaps %xmm1, 0x90(%rsp)
movaps %xmm0, 0xa0(%rsp)
movaps %xmm1, 0xb0(%rsp)
xorps %xmm0, %xmm0
movaps %xmm0, 0xc0(%rsp)
movq 0x40(%rsp), %rax
movq %rax, 0x70(%rsp)
leaq 0x2df0(%rsp), %rax
movq %rax, 0x78(%rsp)
leaq 0x28(%rsp), %rax
movq %rax, 0x48(%rsp)
movq %r15, 0x60(%rsp)
movq %rax, 0x68(%rsp)
movq 0x5a0(%rsp), %r15
movq %r15, 0x10(%rsp)
leaq 0x1a0(%rsp), %rax
movq %rax, 0xd0(%rsp)
leaq 0x10(%rsp), %rax
movq %rax, 0xd8(%rsp)
movq %rbp, 0xe0(%rsp)
movq %r13, 0xe8(%rsp)
leaq 0xf(%rsp), %rax
movq %rax, 0xf0(%rsp)
leaq 0x60(%rsp), %rax
movq %rax, 0xf8(%rsp)
leaq 0x120(%rsp), %rdi
movw $0x401, 0xc(%rdi) # imm = 0x401
movups %xmm0, 0x20(%rdi)
movq $0x8, 0x40(%rdi)
callq 0x6a660
movq %r12, 0x18(%rsp)
testq %r15, %r15
je 0xb3e0ed
leaq 0x20(%rsp), %rdi
movq $0x0, (%rdi)
movl $0xc0, %esi
callq 0x6ac50
movq %rax, %r14
xorps %xmm0, %xmm0
movups %xmm0, 0x28(%rax)
movups %xmm0, 0x18(%rax)
movups %xmm0, 0x8(%rax)
xorl %eax, %eax
movq %rax, 0x38(%r14)
leaq 0x15c8f72(%rip), %rcx # 0x2106fc0
movq %rcx, (%r14)
movq %r15, 0x40(%r14)
movq %rax, 0x48(%r14)
movl $0x1, %r15d
movq %r15, 0x50(%r14)
leaq 0x18(%rsp), %rcx
movq %rcx, 0x58(%r14)
movq %rax, 0x60(%r14)
movq %r15, 0x68(%r14)
xorl %edi, %edi
callq 0x6ab80
cltq
movabsq $0x3fffffffffffffff, %rcx # imm = 0x3FFFFFFFFFFFFFFF
andq %rax, %rcx
movl $0x0, 0x80(%r14)
movb $0x5, 0x84(%r14)
addq %rcx, %rcx
movq %rcx, 0x78(%r14)
movq 0x20(%rsp), %rax
movq %rax, 0x88(%r14)
leaq 0x110(%rsp), %rdx
movq $0x0, -0x10(%rdx)
movl $0x1, -0x8(%rdx)
movq %r15, (%rdx)
movq %r15, 0x8(%rdx)
leaq 0x100(%rsp), %rax
movq %rax, 0x70(%r14)
leaq 0x120(%rsp), %rcx
movq %r14, %rdi
movq %rcx, %rsi
callq 0x6a550
leaq 0x120(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0xb3e1e6
leaq 0x120(%rsp), %rdi
callq 0x6aab0
movaps 0x80(%rsp), %xmm3
movaps 0x90(%rsp), %xmm2
movaps 0xa0(%rsp), %xmm1
movaps 0xb0(%rsp), %xmm0
movdqa 0xc0(%rsp), %xmm4
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0xb3e18b
leaq 0x19f0(%rsp), %rcx
movaps %xmm3, -0x40(%rcx)
movaps %xmm2, -0x30(%rcx)
movaps %xmm1, -0x20(%rcx)
movaps %xmm0, -0x10(%rcx)
movdqa %xmm4, (%rcx)
minps -0x1440(%rcx), %xmm3
maxps -0x1430(%rcx), %xmm2
minps -0x1420(%rcx), %xmm1
maxps -0x1410(%rcx), %xmm0
paddq -0x1400(%rcx), %xmm4
addq $0x50, %rcx
decq %rax
jne 0xb3e14a
movaps %xmm3, (%rbx)
movaps %xmm2, 0x10(%rbx)
movaps %xmm1, 0x20(%rbx)
movaps %xmm0, 0x30(%rbx)
movdqa %xmm4, 0x40(%rbx)
movq %rbx, %rax
addq $0x2db8, %rsp # imm = 0x2DB8
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0x13ad928(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0x15e6a31(%rip), %rsi # 0x2124c08
movq 0x15e67ea(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0x13ad8f6(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0x15e69ff(%rip), %rsi # 0x2124c08
movq 0x15e67b8(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0xb3e239
jmp 0xb3e266
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0xb3e251
jmp 0xb3e236
movq %rax, %rbx
leaq 0x120(%rsp), %rdi
callq 0x6aab0
jmp 0xb3e25e
jmp 0xb3e266
jmp 0xb3e266
jmp 0xb3e24e
movq %rax, %rbx
leaq 0x120(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/builders/primrefgen.cpp
|
unsigned long embree::sse2::createMortonCodeArray<embree::Instance>(embree::Instance*, embree::vector_t<embree::sse2::BVHBuilderMorton::BuildPrim, embree::aligned_monitored_allocator<embree::sse2::BVHBuilderMorton::BuildPrim, 8ul>>&, embree::BuildProgressMonitor&)
|
/// Fills 'morton' with Morton-code build primitives for all valid primitives of
/// 'mesh' and returns the number of primitives actually generated (which may be
/// less than morton.size() when some primitives have invalid bounds).
///
/// Parameters:
///   mesh            - the mesh whose primitive bounds are encoded
///   morton          - pre-sized output array; its size() defines numPrimitives
///   progressMonitor - progress callback (declared but only used indirectly here;
///                     NOTE(review): not invoked in this visible body — confirm
///                     whether callers rely on progress reporting from this path)
size_t createMortonCodeArray(Mesh* mesh, mvector<BVHBuilderMorton::BuildPrim>& morton, BuildProgressMonitor& progressMonitor)
{
size_t numPrimitives = morton.size();
/* compute scene bounds */
/* parallel reduction over all primitives: counts valid primitives and extends
   a bounding box over the centers (center2) of their bounds */
std::pair<size_t,BBox3fa> cb_empty(0,empty);
auto cb = parallel_reduce
( size_t(0), numPrimitives, size_t(1024), cb_empty, [&](const range<size_t>& r) -> std::pair<size_t,BBox3fa>
{
size_t num = 0;
BBox3fa bounds = empty;
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa prim_bounds = empty;
/* primitives with invalid bounds are skipped and do not count */
if (unlikely(!mesh->buildBounds(j,&prim_bounds))) continue;
bounds.extend(center2(prim_bounds));
num++;
}
return std::make_pair(num,bounds);
}, [] (const std::pair<size_t,BBox3fa>& a, const std::pair<size_t,BBox3fa>& b) {
return std::make_pair(a.first + b.first,merge(a.second,b.second));
});
size_t numPrimitivesGen = cb.first;
const BBox3fa centBounds = cb.second;
/* compute morton codes */
if (likely(numPrimitivesGen == numPrimitives))
{
/* fast path if all primitives were valid: output index equals primitive
   index, so each range can write directly at r.begin() */
BVHBuilderMorton::MortonCodeMapping mapping(centBounds);
parallel_for( size_t(0), numPrimitives, size_t(1024), [&](const range<size_t>& r) -> void {
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[r.begin()]);
for (size_t j=r.begin(); j<r.end(); j++)
generator(mesh->bounds(j),unsigned(j));
});
}
else
{
/* slow path, fallback in case some primitives were invalid */
/* two prefix-sum passes: the first establishes per-range valid counts
   (its writes at r.begin() are provisional); the second re-runs with the
   running prefix count 'base' as the write offset, yielding a dense array */
ParallelPrefixSumState<size_t> pstate;
BVHBuilderMorton::MortonCodeMapping mapping(centBounds);
parallel_prefix_sum( pstate, size_t(0), numPrimitives, size_t(1024), size_t(0), [&](const range<size_t>& r, const size_t base) -> size_t {
size_t num = 0;
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[r.begin()]);
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa bounds = empty;
if (unlikely(!mesh->buildBounds(j,&bounds))) continue;
generator(bounds,unsigned(j));
num++;
}
return num;
}, std::plus<size_t>());
parallel_prefix_sum( pstate, size_t(0), numPrimitives, size_t(1024), size_t(0), [&](const range<size_t>& r, const size_t base) -> size_t {
size_t num = 0;
/* 'base' is the count of valid primitives in all preceding ranges */
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[base]);
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa bounds = empty;
if (!mesh->buildBounds(j,&bounds)) continue;
generator(bounds,unsigned(j));
num++;
}
return num;
}, std::plus<size_t>());
}
return numPrimitivesGen;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x568, %rsp # imm = 0x568
leaq 0x68(%rsp), %rax
movq %rdi, (%rax)
movq %rsi, %r14
movq 0x10(%rsi), %rbp
xorl %r12d, %r12d
leaq 0x130(%rsp), %r15
movq %r12, (%r15)
movaps 0x13a09cd(%rip), %xmm0 # 0x1eeb9f0
movaps %xmm0, 0x10(%r15)
movaps 0x13a09d1(%rip), %xmm0 # 0x1eeba00
movaps %xmm0, 0x20(%r15)
leaq 0x28(%rsp), %r13
movq %rax, (%r13)
leaq 0x168(%rsp), %rbx
movw $0x401, 0xc(%rbx) # imm = 0x401
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rbx)
movq $0x8, 0x40(%rbx)
movq %rbx, %rdi
callq 0x6a660
leaq 0x40(%rsp), %rdi
movq %rbp, (%rdi)
movq %r12, 0x8(%rdi)
movq $0x400, 0x10(%rdi) # imm = 0x400
leaq 0x8(%rsp), %rax
leaq 0x70(%rsp), %r12
movq %rax, (%r12)
movq %r13, 0x8(%r12)
leaq 0xb0(%rsp), %r13
movq %r15, (%r13)
movq %r12, 0x8(%r13)
movq %rax, 0x10(%r13)
movq (%r15), %rax
movq %rax, 0x20(%r13)
movaps 0x10(%r15), %xmm0
movaps %xmm0, 0x30(%r13)
movaps 0x20(%r15), %xmm0
movaps %xmm0, 0x40(%r13)
leaq 0x20(%rsp), %rdx
movq %r13, %rsi
movq %rbx, %rcx
callq 0xb54b10
movq 0xd0(%rsp), %rbx
movaps 0xe0(%rsp), %xmm0
movaps %xmm0, 0x90(%rsp)
movaps 0xf0(%rsp), %xmm0
movaps %xmm0, 0xa0(%rsp)
leaq 0x168(%rsp), %rdi
callq 0x6a770
testb %al, %al
leaq 0x68(%rsp), %r15
jne 0xb4b488
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movaps 0xa0(%rsp), %xmm3
movaps 0x90(%rsp), %xmm0
subps %xmm0, %xmm3
cmpq %rbp, %rbx
jne 0xb4b20b
movaps %xmm0, 0xb0(%rsp)
movaps %xmm3, %xmm0
cmpnleps 0x13a18ba(%rip), %xmm0 # 0x1eeca00
rcpps %xmm3, %xmm1
mulps %xmm1, %xmm3
movaps 0x13a18bd(%rip), %xmm2 # 0x1eeca10
subps %xmm3, %xmm2
mulps %xmm1, %xmm2
addps %xmm1, %xmm2
mulps 0x13a18bd(%rip), %xmm2 # 0x1eeca20
andps %xmm0, %xmm2
movaps %xmm2, 0xc0(%rsp)
movq %r13, 0x70(%rsp)
movq %r14, 0x78(%rsp)
movq %r15, 0x80(%rsp)
leaq 0x168(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x40(%rsp), %rdi
movq %rbp, (%rdi)
movq $0x0, 0x8(%rdi)
movq $0x400, 0x10(%rdi) # imm = 0x400
leaq 0x28(%rsp), %rsi
movq %r12, (%rsi)
leaq 0x20(%rsp), %rdx
movq %r14, %rcx
callq 0xb55564
leaq 0x168(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0xb4b4ba
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rax
addq $0x568, %rsp # imm = 0x568
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movaps %xmm0, 0x70(%rsp)
movaps %xmm3, %xmm0
cmpnleps 0x13a17e5(%rip), %xmm0 # 0x1eeca00
rcpps %xmm3, %xmm1
mulps %xmm1, %xmm3
movaps 0x13a17e8(%rip), %xmm2 # 0x1eeca10
subps %xmm3, %xmm2
mulps %xmm1, %xmm2
addps %xmm1, %xmm2
mulps 0x13a17e8(%rip), %xmm2 # 0x1eeca20
andps %xmm0, %xmm2
movaps %xmm2, 0x80(%rsp)
movq %r12, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq $0x0, 0x8(%rsp)
leaq 0x18(%rsp), %r13
movq %rbp, (%r13)
xorl %edi, %edi
callq 0x6ab80
cltq
movq (%r13), %rcx
movq %r13, %rdx
subq 0x8(%rsp), %rcx
addq $0x3ff, %rcx # imm = 0x3FF
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r13d
cmovbq %rcx, %r13
leaq 0x10(%rsp), %rcx
movq %r13, (%rcx)
leaq 0x8(%rsp), %rax
movq %rax, 0x40(%rsp)
movq %rdx, 0x48(%rsp)
movq %rcx, 0x50(%rsp)
leaq 0x168(%rsp), %rax
movq %rax, 0x58(%rsp)
leaq 0x28(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0xb0(%rsp), %rdi
movw $0x401, 0xc(%rdi) # imm = 0x401
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%rdi)
movq $0x8, 0x40(%rdi)
callq 0x6a660
leaq 0x20(%rsp), %rcx
leaq 0x40(%rsp), %rax
movq %rax, (%rcx)
leaq 0x7(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r13, %rsi
leaq 0xb0(%rsp), %r9
callq 0xb56103
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0xb4b4ec
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0xb4b35f
xorl %ecx, %ecx
xorl %edx, %edx
movq %rdx, 0x368(%rsp,%rcx,8)
addq 0x168(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %rax
jne 0xb4b347
movq %r12, 0x28(%rsp)
movq %r14, 0x30(%rsp)
movq %r15, 0x38(%rsp)
movq $0x0, 0x8(%rsp)
movq %rbp, 0x18(%rsp)
xorl %edi, %edi
callq 0x6ab80
cltq
movq 0x18(%rsp), %rcx
subq 0x8(%rsp), %rcx
addq $0x3ff, %rcx # imm = 0x3FF
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r14d
cmovbq %rcx, %r14
movq %r14, 0x10(%rsp)
leaq 0x8(%rsp), %rax
movq %rax, 0x40(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x48(%rsp)
leaq 0x10(%rsp), %rax
movq %rax, 0x50(%rsp)
leaq 0x168(%rsp), %rax
movq %rax, 0x58(%rsp)
leaq 0x28(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0xb0(%rsp), %r12
movw $0x401, 0xc(%r12) # imm = 0x401
xorps %xmm0, %xmm0
movups %xmm0, 0x20(%r12)
movq $0x8, 0x40(%r12)
movq %r12, %rdi
callq 0x6a660
leaq 0x20(%rsp), %rcx
leaq 0x40(%rsp), %rax
movq %rax, (%rcx)
leaq 0x7(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r14, %rsi
movq %r12, %r9
callq 0xb56e35
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0xb4b51e
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0xb4b1f6
xorl %ecx, %ecx
xorl %edx, %edx
movq %rdx, 0x368(%rsp,%rcx,8)
addq 0x168(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %rax
jne 0xb4b46b
jmp 0xb4b1f6
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0x13a0654(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0x15d975d(%rip), %rsi # 0x2124c08
movq 0x15d9516(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0x13a0622(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0x15d972b(%rip), %rsi # 0x2124c08
movq 0x15d94e4(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0x13a05f0(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0x15d96f9(%rip), %rsi # 0x2124c08
movq 0x15d94b2(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0x13a05be(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0x15d96c7(%rip), %rsi # 0x2124c08
movq 0x15d9480(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0xb4b575
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0xb4b589
jmp 0xb4b5e8
jmp 0xb4b572
jmp 0xb4b5e8
jmp 0xb4b586
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0xb4b5e0
jmp 0xb4b5e8
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0xb4b5e0
jmp 0xb4b5e8
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0xb4b5bf
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0xb4b5d3
jmp 0xb4b5e8
jmp 0xb4b5bc
jmp 0xb4b5e8
jmp 0xb4b5d0
movq %rax, %rbx
leaq 0x168(%rsp), %rdi
callq 0x6aab0
jmp 0xb4b5e0
jmp 0xb4b5e8
movq %rax, %rbx
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/builders/primrefgen.cpp
|
embree::FastAllocator::Block::getUsedBytes(embree::FastAllocator::AllocationType, bool) const
|
/// Sum the used bytes of every block in this chain that matches the
/// requested allocation type (and huge-page flag). Walks the singly
/// linked list starting at `this` via the `next` pointer.
size_t getUsedBytes(AllocationType atype, bool huge_pages = false) const {
  size_t total = 0;
  const Block* cur = this;
  while (cur != nullptr) {
    if (cur->hasType(atype,huge_pages))
      total += cur->getBlockUsedBytes();
    cur = cur->next;
  }
  return total;
}
|
xorl %eax, %eax
cmpl $0x3, %esi
je 0xb5eee6
movl 0x28(%rdi), %ecx
cmpl $0x1, %ecx
jne 0xb5eedd
cmpl $0x1, %esi
sete %r8b
cmpb %dl, 0x2c(%rdi)
sete %cl
andb %r8b, %cl
jmp 0xb5eee2
cmpl %esi, %ecx
sete %cl
testb %cl, %cl
je 0xb5eef7
movq (%rdi), %rcx
movq 0x10(%rdi), %r8
cmpq %r8, %rcx
cmovbq %rcx, %r8
addq %r8, %rax
movq 0x18(%rdi), %rdi
testq %rdi, %rdi
jne 0xb5eebe
retq
nop
|
/embree[P]embree/kernels/bvh/../common/alloc.h
|
Subsets and Splits
SQL Console for LLM4Binary/decompile-bench
Filters out entries with file names ending in .cpp, providing a basic subset of the dataset that excludes C++ files.