| name (string) | code (string) | asm (string) | file (string) |
|---|---|---|---|
embree::NodeRefPtr<8> embree::avx::BVHBuilderBinnedOpenMergeSAH::build<embree::NodeRefPtr<8>, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create2, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Set2, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&), embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(unsigned long)>(embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create2, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Set2, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&) const&, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(unsigned long), embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef*, unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> 
const&, embree::avx::GeneralBVHBuilder::Settings const&)
|
// Entry point of the binned-SAH builder with the open/merge heuristic.
// Thin forwarding wrapper: instantiates HeuristicArrayOpenMergeSAH over
// the primitive-reference array and delegates all real work to
// GeneralBVHBuilder::build().
//
// createAlloc     - factory for per-build allocators
// createNode      - creates an interior node
// updateNode      - sets/links children of an interior node
// createLeaf      - creates a leaf from a primitive range
// nodeOpenerFunc  - callback handed to the open/merge heuristic
// progressMonitor - build-progress callback
// prims           - primitive reference array the builder operates on
// extSize         - size of the extended part of the range (see PrimInfoExtRange)
// pinfo           - bounds/count info for the initial primitive range
// settings        - builder settings (branchingFactor is read here)
// returns the ReductionTy produced by the generic builder (the BVH root ref)
static ReductionTy build(CreateAllocFunc createAlloc,
CreateNodeFunc createNode,
UpdateNodeFunc updateNode,
const CreateLeafFunc& createLeaf,
NodeOpenerFunc nodeOpenerFunc,
ProgressMonitor progressMonitor,
BuildRef* prims,
const size_t extSize,
const PrimInfo& pinfo,
const Settings& settings)
{
// Heuristic binds the node opener, the prim array, and the branching factor.
typedef HeuristicArrayOpenMergeSAH<NodeOpenerFunc,BuildRef,NUM_OBJECT_BINS_HQ> Heuristic;
Heuristic heuristic(nodeOpenerFunc,prims,settings.branchingFactor);
// Initial range covers [0, pinfo.size()) plus 'extSize' extended slots.
return GeneralBVHBuilder::build<ReductionTy,Heuristic,Set,BuildRef>(
heuristic,
prims,
PrimInfoExtRange(0,pinfo.size(),extSize,pinfo),
createAlloc,
createNode,
updateNode,
createLeaf,
progressMonitor,
settings);
}
|
pushq %r14
pushq %rbx
subq $0x98, %rsp
movq %r8, %rax
movq %rcx, %r10
movq %rsi, %r8
movq %rdi, %rcx
movq 0xb0(%rsp), %rsi
movq 0xb8(%rsp), %rbx
leaq 0x10(%rsp), %r14
movq %rdx, (%r14)
leaq 0x8(%rsp), %r11
movq %r10, (%r11)
movq (%rbx), %rdx
leaq 0x18(%rsp), %rdi
movq %rax, (%rdi)
movq %r14, 0x8(%rdi)
movq %rdx, 0x10(%rdi)
movq 0x48(%rsi), %r10
subq 0x40(%rsi), %r10
vmovaps (%rsi), %xmm0
leaq 0x30(%rsp), %rdx
vmovaps %xmm0, (%rdx)
vmovaps 0x10(%rsi), %xmm0
vmovaps %xmm0, 0x10(%rdx)
vmovaps 0x20(%rsi), %xmm0
vmovaps %xmm0, 0x20(%rdx)
vmovaps 0x30(%rsi), %xmm0
vmovaps %xmm0, 0x30(%rdx)
movq $0x0, 0x40(%rdx)
movq %r10, 0x48(%rdx)
movq %r9, 0x50(%rdx)
movq %rbx, (%rsp)
movq %rax, %rsi
movq %r11, %r9
callq 0x141d324
addq $0x98, %rsp
popq %rbx
popq %r14
retq
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_sah.h
|
embree::avx::__internal_two_level_builder__::MeshBuilder<8, embree::Instance, embree::InstancePrimitive>::operator()(void*, embree::Instance*, unsigned long, embree::Geometry::GTypeMask, bool, embree::Builder*&)
|
// Selects and invokes the per-mesh BVH builder for 'mesh' based on the
// requested build quality, storing the result in 'builder'.
// If 'useMortonBuilder' is set, the quality flag is ignored and the
// Morton builder is chosen unconditionally.
void operator () (void* bvh, Mesh* mesh, size_t geomID, Geometry::GTypeMask gtype, bool useMortonBuilder, Builder*& builder) {
if(useMortonBuilder) {
builder = MortonBuilder<N,Mesh,Primitive>()(bvh,mesh,geomID,gtype);
return;
}
switch (mesh->quality) {
// LOW -> Morton builder
case RTC_BUILD_QUALITY_LOW: builder = MortonBuilder<N,Mesh,Primitive>()(bvh,mesh,geomID,gtype); break;
// MEDIUM and HIGH share the SAH builder
case RTC_BUILD_QUALITY_MEDIUM:
case RTC_BUILD_QUALITY_HIGH: builder = SAHBuilder<N,Mesh,Primitive>()(bvh,mesh,geomID,gtype); break;
// REFIT -> refitting builder
case RTC_BUILD_QUALITY_REFIT: builder = RefitBuilder<N,Mesh,Primitive>()(bvh,mesh,geomID,gtype); break;
// Any other quality value is a caller error.
default: throw_RTCError(RTC_ERROR_UNKNOWN,"invalid build quality");
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x28, %rsp
testl %r9d, %r9d
je 0x141bf7d
movq %rsi, %rdi
movq %rdx, %rsi
movq %r8, %rdx
xorl %r8d, %r8d
callq 0x1447d0a
jmp 0x141c037
movzwl 0x3e(%rdx), %eax
andl $0x7, %eax
leal -0x1(%rax), %edi
cmpl $0x2, %edi
jb 0x141c013
cmpl $0x3, %eax
je 0x141c026
testl %eax, %eax
je 0x141bf67
movl $0x30, %edi
callq 0x6a3b0
movq %rax, %rbx
leaq 0x18(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0xad086c(%rip), %rsi # 0x1eec826
leaq 0xad087a(%rip), %rdx # 0x1eec83b
leaq 0x8(%rsp), %rdi
callq 0x8d7230
leaq 0xce58de(%rip), %rax # 0x21018b0
movq %rax, (%rbx)
movl $0x1, 0x8(%rbx)
leaq 0x10(%rbx), %rdi
movq %rbx, %rax
addq $0x20, %rax
movq %rax, 0x10(%rbx)
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
addq %rsi, %rdx
callq 0x8d7100
leaq 0xce584c(%rip), %rsi # 0x2101850
leaq -0xb451ef(%rip), %rdx # 0x8d6e1c
movq %rbx, %rdi
callq 0x6a5d0
movq %rsi, %rdi
movq %rdx, %rsi
movq %r8, %rdx
xorl %r8d, %r8d
callq 0x127ba74
jmp 0x141c037
movq %rsi, %rdi
movq %rdx, %rsi
movq %r8, %rdx
xorl %r8d, %r8d
callq 0x15028c4
movq 0x50(%rsp), %rcx
movq %rax, (%rcx)
addq $0x28, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %r14
xorl %ebp, %ebp
jmp 0x141c05f
movq %rax, %r14
movq %rbx, %rdi
callq 0x6a0e0
movb $0x1, %bpl
movq 0x8(%rsp), %rdi
cmpq %r15, %rdi
je 0x141c076
callq 0x6a4f0
jmp 0x141c076
movq %rax, %r14
movb $0x1, %bpl
testb %bpl, %bpl
je 0x141c083
movq %rbx, %rdi
callq 0x6a8a0
movq %r14, %rdi
callq 0x6a600
nop
|
/embree[P]embree/kernels/bvh/bvh_builder_twolevel_internal.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::Instance, embree::InstancePrimitive>::BuildRef, 32ul>::parallel_object_find(embree::avx::PrimInfoExtRange const&, unsigned long)
|
// Finds the best object split for 'set' by binning all primitives in
// parallel: each worker bins a sub-range into a private Binner and the
// partial binners are merged pairwise by the reduction lambda.
__noinline const Split parallel_object_find(const PrimInfoExtRange& set, const size_t logBlockSize)
{
Binner binner(empty);
// Bin mapping is derived from the centroid bounds of the whole range.
const BinMapping<OBJECT_BINS> mapping(set.centBounds);
const BinMapping<OBJECT_BINS>& _mapping = mapping; // CLANG 3.4 parser bug workaround
// Per-chunk body: bin r.size() prims starting at prims0+r.begin()
// into a fresh local binner.
auto body = [&] (const range<size_t>& r) -> Binner {
Binner binner(empty); binner.bin(prims0+r.begin(),r.size(),_mapping); return binner;
};
// Reduction: merge two partial binners over the shared mapping.
auto reduction = [&] (const Binner& b0, const Binner& b1) -> Binner {
Binner r = b0; r.merge(b1,_mapping.size()); return r;
};
binner = parallel_reduce(set.begin(),set.end(),PARALLEL_FIND_BLOCK_SIZE,binner,body,reduction);
// Evaluate the filled bins and return the best split found.
return binner.best(mapping,logBlockSize);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x1f00, %rsp # imm = 0x1F00
movq %rcx, (%rsp)
movq %rdi, %rbx
movl $0xc00, %eax # imm = 0xC00
leaq 0x310(%rsp), %rcx
vbroadcastss 0xaca40f(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0xacb56a(%rip), %xmm2 # 0x1eecb84
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm1, -0x10(%rcx)
vmovaps %xmm2, (%rcx)
vmovaps %xmm1, -0x30(%rcx)
vmovaps %xmm2, -0x20(%rcx)
vmovaps %xmm1, -0x50(%rcx)
vmovaps %xmm2, -0x40(%rcx)
vmovaps %xmm0, 0x2c0(%rsp,%rax)
addq $0x10, %rax
addq $0x60, %rcx
cmpq $0xe00, %rax # imm = 0xE00
jne 0x142161e
leaq 0x80(%rsp), %rax
movq $0x20, (%rax)
vmovaps 0x20(%rdx), %xmm0
vmovaps 0x30(%rdx), %xmm1
vsubps %xmm0, %xmm1, %xmm1
vbroadcastss 0xb007f6(%rip), %xmm2 # 0x1f21e70
vmaxps %xmm1, %xmm2, %xmm1
vcmpnleps %xmm2, %xmm1, %xmm2
vbroadcastss 0xb1739c(%rip), %xmm3 # 0x1f38a28
vdivps %xmm1, %xmm3, %xmm1
vandps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x20(%rax)
vmovaps %xmm0, 0x10(%rax)
leaq 0xb0(%rsp), %r12
movq %rsi, (%r12)
movq %rax, 0x8(%r12)
leaq 0x78(%rsp), %r15
movq %rax, (%r15)
leaq 0xc0(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
vpermilps $0x4e, 0x40(%rdx), %xmm0 # xmm0 = mem[2,3,0,1]
vmovaps %xmm0, 0x10(%rsp)
movq %r14, %rdi
callq 0x6a660
leaq 0x30(%rsp), %rsi
vmovdqa 0x10(%rsp), %xmm0
vmovdqa %xmm0, (%rsi)
movq $0x200, 0x10(%rsi) # imm = 0x200
leaq 0x20(%rsp), %rcx
movq %r15, (%rcx)
movq %r12, 0x8(%rcx)
leaq 0x10c0(%rsp), %r12
leaq 0x2c0(%rsp), %r13
movq %r12, %rdi
movq %r13, %rdx
movq %r15, %r8
movq %r14, %r9
callq 0x1421b8f
leaq 0xc0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1421b2c
leaq 0xc0(%rsp), %r15
movq %r15, %rdi
callq 0x6aab0
xorl %eax, %eax
movl $0x10, %ecx
vmovaps -0x10(%r12,%rcx), %xmm0
vmovaps %xmm0, -0x10(%r13,%rcx)
vmovdqa (%r12,%rcx), %xmm0
vmovdqa %xmm0, (%r13,%rcx)
addq $0x20, %rcx
cmpq $0x70, %rcx
jne 0x142175d
incq %rax
addq $0x60, %r12
addq $0x60, %r13
cmpq $0x20, %rax
jne 0x1421758
leaq 0x1cc0(%rsp), %rsi
movl $0x200, %edx # imm = 0x200
leaq 0xec0(%rsp), %rdi
callq 0x6a0f0
movq 0x80(%rsp), %rax
movq %rax, %rdx
decq %rdx
vbroadcastss 0xaca25c(%rip), %xmm10 # 0x1eeba20
vbroadcastss 0xacb3b7(%rip), %xmm11 # 0x1eecb84
je 0x14218cb
movq %rax, %rdi
shlq $0x4, %rdi
leaq (%rdi,%r15), %rcx
addq $-0x10, %rcx
leaq 0x10c0(%rsp), %rsi
leaq -0x4(%rdi,%rsi), %rsi
leaq 0x2c0(%rsp), %r8
addq %r8, %rdi
addq $0xbf0, %rdi # imm = 0xBF0
leaq (%rax,%rax,2), %r9
shlq $0x5, %r9
addq %r9, %r8
addq $-0x10, %r8
vpxor %xmm0, %xmm0, %xmm0
xorl %r9d, %r9d
movq %rdx, %r10
vmovaps %xmm11, %xmm1
vmovaps %xmm10, %xmm2
vmovaps %xmm11, %xmm3
vmovaps %xmm10, %xmm4
vpaddd (%rdi,%r9), %xmm0, %xmm0
vminps -0x50(%r8), %xmm10, %xmm10
vmaxps -0x40(%r8), %xmm11, %xmm11
vmovdqa %xmm0, (%rcx,%r9)
vsubps %xmm10, %xmm11, %xmm5
vminps -0x30(%r8), %xmm2, %xmm2
vmaxps -0x20(%r8), %xmm1, %xmm1
vsubps %xmm2, %xmm1, %xmm6
vinsertps $0x4c, %xmm5, %xmm6, %xmm7 # xmm7 = xmm5[1],xmm6[1],zero,zero
vshufpd $0x1, %xmm5, %xmm5, %xmm8 # xmm8 = xmm5[1,0]
vinsertps $0x9c, %xmm6, %xmm8, %xmm8 # xmm8 = xmm8[0],xmm6[2],zero,zero
vaddps %xmm7, %xmm8, %xmm9
vmulps %xmm7, %xmm8, %xmm7
vunpcklps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vmulps %xmm5, %xmm9, %xmm5
vaddps %xmm7, %xmm5, %xmm5
vmovlps %xmm5, -0xc(%rsi,%r9)
vminps -0x10(%r8), %xmm4, %xmm4
vmaxps (%r8), %xmm3, %xmm3
vsubps %xmm4, %xmm3, %xmm5
vmovshdup %xmm5, %xmm6 # xmm6 = xmm5[1,1,3,3]
vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0]
vaddss %xmm7, %xmm6, %xmm8
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm8, %xmm5
vaddss %xmm6, %xmm5, %xmm5
vmovss %xmm5, -0x4(%rsi,%r9)
movl $0x0, (%rsi,%r9)
addq $-0x10, %r9
addq $-0x60, %r8
decq %r10
jne 0x142182a
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
movl $0xffffffff, %edi # imm = 0xFFFFFFFF
movq (%rsp), %rcx
shll %cl, %edi
vbroadcastss 0xaca13c(%rip), %xmm10 # 0x1eeba20
vmovaps %xmm10, 0x30(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovdqa %xmm0, 0x20(%rsp)
cmpq $0x2, %rax
jb 0x1421a97
notl %edi
vmovd %edi, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovdqa %xmm0, 0x60(%rsp)
vpxor %xmm0, %xmm0, %xmm0
vmovddup 0xaff5cd(%rip), %xmm2 # xmm2 = mem[0,0]
vbroadcastss 0xaca0fc(%rip), %xmm3 # 0x1eeba20
vbroadcastss 0xacb257(%rip), %xmm4 # 0x1eecb84
vxorps %xmm5, %xmm5, %xmm5
leaq 0x310(%rsp), %r8
xorl %edi, %edi
vmovd %ecx, %xmm1
vmovdqa %xmm1, (%rsp)
vmovddup 0xb00534(%rip), %xmm1 # xmm1 = mem[0,0]
vmovaps %xmm1, 0x50(%rsp)
vmovaps %xmm4, %xmm11
vmovaps %xmm3, %xmm12
vmovaps %xmm4, %xmm13
vmovaps %xmm3, %xmm14
vmovaps %xmm3, %xmm10
vminps -0x50(%r8), %xmm3, %xmm3
vmaxps -0x40(%r8), %xmm4, %xmm4
vmovaps %xmm4, 0x10(%rsp)
vminps -0x30(%r8), %xmm12, %xmm12
vmaxps -0x20(%r8), %xmm11, %xmm11
vsubps %xmm3, %xmm4, %xmm15
vsubps %xmm12, %xmm11, %xmm9
vminps -0x10(%r8), %xmm14, %xmm14
vmaxps (%r8), %xmm13, %xmm13
vsubps %xmm14, %xmm13, %xmm8
vshufps $0x65, %xmm8, %xmm15, %xmm1 # xmm1 = xmm15[1,1],xmm8[2,1]
vblendps $0x2, %xmm9, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm9[1],xmm1[2,3]
vshufps $0x96, %xmm8, %xmm15, %xmm6 # xmm6 = xmm15[2,1],xmm8[1,2]
vinsertps $0x90, %xmm9, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm9[2],xmm6[2,3]
vaddps %xmm6, %xmm1, %xmm7
vmulps %xmm6, %xmm1, %xmm1
vshufps $0x0, %xmm8, %xmm15, %xmm6 # xmm6 = xmm15[0,0],xmm8[0,0]
vpaddd 0xec0(%rsp,%rdi), %xmm5, %xmm5
vinsertps $0x10, %xmm9, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm9[0],xmm6[2,3]
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm1, %xmm6, %xmm1
vmovdqa 0x60(%rsp), %xmm7
vpaddd %xmm7, %xmm5, %xmm6
vmovdqa (%rsp), %xmm9
vpsrld %xmm9, %xmm6, %xmm6
vpaddd 0xd0(%rsp,%rdi), %xmm7, %xmm7
vpsrad $0x1f, %xmm6, %xmm8
vmovdqa 0x50(%rsp), %xmm15
vpand %xmm15, %xmm8, %xmm8
vmovaps %xmm3, %xmm4
vmovdqa %xmm5, %xmm3
vmovaps %xmm2, %xmm5
vmovdqa %xmm0, %xmm2
vbroadcastss 0xaff4a4(%rip), %xmm0 # 0x1f20ec4
vpand %xmm0, %xmm6, %xmm6
vcvtdq2ps %xmm6, %xmm6
vaddps %xmm6, %xmm8, %xmm6
vpsrld %xmm9, %xmm7, %xmm7
vmulps %xmm6, %xmm1, %xmm1
vpsrad $0x1f, %xmm7, %xmm6
vpand %xmm6, %xmm15, %xmm6
vpand %xmm0, %xmm7, %xmm7
vmovdqa %xmm2, %xmm0
vmovaps %xmm5, %xmm2
vmovdqa %xmm3, %xmm5
vmovaps %xmm4, %xmm3
vmovaps 0x10(%rsp), %xmm4
vcvtdq2ps %xmm7, %xmm7
vaddps %xmm6, %xmm7, %xmm6
vmulps 0x10d0(%rsp,%rdi), %xmm6, %xmm6
vaddps %xmm6, %xmm1, %xmm1
vcmpltps %xmm10, %xmm1, %xmm6
vblendvps %xmm6, %xmm2, %xmm0, %xmm0
vminps %xmm10, %xmm1, %xmm10
vpsubd 0xaca39a(%rip), %xmm2, %xmm2 # 0x1eebe20
addq $0x60, %r8
addq $0x10, %rdi
decq %rdx
jne 0x1421966
vmovaps %xmm10, 0x30(%rsp)
vmovdqa %xmm0, 0x20(%rsp)
vmovss 0xac9f75(%rip), %xmm0 # 0x1eeba20
xorl %ecx, %ecx
vxorps %xmm1, %xmm1, %xmm1
xorl %edx, %edx
vmovss 0xa0(%rsp,%rcx,4), %xmm2
vucomiss %xmm1, %xmm2
jne 0x1421ac4
jnp 0x1421ae0
vmovss 0x30(%rsp,%rcx,4), %xmm2
vucomiss %xmm2, %xmm0
jbe 0x1421ae0
movl 0x20(%rsp,%rcx,4), %edi
testl %edi, %edi
je 0x1421ae0
vmovaps %xmm2, %xmm0
movl %ecx, %esi
movl %edi, %edx
incq %rcx
cmpq $0x3, %rcx
jne 0x1421ab3
vmovss %xmm0, (%rbx)
movl %esi, 0x4(%rbx)
movl %edx, 0x8(%rbx)
movl $0x0, 0xc(%rbx)
movq %rax, 0x10(%rbx)
vmovaps 0x90(%rsp), %xmm0
vmovaps %xmm0, 0x20(%rbx)
vmovaps 0xa0(%rsp), %xmm0
vmovaps %xmm0, 0x30(%rbx)
movq %rbx, %rax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xac9fb0(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xd030b9(%rip), %rsi # 0x2124c08
movq 0xd02e72(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1421b72
jmp 0x1421b87
jmp 0x1421b6f
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::NodeRefPtr<8> embree::avx::BVHBuilderBinnedOpenMergeSAH::build<embree::NodeRefPtr<8>, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create2, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Set2, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(unsigned long)>(embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create2, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Set2, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&) const&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(unsigned long), 
embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*, unsigned long, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::avx::GeneralBVHBuilder::Settings const&)
|
// Entry point of the binned-SAH builder with the open/merge heuristic
// (InstanceArray instantiation; textually identical to the Instance one).
// Thin forwarding wrapper: instantiates HeuristicArrayOpenMergeSAH over
// the primitive-reference array and delegates to GeneralBVHBuilder::build().
//
// prims    - primitive reference array the builder operates on
// extSize  - size of the extended part of the range (see PrimInfoExtRange)
// pinfo    - bounds/count info for the initial primitive range
// settings - builder settings (branchingFactor is read here)
// returns the ReductionTy produced by the generic builder (the BVH root ref)
static ReductionTy build(CreateAllocFunc createAlloc,
CreateNodeFunc createNode,
UpdateNodeFunc updateNode,
const CreateLeafFunc& createLeaf,
NodeOpenerFunc nodeOpenerFunc,
ProgressMonitor progressMonitor,
BuildRef* prims,
const size_t extSize,
const PrimInfo& pinfo,
const Settings& settings)
{
// Heuristic binds the node opener, the prim array, and the branching factor.
typedef HeuristicArrayOpenMergeSAH<NodeOpenerFunc,BuildRef,NUM_OBJECT_BINS_HQ> Heuristic;
Heuristic heuristic(nodeOpenerFunc,prims,settings.branchingFactor);
// Initial range covers [0, pinfo.size()) plus 'extSize' extended slots.
return GeneralBVHBuilder::build<ReductionTy,Heuristic,Set,BuildRef>(
heuristic,
prims,
PrimInfoExtRange(0,pinfo.size(),extSize,pinfo),
createAlloc,
createNode,
updateNode,
createLeaf,
progressMonitor,
settings);
}
|
pushq %r14
pushq %rbx
subq $0x98, %rsp
movq %r8, %rax
movq %rcx, %r10
movq %rsi, %r8
movq %rdi, %rcx
movq 0xb0(%rsp), %rsi
movq 0xb8(%rsp), %rbx
leaq 0x10(%rsp), %r14
movq %rdx, (%r14)
leaq 0x8(%rsp), %r11
movq %r10, (%r11)
movq (%rbx), %rdx
leaq 0x18(%rsp), %rdi
movq %rax, (%rdi)
movq %r14, 0x8(%rdi)
movq %rdx, 0x10(%rdi)
movq 0x48(%rsi), %r10
subq 0x40(%rsi), %r10
vmovaps (%rsi), %xmm0
leaq 0x30(%rsp), %rdx
vmovaps %xmm0, (%rdx)
vmovaps 0x10(%rsi), %xmm0
vmovaps %xmm0, 0x10(%rdx)
vmovaps 0x20(%rsi), %xmm0
vmovaps %xmm0, 0x20(%rdx)
vmovaps 0x30(%rsi), %xmm0
vmovaps %xmm0, 0x30(%rdx)
movq $0x0, 0x40(%rdx)
movq %r10, 0x48(%rdx)
movq %r9, 0x50(%rdx)
movq %rbx, (%rsp)
movq %rax, %rsi
movq %r11, %r9
callq 0x142a926
addq $0x98, %rsp
popq %rbx
popq %r14
retq
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_sah.h
|
embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::RefBuilderLarge::attachBuildRefs(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>*)
|
// Rebuilds this object's BVH if its geometry was modified, then appends
// a BuildRef for the object to the top-level builder's reference array.
void attachBuildRefs (BVHNBuilderTwoLevel* topBuilder)
{
BVH* object = topBuilder->getBVH(objectID_); assert(object);
/* build object if it got modified */
if (topBuilder->isGeometryModified(objectID_))
builder_->build();
/* create build primitive */
// Objects whose BVH bounds are empty contribute no reference.
if (!object->getBounds().empty())
{
#if ENABLE_DIRECT_SAH_MERGE_BUILDER
Mesh* mesh = topBuilder->getMesh(objectID_);
// Direct SAH-merge path: the BuildRef additionally records the
// objectID and the mesh's primitive count alongside bounds/root.
topBuilder->refs[topBuilder->nextRef++] = BVHNBuilderTwoLevel::BuildRef(object->getBounds(),object->root,(unsigned int)objectID_,(unsigned int)mesh->size());
#else
topBuilder->refs[topBuilder->nextRef++] = BVHNBuilderTwoLevel::BuildRef(object->getBounds(),object->root);
#endif
}
}
|
pushq %r15
pushq %r14
pushq %rbx
movq %rsi, %rbx
movq %rdi, %r14
movq 0x8(%rdi), %rax
movq 0x28(%rsi), %rdx
movq 0x30(%rsi), %rcx
movq 0x200(%rdx), %rdx
movq (%rdx,%rax,8), %r15
movq 0x1e8(%rcx), %rdx
movq (%rdx,%rax,8), %rdx
testq %rdx, %rdx
je 0x1429743
movl 0x38(%rdx), %edx
movq 0x208(%rcx), %rcx
cmpl (%rcx,%rax,4), %edx
jbe 0x1429743
movq 0x10(%r14), %rdi
movq (%rdi), %rax
callq *0x20(%rax)
vmovaps 0x10(%r15), %xmm0
vmovaps 0x20(%r15), %xmm1
vminps 0x30(%r15), %xmm0, %xmm0
vmaxps 0x40(%r15), %xmm1, %xmm1
vcmpleps %xmm1, %xmm0, %xmm2
vmovmskps %xmm2, %eax
notb %al
testb $0x7, %al
jne 0x14297f7
movq 0x8(%r14), %rcx
movq 0x30(%rbx), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rax
xorl %edx, %edx
cmpb $0x18, 0x3c(%rax)
cmoveq %rax, %rdx
movq 0x70(%r15), %rax
vmovd %ecx, %xmm3
vinsertps $0x30, 0x20(%rdx), %xmm1, %xmm2 # xmm2 = xmm1[0,1,2],mem[0]
vinsertps $0x30, %xmm3, %xmm0, %xmm3 # xmm3 = xmm0[0,1,2],xmm3[0]
vinsertf128 $0x1, %xmm2, %ymm3, %ymm0
vxorps %xmm1, %xmm1, %xmm1
testb $0x8, %al
jne 0x14297cf
vsubps %xmm3, %xmm2, %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vshufpd $0x1, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,0]
vaddss %xmm3, %xmm2, %xmm4
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm4, %xmm1, %xmm1
vaddss %xmm2, %xmm1, %xmm1
vaddss %xmm1, %xmm1, %xmm1
movl $0x1, %ecx
lock
xaddl %ecx, 0x88(%rbx)
movslq %ecx, %rcx
movq 0x58(%rbx), %rdx
shlq $0x6, %rcx
vmovaps %ymm0, (%rdx,%rcx)
movq %rax, 0x20(%rdx,%rcx)
vmovss %xmm1, 0x28(%rdx,%rcx)
popq %rbx
popq %r14
popq %r15
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_builder_twolevel.h
|
embree::avx::GeneralBVHBuilder::BuilderT<embree::avx::GeneralBVHBuilder::BuildRecordT<embree::avx::PrimInfoExtRange, embree::avx::BinSplit<32ul>>, embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, 32ul>, embree::avx::PrimInfoExtRange, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, embree::NodeRefPtr<8>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create2, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Set2, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&), embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafFunc<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, embree::avx::PrimInfoExtRange>, embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafSplitFunc<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, embree::avx::PrimInfoExtRange>, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(unsigned long)>::BuilderT(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*, embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, 
embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, 32ul>&, embree::BVHN<8>::CreateAlloc const&, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create2 const&, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Set2 const&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&) const&, embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafFunc<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, embree::avx::PrimInfoExtRange> const&, embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafSplitFunc<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, embree::avx::PrimInfoExtRange> const&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(unsigned long) const&, embree::avx::GeneralBVHBuilder::Settings const&)
|
// Constructor of the generic builder's state object: copies the settings
// into 'cfg', stores the primitive array pointer, and captures the
// heuristic and all user callbacks (by reference, per the member types).
// Fails fast if the configured branching factor exceeds what the builder
// supports (MAX_BRANCHING_FACTOR).
BuilderT (PrimRef* prims,
Heuristic& heuristic,
const CreateAllocFunc& createAlloc,
const CreateNodeFunc& createNode,
const UpdateNodeFunc& updateNode,
const CreateLeafFunc& createLeaf,
const CanCreateLeafFunc& canCreateLeaf,
const CanCreateLeafSplitFunc& canCreateLeafSplit,
const ProgressMonitor& progressMonitor,
const Settings& settings) :
cfg(settings),
prims(prims),
heuristic(heuristic),
createAlloc(createAlloc),
createNode(createNode),
updateNode(updateNode),
createLeaf(createLeaf),
canCreateLeaf(canCreateLeaf),
canCreateLeafSplit(canCreateLeafSplit),
progressMonitor(progressMonitor)
{
// Validate configuration once, up front, instead of during the build.
if (cfg.branchingFactor > MAX_BRANCHING_FACTOR)
throw_RTCError(RTC_ERROR_UNKNOWN,"bvh_builder: branching factor too large");
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x28, %rsp
vmovups 0x50(%rsp), %ymm0
movq 0x70(%rsp), %rax
vmovups (%rax), %ymm1
vmovups 0x20(%rax), %ymm2
vmovups %ymm1, (%rdi)
vmovups %ymm2, 0x20(%rdi)
movq %rsi, 0x40(%rdi)
movq %rdx, 0x48(%rdi)
movq %rcx, 0x50(%rdi)
movq %r8, 0x58(%rdi)
movq %r9, 0x60(%rdi)
vmovups %ymm0, 0x68(%rdi)
cmpq $0x11, (%rdi)
jae 0x142aa84
addq $0x28, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movl $0x30, %edi
vzeroupper
callq 0x6a3b0
movq %rax, %rbx
leaq 0x18(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0xac21a8(%rip), %rsi # 0x1eecc4c
leaq 0xac21c8(%rip), %rdx # 0x1eecc73
leaq 0x8(%rsp), %rdi
callq 0x8d7230
leaq 0xcd6df4(%rip), %rax # 0x21018b0
movq %rax, (%rbx)
movl $0x1, 0x8(%rbx)
leaq 0x10(%rbx), %rdi
movq %rbx, %rax
addq $0x20, %rax
movq %rax, 0x10(%rbx)
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
addq %rsi, %rdx
callq 0x8d7100
leaq 0xcd6d62(%rip), %rsi # 0x2101850
leaq -0xb53cd9(%rip), %rdx # 0x8d6e1c
movq %rbx, %rdi
callq 0x6a5d0
movq %rax, %r14
xorl %ebp, %ebp
jmp 0x142ab12
movq %rax, %r14
movq %rbx, %rdi
callq 0x6a0e0
movb $0x1, %bpl
movq 0x8(%rsp), %rdi
cmpq %r15, %rdi
je 0x142ab29
callq 0x6a4f0
jmp 0x142ab29
movq %rax, %r14
movb $0x1, %bpl
testb %bpl, %bpl
je 0x142ab36
movq %rbx, %rdi
callq 0x6a8a0
movq %r14, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_sah.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, 32ul>::split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)
|
// Partitions the primitive range 'set_i' into left/right child ranges
// ('lset'/'rset') according to 'split'; falls back to a deterministic
// split when the given split is invalid.
__noinline void split(const Split& split, const PrimInfoExtRange& set_i, PrimInfoExtRange& lset, PrimInfoExtRange& rset)
{
// Local copy: 'set_i' itself is left untouched.
PrimInfoExtRange set = set_i;
/* valid split */
if (unlikely(!split.valid())) {
// Establish a deterministic order first, then split by fallback rule.
deterministic_order(set);
splitFallback(set,lset,rset);
return;
}
// (left, right) weights used below to size the extended child ranges.
std::pair<size_t,size_t> ext_weights(0,0);
/* object split */
// Small ranges partition sequentially; large ones use the parallel path.
if (likely(set.size() < PARALLEL_THRESHOLD))
ext_weights = sequential_object_split(split,set,lset,rset);
else
ext_weights = parallel_object_split(split,set,lset,rset);
/* if we have an extended range, set extended child ranges and move right split range */
if (unlikely(set.has_ext_range()))
{
setExtentedRanges(set,lset,rset,ext_weights.first,ext_weights.second);
moveExtentedRange(set,lset,rset);
}
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x130, %rsp # imm = 0x130
movq %r8, %rbx
movq %rcx, %r15
movq %rdi, %r14
vmovaps (%rdx), %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps 0x10(%rdx), %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps 0x20(%rdx), %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps 0x30(%rdx), %xmm0
vmovaps %xmm0, 0x80(%rsp)
vmovdqa 0x40(%rdx), %xmm0
vmovdqa %xmm0, 0x90(%rsp)
movq 0x50(%rdx), %rax
movq %rax, 0xa0(%rsp)
cmpl $-0x1, 0x4(%rsi)
je 0x142c58d
movq 0x98(%rsp), %rax
vmovq %xmm0, %rcx
subq %rcx, %rax
leaq 0x50(%rsp), %rdx
movq %r14, %rdi
movq %r15, %rcx
movq %rbx, %r8
cmpq $0x3ff, %rax # imm = 0x3FF
ja 0x142c623
callq 0x1ff00c
movq 0xa0(%rsp), %rcx
subq 0x98(%rsp), %rcx
jne 0x142c62d
addq $0x130, %rsp # imm = 0x130
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
leaq 0x90(%rsp), %r12
movq %r14, %rdi
movq %r12, %rsi
callq 0x142b808
movq (%r12), %rdx
movq 0x8(%r12), %rax
leaq (%rax,%rdx), %rcx
shrq %rcx
vbroadcastss 0xac05cb(%rip), %xmm0 # 0x1eecb84
vbroadcastss 0xabf45e(%rip), %xmm1 # 0x1eeba20
movq %rcx, %rsi
subq %rdx, %rsi
jbe 0x142c640
movq (%r14), %rdi
movq %rdx, %r8
shlq $0x6, %r8
addq %r8, %rdi
addq $0x10, %rdi
vbroadcastss 0xabf43c(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0xac0597(%rip), %xmm3 # 0x1eecb84
vmovaps %xmm3, %xmm4
vmovaps %xmm2, %xmm5
vmovaps -0x10(%rdi), %xmm6
vmovaps (%rdi), %xmm7
vminps %xmm6, %xmm5, %xmm5
vmaxps %xmm7, %xmm4, %xmm4
vaddps %xmm7, %xmm6, %xmm6
vminps %xmm6, %xmm2, %xmm2
vmaxps %xmm6, %xmm3, %xmm3
addq $0x40, %rdi
decq %rsi
jne 0x142c5f5
movq %rcx, %rsi
subq %rdx, %rsi
jmp 0x142c652
callq 0x143132e
jmp 0x142c566
testq %rax, %rax
js 0x142c72a
vcvtsi2ss %rax, %xmm1, %xmm0
jmp 0x142c741
xorl %esi, %esi
vmovaps %xmm1, %xmm5
vmovaps %xmm0, %xmm4
vmovaps %xmm1, %xmm2
vmovaps %xmm0, %xmm3
movq %rax, %rdi
subq %rcx, %rdi
jbe 0x142c6ba
movq (%r14), %r8
movq %rcx, %r9
shlq $0x6, %r9
addq %r9, %r8
addq $0x10, %r8
vbroadcastss 0xac0510(%rip), %xmm6 # 0x1eecb84
vbroadcastss 0xabf3a3(%rip), %xmm7 # 0x1eeba20
vmovaps %xmm7, %xmm1
vmovaps %xmm6, %xmm0
vmovaps -0x10(%r8), %xmm8
vmovaps (%r8), %xmm9
vminps %xmm8, %xmm7, %xmm7
vmaxps %xmm9, %xmm6, %xmm6
vaddps %xmm9, %xmm8, %xmm8
vminps %xmm8, %xmm1, %xmm1
vmaxps %xmm8, %xmm0, %xmm0
addq $0x40, %r8
decq %rdi
jne 0x142c685
movq %rax, %rdi
subq %rcx, %rdi
jmp 0x142c6c4
xorl %edi, %edi
vmovaps %xmm0, %xmm6
vmovaps %xmm1, %xmm7
vmovaps %xmm5, (%r15)
vmovaps %xmm4, 0x10(%r15)
vmovaps %xmm2, 0x20(%r15)
vmovaps %xmm3, 0x30(%r15)
movq %rdx, 0x40(%r15)
movq %rcx, 0x48(%r15)
movq %rcx, 0x50(%r15)
vmovaps %xmm7, (%rbx)
vmovaps %xmm6, 0x10(%rbx)
vmovaps %xmm1, 0x20(%rbx)
vmovaps %xmm0, 0x30(%rbx)
movq %rcx, 0x40(%rbx)
movq %rax, 0x48(%rbx)
movq %rax, 0x50(%rbx)
movq 0xa0(%rsp), %rcx
subq %rax, %rcx
je 0x142c57c
testq %rsi, %rsi
js 0x142c892
vcvtsi2ss %rsi, %xmm10, %xmm0
jmp 0x142c8a9
movq %rax, %rsi
shrq %rsi
movl %eax, %edi
andl $0x1, %edi
orq %rsi, %rdi
vcvtsi2ss %rdi, %xmm1, %xmm0
vaddss %xmm0, %xmm0, %xmm0
addq %rdx, %rax
js 0x142c74d
vcvtsi2ss %rax, %xmm1, %xmm1
jmp 0x142c762
movq %rax, %rdx
shrq %rdx
andl $0x1, %eax
orq %rdx, %rax
vcvtsi2ss %rax, %xmm1, %xmm1
vaddss %xmm1, %xmm1, %xmm1
vdivss %xmm1, %xmm0, %xmm0
testq %rcx, %rcx
js 0x142c772
vcvtsi2ss %rcx, %xmm2, %xmm1
jmp 0x142c789
movq %rcx, %rax
shrq %rax
movl %ecx, %edx
andl $0x1, %edx
orq %rax, %rdx
vcvtsi2ss %rdx, %xmm2, %xmm1
vaddss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm0
vcvttss2si %xmm0, %rax
movq %rax, %rdx
sarq $0x3f, %rdx
vsubss 0xabf69d(%rip), %xmm0, %xmm0 # 0x1eebe44
vcvttss2si %xmm0, %rsi
andq %rdx, %rsi
orq %rax, %rsi
cmpq %rcx, %rsi
cmovaeq %rcx, %rsi
subq %rsi, %rcx
addq 0x48(%r15), %rsi
movq %rsi, 0x50(%r15)
movq 0x48(%rbx), %r12
addq %r12, %rcx
movq %rcx, 0x50(%rbx)
movq 0x50(%r15), %r13
movq 0x48(%r15), %rcx
movq %r13, %rax
subq %rcx, %rax
movq %rax, 0x10(%rsp)
movq 0x40(%rbx), %r15
movq %r12, %rax
subq %r15, %rax
movq %rax, 0x18(%rsp)
subq %rcx, %r13
je 0x142c57c
cmpq %rax, %r13
jae 0x142c9fa
addq %r15, %r13
leaq 0x28(%rsp), %r12
movq %r14, (%r12)
leaq 0x18(%rsp), %rax
movq %rax, 0x8(%r12)
leaq 0xb0(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x38(%rsp), %rdi
movq %r13, (%rdi)
movq %r15, 0x8(%rdi)
movq $0x40, 0x10(%rdi)
leaq 0x20(%rsp), %rsi
movq %r12, (%rsi)
leaq 0xf(%rsp), %rdx
movq %r14, %rcx
callq 0x1430516
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x142cb2d
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x142cb0b
movq %rsi, %rax
shrq %rax
movl %esi, %edx
andl $0x1, %edx
orq %rax, %rdx
vcvtsi2ss %rdx, %xmm10, %xmm0
vaddss %xmm0, %xmm0, %xmm0
addq %rsi, %rdi
js 0x142c8b5
vcvtsi2ss %rdi, %xmm10, %xmm1
jmp 0x142c8ca
movq %rdi, %rax
shrq %rax
andl $0x1, %edi
orq %rax, %rdi
vcvtsi2ss %rdi, %xmm10, %xmm1
vaddss %xmm1, %xmm1, %xmm1
vdivss %xmm1, %xmm0, %xmm0
testq %rcx, %rcx
js 0x142c8da
vcvtsi2ss %rcx, %xmm10, %xmm1
jmp 0x142c8f1
movq %rcx, %rax
shrq %rax
movl %ecx, %edx
andl $0x1, %edx
orq %rax, %rdx
vcvtsi2ss %rdx, %xmm10, %xmm1
vaddss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm0
vcvttss2si %xmm0, %rax
movq %rax, %rdx
sarq $0x3f, %rdx
vsubss 0xabf535(%rip), %xmm0, %xmm0 # 0x1eebe44
vcvttss2si %xmm0, %rsi
andq %rdx, %rsi
orq %rax, %rsi
cmpq %rcx, %rsi
cmovaeq %rcx, %rsi
subq %rsi, %rcx
addq 0x48(%r15), %rsi
movq %rsi, 0x50(%r15)
movq 0x48(%rbx), %r12
addq %r12, %rcx
movq %rcx, 0x50(%rbx)
movq 0x50(%r15), %r13
movq 0x48(%r15), %rcx
movq %r13, %rax
subq %rcx, %rax
movq %rax, 0x10(%rsp)
movq 0x40(%rbx), %r15
movq %r12, %rax
subq %r15, %rax
movq %rax, 0x18(%rsp)
subq %rcx, %r13
je 0x142c57c
cmpq %rax, %r13
jae 0x142ca85
addq %r15, %r13
leaq 0x28(%rsp), %r12
movq %r14, (%r12)
leaq 0x18(%rsp), %rax
movq %rax, 0x8(%r12)
leaq 0xb0(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x38(%rsp), %rdi
movq %r13, (%rdi)
movq %r15, 0x8(%rdi)
movq $0x40, 0x10(%rdi)
leaq 0x20(%rsp), %rsi
movq %r12, (%rsi)
leaq 0xf(%rsp), %rdx
movq %r14, %rcx
callq 0x1430516
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x142cb91
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x142cb0b
leaq 0x28(%rsp), %r13
movq %r14, (%r13)
leaq 0x10(%rsp), %rax
movq %rax, 0x8(%r13)
leaq 0xb0(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x38(%rsp), %rdi
movq %r12, (%rdi)
movq %r15, 0x8(%rdi)
movq $0x40, 0x10(%rdi)
leaq 0x20(%rsp), %rsi
movq %r13, (%rsi)
leaq 0xf(%rsp), %rdx
movq %r14, %rcx
callq 0x1430c22
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x142cb5f
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x142cb0b
leaq 0x28(%rsp), %r13
movq %r14, (%r13)
leaq 0x10(%rsp), %rax
movq %rax, 0x8(%r13)
leaq 0xb0(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x38(%rsp), %rdi
movq %r12, (%rdi)
movq %r15, 0x8(%rdi)
movq $0x40, 0x10(%rdi)
leaq 0x20(%rsp), %rsi
movq %r13, (%rsi)
leaq 0xf(%rsp), %rdx
movq %r14, %rcx
callq 0x1430c22
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x142cbc3
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
vmovq %rax, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vpaddq 0x40(%rbx), %xmm0, %xmm0
vmovdqa %xmm0, 0x40(%rbx)
addq %rax, 0x50(%rbx)
jmp 0x142c57c
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xabefaf(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xcf80b8(%rip), %rsi # 0x2124c08
movq 0xcf7e71(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xabef7d(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xcf8086(%rip), %rsi # 0x2124c08
movq 0xcf7e3f(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xabef4b(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xcf8054(%rip), %rsi # 0x2124c08
movq 0xcf7e0d(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xabef19(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xcf8022(%rip), %rsi # 0x2124c08
movq 0xcf7ddb(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x142cc34
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x142cc48
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x142cc64
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x142cc76
jmp 0x142cc8d
jmp 0x142cc8d
jmp 0x142cc31
jmp 0x142cc45
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x142cc83
jmp 0x142cc8d
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x142cc83
jmp 0x142cc8d
jmp 0x142cc8d
jmp 0x142cc8d
jmp 0x142cc61
jmp 0x142cc73
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x142cc83
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
jmp 0x142cc8d
movq %rax, %rdi
callq 0x8d6de8
nop
|
/embree[P]embree/kernels/bvh/../builders/heuristic_openmerge_array.h
|
embree::avx::HeuristicArrayOpenMergeSAH<embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::build()::'lambda'(embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef*), embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef, 32ul>::parallel_object_split(embree::avx::BinSplit<32ul> const&, embree::avx::PrimInfoExtRange const&, embree::avx::PrimInfoExtRange&, embree::avx::PrimInfoExtRange&)::'lambda'(embree::PrimInfoT<embree::BBox<embree::Vec3fa>>&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef const&)::operator()(embree::PrimInfoT<embree::BBox<embree::Vec3fa>>&, embree::avx::BVHNBuilderTwoLevel<8, embree::InstanceArray, embree::InstanceArrayPrimitive>::BuildRef const&) const
|
__forceinline operator Vec3fa () const { return Vec3fa(m128); }
|
vmovaps (%rdx), %xmm0
vmovaps 0x10(%rdx), %xmm1
vmovaps (%rsi), %xmm2
vmovaps 0x10(%rsi), %xmm3
vmovaps 0x20(%rsi), %xmm4
vmovaps 0x30(%rsi), %xmm5
vminps %xmm0, %xmm2, %xmm2
vmovaps %xmm2, (%rsi)
vmaxps %xmm1, %xmm3, %xmm2
vmovaps %xmm2, 0x10(%rsi)
vaddps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm1
vmovaps %xmm1, 0x20(%rsi)
vmaxps %xmm0, %xmm5, %xmm0
vmovaps %xmm0, 0x30(%rsi)
incq 0x48(%rsi)
retq
|
/embree[P]embree/kernels/bvh/../common/../../common/sys/../math/vec3fa.h
|
embree::avx::BVHNSubdivPatch1BuilderSAH<4>::clear()
|
void clear() {
prims.clear();
}
|
pushq %r14
pushq %rbx
pushq %rax
movq %rdi, %rbx
movq 0x38(%rdi), %r14
movq 0x40(%rdi), %rdi
testq %rdi, %rdi
je 0x1434590
movq %r14, %rsi
shlq $0x5, %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x143458b
movzbl 0x28(%rbx), %edx
callq 0x1ee67ca
jmp 0x1434590
callq 0x1ee612d
testq %r14, %r14
je 0x14345ad
movq 0x20(%rbx), %rdi
shlq $0x5, %r14
negq %r14
movq (%rdi), %rax
movq %r14, %rsi
movl $0x1, %edx
callq *(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x30(%rbx)
movq $0x0, 0x40(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r14
retq
|
/embree[P]embree/kernels/bvh/bvh_builder_subdiv.cpp
|
embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::build()
|
void build()
{
/* initialize all half edge structures */
size_t numPatches = scene->getNumPrimitives(SubdivMesh::geom_type,true);
/* skip build for empty scene */
if (numPatches == 0) {
primsMB.resize(0);
bounds.resize(0);
bvh->set(BVH::emptyNode,empty,0);
return;
}
double t0 = bvh->preBuild(TOSTRING(isa) "::BVH" + toString(N) + "SubdivPatch1MBlurBuilderSAH");
ParallelForForPrefixSumState<PrimInfoMB> pstate;
/* calculate number of primitives (some patches need initial subdivision) */
size_t numSubPatches, numSubPatchesMB;
countSubPatches(numSubPatches, numSubPatchesMB, pstate);
primsMB.resize(numSubPatches);
bounds.resize(numSubPatchesMB);
/* exit if there are no primitives to process */
if (numSubPatches == 0) {
bvh->set(BVH::emptyNode,empty,0);
bvh->postBuild(t0);
return;
}
/* Allocate memory for gregory and b-spline patches */
bvh->subdiv_patches.resize(sizeof(SubdivPatch1) * numSubPatchesMB);
/* rebuild BVH */
rebuild(numSubPatches, pstate);
/* clear temporary data for static geometry */
if (scene->isStaticAccel()) {
primsMB.clear();
}
bvh->cleanup();
bvh->postBuild(t0);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x4c88, %rsp # imm = 0x4C88
movq %rdi, %rbx
movq 0x18(%rdi), %rax
cmpq $0x0, 0x2d8(%rax)
je 0x14369d9
movq 0x10(%rbx), %r14
leaq 0x20(%rsp), %r12
movq %r12, -0x10(%r12)
leaq 0x10(%rsp), %r15
movl $0x1, %esi
movq %r15, %rdi
movl $0x2d, %edx
callq 0x6a580
movq (%r15), %rax
movb $0x34, (%rax)
leaq 0xaeb54d(%rip), %rcx # 0x1f21eed
movl $0x8, %r8d
movq %r15, %rdi
xorl %esi, %esi
xorl %edx, %edx
callq 0x6a1a0
leaq 0x40(%rsp), %r15
movq %r15, -0x10(%r15)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x1436a03
movq %rdx, 0x30(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x40(%rsp)
jmp 0x1436a0c
movq 0x38(%rbx), %r15
cmpq $0x0, 0x30(%rbx)
je 0x14369ec
movq $0x0, 0x30(%rbx)
cmpq %r15, 0x38(%rbx)
jne 0x1436b71
movq $0x0, 0x30(%rbx)
jmp 0x143700e
vmovups (%rcx), %xmm0
vmovups %xmm0, (%r15)
movq 0x8(%rax), %rdx
leaq 0x30(%rsp), %rdi
movq %rdx, 0x8(%rdi)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
leaq 0xae7ba9(%rip), %rsi # 0x1f1e5d8
callq 0x6a620
leaq 0x80(%rsp), %r13
movq %r13, -0x10(%r13)
movq (%rax), %rdx
movq %rax, %rcx
addq $0x10, %rcx
cmpq %rcx, %rdx
je 0x1436a61
movq %rdx, 0x70(%rsp)
movq (%rcx), %rdx
movq %rdx, 0x80(%rsp)
jmp 0x1436a6b
vmovups (%rcx), %xmm0
vmovups %xmm0, (%r13)
movq 0x8(%rax), %rdx
leaq 0x70(%rsp), %rsi
movq %rdx, 0x8(%rsi)
movq %rcx, (%rax)
movq $0x0, 0x8(%rax)
movb $0x0, 0x10(%rax)
movq %r14, %rdi
callq 0xb5d532
vmovsd %xmm0, 0x8(%rsp)
movq 0x70(%rsp), %rdi
cmpq %r13, %rdi
je 0x1436aa4
callq 0x6a4f0
movq 0x30(%rsp), %rdi
cmpq %r15, %rdi
je 0x1436ab3
callq 0x6a4f0
movq 0x10(%rsp), %rdi
cmpq %r12, %rdi
je 0x1436ac2
callq 0x6a4f0
leaq 0x70(%rsp), %rcx
movq $0x0, 0x400(%rcx)
leaq 0x10(%rsp), %r14
movq %rsp, %rdx
movq %rbx, %rdi
movq %r14, %rsi
callq 0x1437206
movq (%r14), %r15
movq 0x38(%rbx), %rax
cmpq %r15, %rax
jae 0x1436b13
movq %r15, %r12
testq %rax, %rax
je 0x1436b16
cmpq %r15, %rax
jae 0x1436b13
movq %rax, %r12
addq %r12, %r12
cmpq $0x1, %r12
adcq $0x0, %r12
cmpq %r15, %r12
jb 0x1436b01
jmp 0x1436b16
movq %rax, %r12
leaq 0x30(%rbx), %r13
cmpq %r15, 0x30(%rbx)
jbe 0x1436b24
movq %r15, (%r13)
cmpq %r12, 0x38(%rbx)
jne 0x1436b33
movq %r15, (%r13)
jmp 0x1436c7f
movq 0x40(%rbx), %r14
testq %r12, %r12
je 0x1436b52
movq 0x20(%rbx), %rdi
movq %r12, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rsi
movq (%rdi), %rax
xorl %edx, %edx
callq *(%rax)
movq %r12, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rdi
cmpq $0x1c00000, %rdi # imm = 0x1C00000
jb 0x1436bb6
leaq 0x28(%rbx), %rsi
callq 0x1ee665d
jmp 0x1436bc0
movq 0x40(%rbx), %r14
testq %r15, %r15
je 0x1436b90
movq 0x20(%rbx), %rdi
movq %r15, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rsi
movq (%rdi), %rax
xorl %edx, %edx
callq *(%rax)
movq %r15, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rdi
cmpq $0x1c00000, %rdi # imm = 0x1C00000
jb 0x1436ef6
leaq 0x28(%rbx), %rsi
callq 0x1ee665d
jmp 0x1436f00
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, 0x40(%rbx)
cmpq $0x0, 0x30(%rbx)
je 0x1436c1f
movl $0x40, %eax
xorl %ecx, %ecx
movq 0x40(%rbx), %rdx
vmovaps -0x40(%r14,%rax), %xmm0
vmovaps %xmm0, -0x40(%rdx,%rax)
vmovaps -0x30(%r14,%rax), %xmm0
vmovaps %xmm0, -0x30(%rdx,%rax)
vmovaps -0x20(%r14,%rax), %xmm0
vmovaps %xmm0, -0x20(%rdx,%rax)
vmovaps -0x10(%r14,%rax), %xmm0
vmovaps %xmm0, -0x10(%rdx,%rax)
movq (%r14,%rax), %rsi
movq %rsi, (%rdx,%rax)
incq %rcx
addq $0x50, %rax
cmpq 0x30(%rbx), %rcx
jb 0x1436bd2
movq 0x38(%rbx), %rbp
testq %r14, %r14
je 0x1436c52
movq %rbp, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x1436c4a
movzbl 0x28(%rbx), %edx
movq %r14, %rdi
callq 0x1ee67ca
jmp 0x1436c52
movq %r14, %rdi
callq 0x1ee612d
testq %rbp, %rbp
je 0x1436c77
movq 0x20(%rbx), %rdi
shlq $0x4, %rbp
leaq (,%rbp,4), %rsi
addq %rbp, %rsi
negq %rsi
movq (%rdi), %rax
movl $0x1, %edx
callq *(%rax)
movq %r15, 0x30(%rbx)
movq %r12, 0x38(%rbx)
movq (%rsp), %r12
movq 0x60(%rbx), %rax
cmpq %r12, %rax
jae 0x1436cae
movq %r12, %rbp
testq %rax, %rax
je 0x1436cb1
cmpq %r12, %rax
jae 0x1436cae
movq %rax, %rbp
addq %rbp, %rbp
cmpq $0x1, %rbp
adcq $0x0, %rbp
cmpq %r12, %rbp
jb 0x1436c9c
jmp 0x1436cb1
movq %rax, %rbp
cmpq %r12, 0x58(%rbx)
jbe 0x1436cbb
movq %r12, 0x58(%rbx)
cmpq %rbp, 0x60(%rbx)
jne 0x1436cca
movq %r12, 0x58(%rbx)
jmp 0x1436d99
movq 0x68(%rbx), %r14
testq %rbp, %rbp
je 0x1436ce5
movq 0x48(%rbx), %rdi
movq %rbp, %rsi
shlq $0x5, %rsi
movq (%rdi), %rax
xorl %edx, %edx
callq *(%rax)
movq %rbp, %rdi
shlq $0x5, %rdi
cmpq $0x1c00000, %rdi # imm = 0x1C00000
jb 0x1436d00
leaq 0x50(%rbx), %rsi
callq 0x1ee665d
jmp 0x1436d0a
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, 0x68(%rbx)
cmpq $0x0, 0x58(%rbx)
je 0x1436d45
movl $0x10, %eax
xorl %ecx, %ecx
movq 0x68(%rbx), %rdx
vmovaps -0x10(%r14,%rax), %xmm0
vmovaps %xmm0, -0x10(%rdx,%rax)
vmovaps (%r14,%rax), %xmm0
vmovaps %xmm0, (%rdx,%rax)
incq %rcx
addq $0x20, %rax
cmpq 0x58(%rbx), %rcx
jb 0x1436d1c
movq 0x60(%rbx), %r15
testq %r14, %r14
je 0x1436d74
movq %r15, %rsi
shlq $0x5, %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x1436d6c
movzbl 0x50(%rbx), %edx
movq %r14, %rdi
callq 0x1ee67ca
jmp 0x1436d74
movq %r14, %rdi
callq 0x1ee612d
testq %r15, %r15
je 0x1436d91
movq 0x48(%rbx), %rdi
shlq $0x5, %r15
negq %r15
movq (%rdi), %rax
movq %r15, %rsi
movl $0x1, %edx
callq *(%rax)
movq %r12, 0x58(%rbx)
movq %rbp, 0x60(%rbx)
cmpq $0x0, 0x10(%rsp)
movq 0x10(%rbx), %r14
je 0x1436e07
movq (%rsp), %rax
shlq $0x6, %rax
leaq (%rax,%rax,4), %rbp
movq 0x228(%r14), %rax
cmpq %rbp, %rax
jae 0x1436ddf
movq %rbp, %r15
testq %rax, %rax
je 0x1436de2
cmpq %rbp, %rax
jae 0x1436ddf
movq %rax, %r15
addq %r15, %r15
cmpq $0x1, %r15
adcq $0x0, %r15
cmpq %rbp, %r15
jb 0x1436dcd
jmp 0x1436de2
movq %rax, %r15
cmpq %rbp, 0x220(%r14)
jbe 0x1436df2
movq %rbp, 0x220(%r14)
cmpq %r15, 0x228(%r14)
jne 0x1436e45
movq %rbp, 0x220(%r14)
jmp 0x1436e9c
vbroadcastss 0xab4c10(%rip), %xmm0 # 0x1eeba20
leaq 0x30(%rsp), %rdx
vmovaps %xmm0, (%rdx)
vbroadcastss 0xab5d62(%rip), %xmm1 # 0x1eecb84
vmovaps %xmm1, 0x10(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovaps %xmm1, 0x30(%rdx)
movl $0x8, %esi
movq %r14, %rdi
xorl %ecx, %ecx
callq 0xb5ccb2
jmp 0x1436fc8
movq 0x230(%r14), %r12
movl $0x20, %esi
movq %r15, %rdi
callq 0x1ee60ac
movq %rax, 0x230(%r14)
cmpq $0x0, 0x220(%r14)
je 0x1436e86
xorl %eax, %eax
movq 0x230(%r14), %rcx
movb (%r12,%rax), %dl
movb %dl, (%rcx,%rax)
incq %rax
cmpq 0x220(%r14), %rax
jb 0x1436e6c
movq %r12, %rdi
callq 0x1ee612d
movq %rbp, 0x220(%r14)
movq %r15, 0x228(%r14)
movq 0x10(%rsp), %rsi
leaq 0x70(%rsp), %rdx
movq %rbx, %rdi
callq 0x1437626
movq 0x18(%rbx), %rax
testb $0x1, 0x238(%rax)
jne 0x1436fbf
movq 0x38(%rbx), %r14
movq 0x40(%rbx), %rdi
testq %rdi, %rdi
je 0x1436f8f
movq %r14, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x1436f8a
movzbl 0x28(%rbx), %edx
callq 0x1ee67ca
jmp 0x1436f8f
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, 0x40(%rbx)
cmpq $0x0, 0x30(%rbx)
je 0x1436f5f
movl $0x40, %eax
xorl %ecx, %ecx
movq 0x40(%rbx), %rdx
vmovaps -0x40(%r14,%rax), %xmm0
vmovaps %xmm0, -0x40(%rdx,%rax)
vmovaps -0x30(%r14,%rax), %xmm0
vmovaps %xmm0, -0x30(%rdx,%rax)
vmovaps -0x20(%r14,%rax), %xmm0
vmovaps %xmm0, -0x20(%rdx,%rax)
vmovaps -0x10(%r14,%rax), %xmm0
vmovaps %xmm0, -0x10(%rdx,%rax)
movq (%r14,%rax), %rsi
movq %rsi, (%rdx,%rax)
incq %rcx
addq $0x50, %rax
cmpq 0x30(%rbx), %rcx
jb 0x1436f12
movq 0x38(%rbx), %r12
testq %r14, %r14
je 0x1436fe4
movq %r12, %rax
shlq $0x4, %rax
leaq (%rax,%rax,4), %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x1436fdc
movzbl 0x28(%rbx), %edx
movq %r14, %rdi
callq 0x1ee67ca
jmp 0x1436fe4
callq 0x1ee612d
testq %r14, %r14
je 0x1436fad
movq 0x20(%rbx), %rdi
shlq $0x4, %r14
leaq (%r14,%r14,4), %rsi
negq %rsi
movq (%rdi), %rax
movl $0x1, %edx
callq *(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%r13)
movq $0x0, 0x10(%r13)
movq 0x10(%rbx), %rdi
callq 0xb5ecc6
movq 0x10(%rbx), %rdi
vmovsd 0x8(%rsp), %xmm0
callq 0xb5d720
jmp 0x1437141
movq %r14, %rdi
callq 0x1ee612d
testq %r12, %r12
je 0x1437002
movq 0x20(%rbx), %rdi
shlq $0x4, %r12
leaq (%r12,%r12,4), %rsi
negq %rsi
movq (%rdi), %rax
movl $0x1, %edx
callq *(%rax)
movq $0x0, 0x30(%rbx)
movq %r15, 0x38(%rbx)
movq 0x60(%rbx), %r12
cmpq $0x0, 0x58(%rbx)
je 0x1437021
movq $0x0, 0x58(%rbx)
cmpq %r12, 0x60(%rbx)
jne 0x1437034
movq $0x0, 0x58(%rbx)
jmp 0x1437107
movq 0x68(%rbx), %r14
testq %r12, %r12
je 0x143704f
movq 0x48(%rbx), %rdi
movq %r12, %rsi
shlq $0x5, %rsi
movq (%rdi), %rax
xorl %edx, %edx
callq *(%rax)
movq %r12, %rdi
shlq $0x5, %rdi
cmpq $0x1c00000, %rdi # imm = 0x1C00000
jb 0x143706a
leaq 0x50(%rbx), %rsi
callq 0x1ee665d
jmp 0x1437074
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, 0x68(%rbx)
cmpq $0x0, 0x58(%rbx)
je 0x14370af
movl $0x10, %eax
xorl %ecx, %ecx
movq 0x68(%rbx), %rdx
vmovaps -0x10(%r14,%rax), %xmm0
vmovaps %xmm0, -0x10(%rdx,%rax)
vmovaps (%r14,%rax), %xmm0
vmovaps %xmm0, (%rdx,%rax)
incq %rcx
addq $0x20, %rax
cmpq 0x58(%rbx), %rcx
jb 0x1437086
movq 0x60(%rbx), %r15
testq %r14, %r14
je 0x14370de
movq %r15, %rsi
shlq $0x5, %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x14370d6
movzbl 0x50(%rbx), %edx
movq %r14, %rdi
callq 0x1ee67ca
jmp 0x14370de
movq %r14, %rdi
callq 0x1ee612d
testq %r15, %r15
je 0x14370fb
movq 0x48(%rbx), %rdi
shlq $0x5, %r15
negq %r15
movq (%rdi), %rax
movq %r15, %rsi
movl $0x1, %edx
callq *(%rax)
movq $0x0, 0x58(%rbx)
movq %r12, 0x60(%rbx)
movq 0x10(%rbx), %rdi
vbroadcastss 0xab490c(%rip), %xmm0 # 0x1eeba20
leaq 0x70(%rsp), %rdx
vmovaps %xmm0, (%rdx)
vbroadcastss 0xab5a5e(%rip), %xmm1 # 0x1eecb84
vmovaps %xmm1, 0x10(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovaps %xmm1, 0x30(%rdx)
movl $0x8, %esi
xorl %ecx, %ecx
callq 0xb5ccb2
addq $0x4c88, %rsp # imm = 0x4C88
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
movq 0x70(%rsp), %rdi
cmpq %r13, %rdi
je 0x143716a
callq 0x6a4f0
jmp 0x143716a
movq %rax, %rbx
movq 0x30(%rsp), %rdi
cmpq %r15, %rdi
je 0x143717e
callq 0x6a4f0
jmp 0x143717e
movq %rax, %rbx
movq 0x10(%rsp), %rdi
cmpq %r12, %rdi
je 0x143718d
callq 0x6a4f0
movq %rbx, %rdi
callq 0x6a600
nop
|
/embree[P]embree/kernels/bvh/bvh_builder_subdiv.cpp
|
embree::BVHNodeRecordMB4D<embree::NodeRefPtr<4>> const embree::avx::BVHBuilderMSMBlur::build<embree::NodeRefPtr<4>, embree::avx::SubdivRecalculatePrimRef, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::rebuild(unsigned long, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface>(embree::vector_t<embree::PrimRefMB, embree::aligned_monitored_allocator<embree::PrimRefMB, 16ul>>&, embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>> const&, embree::MemoryMonitorInterface*, embree::avx::SubdivRecalculatePrimRef, embree::BVHN<4>::CreateAlloc, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Create, embree::AABBNodeMB4D_t<embree::NodeRefPtr<4>, 4>::Set, embree::avx::BVHNSubdivPatch1MBlurBuilderSAH<4>::rebuild(unsigned long, embree::ParallelForForPrefixSumState<embree::PrimInfoMBT<embree::LBBox<embree::Vec3fa>>>&)::'lambda'(embree::avx::BVHBuilderMSMBlur::BuildRecord const&, embree::FastAllocator::CachedAllocator const&), embree::Scene::BuildProgressMonitorInterface, embree::avx::BVHBuilderMSMBlur::Settings const&)
|
static const BVHNodeRecordMB4D<NodeRef> build(mvector<PrimRefMB>& prims,
const PrimInfoMB& pinfo,
MemoryMonitorInterface* device,
const RecalculatePrimRef recalculatePrimRef,
const CreateAllocFunc createAlloc,
const CreateNodeFunc createNode,
const SetNodeFunc setNode,
const CreateLeafFunc createLeaf,
const ProgressMonitorFunc progressMonitor,
const Settings& settings)
{
typedef BuilderT<
NodeRef,
RecalculatePrimRef,
decltype(createAlloc()),
CreateAllocFunc,
CreateNodeFunc,
SetNodeFunc,
CreateLeafFunc,
ProgressMonitorFunc> Builder;
Builder builder(device,
recalculatePrimRef,
createAlloc,
createNode,
setNode,
createLeaf,
progressMonitor,
settings);
return builder(prims,pinfo);
}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x1a8, %rsp # imm = 0x1A8
movq %r9, %r10
movq %r8, %rax
movq %rdx, %r15
movq %rsi, %r14
movq %rdi, %rbx
movq 0x1d0(%rsp), %r8
movq 0x1f0(%rsp), %rdx
movq 0x1e8(%rsp), %rsi
leaq 0xcca32a(%rip), %rdi # 0x2102970
leaq 0x40(%rsp), %r9
movq %rdi, (%r9)
movq 0x8(%rsi), %rsi
movq %rsi, 0x8(%r9)
vmovups 0x1d8(%rsp), %xmm0
vmovups %xmm0, (%rsp)
movq %rdx, 0x10(%rsp)
leaq 0x50(%rsp), %r12
movq %r12, %rdi
movq %rcx, %rsi
movq %rax, %rdx
movq %r10, %rcx
callq 0x143a0ac
vmovups (%r15), %ymm0
vmovups 0x20(%r15), %ymm1
vmovups 0x40(%r15), %ymm2
vmovups 0x70(%r15), %ymm3
leaq 0xf0(%rsp), %rdx
vmovups %ymm3, 0x80(%rdx)
movq $0x1, (%rdx)
vmovups %ymm0, 0x10(%rdx)
vmovups %ymm1, 0x30(%rdx)
vmovups %ymm2, 0x50(%rdx)
vmovaps 0x60(%r15), %xmm0
vmovaps %xmm0, 0x70(%rdx)
movq %r14, 0xa0(%rdx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x0, 0x30(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmovups %xmm0, (%rsp)
movq %rbx, %rdi
movq %r12, %rsi
movl $0x1, %ecx
vzeroupper
callq 0x143a1da
mfence
movq %rbx, %rax
addq $0x1a8, %rsp # imm = 0x1A8
popq %rbx
popq %r12
popq %r14
popq %r15
retq
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_msmblur.h
|
embree::avx::HeuristicMBlurTemporalSplit<embree::PrimRefMB, embree::avx::SubdivRecalculatePrimRef, 2ul>::TemporalBinInfo::merge2(embree::avx::HeuristicMBlurTemporalSplit<embree::PrimRefMB, embree::avx::SubdivRecalculatePrimRef, 2ul>::TemporalBinInfo const&, embree::avx::HeuristicMBlurTemporalSplit<embree::PrimRefMB, embree::avx::SubdivRecalculatePrimRef, 2ul>::TemporalBinInfo const&)
|
static __forceinline const TemporalBinInfo merge2(const TemporalBinInfo& a, const TemporalBinInfo& b) {
TemporalBinInfo r = a; r.merge(b); return r;
}
|
movq %rdi, %rax
vmovaps (%rsi), %xmm0
vmovaps %xmm0, (%rdi)
vmovaps 0x10(%rsi), %xmm0
vmovaps %xmm0, 0x10(%rdi)
vmovaps 0x20(%rsi), %xmm1
vmovaps %xmm1, 0x20(%rdi)
vmovaps 0x30(%rsi), %xmm2
vmovaps %xmm2, 0x30(%rdi)
vmovaps 0x40(%rsi), %xmm2
vmovaps %xmm2, 0x40(%rdi)
vmovaps 0x50(%rsi), %xmm2
vmovaps %xmm2, 0x50(%rdi)
vmovaps 0x60(%rsi), %xmm3
vmovaps %xmm3, 0x60(%rdi)
vmovaps 0x70(%rsi), %xmm3
vmovaps %xmm3, 0x70(%rdi)
vmovaps 0x80(%rsi), %xmm3
vmovaps %xmm3, 0x80(%rdi)
vmovdqa (%rdi), %xmm3
vpaddq (%rdx), %xmm3, %xmm3
vmovdqa %xmm3, (%rdi)
vminps 0x10(%rdx), %xmm0, %xmm0
vmovaps %xmm0, 0x10(%rdi)
vmaxps 0x20(%rdx), %xmm1, %xmm0
vmovaps %xmm0, 0x20(%rdi)
vmovaps 0x30(%rdi), %xmm0
vminps 0x30(%rdx), %xmm0, %xmm0
vmovaps %xmm0, 0x30(%rdi)
vmovaps 0x40(%rdi), %xmm0
vmaxps 0x40(%rdx), %xmm0, %xmm0
vmovaps %xmm0, 0x40(%rdi)
vminps 0x50(%rdx), %xmm2, %xmm0
vmovaps %xmm0, 0x50(%rdi)
vmovaps 0x60(%rdi), %xmm0
vmaxps 0x60(%rdx), %xmm0, %xmm0
vmovaps %xmm0, 0x60(%rdi)
vmovaps 0x70(%rdi), %xmm0
vminps 0x70(%rdx), %xmm0, %xmm0
vmovaps %xmm0, 0x70(%rdi)
vmovaps 0x80(%rdi), %xmm0
vmaxps 0x80(%rdx), %xmm0, %xmm0
vmovaps %xmm0, 0x80(%rdi)
retq
|
/embree[P]embree/kernels/bvh/../builders/heuristic_timesplit_array.h
|
unsigned long embree::parallel_filter<embree::PrimRefMB, unsigned long, embree::avx::HeuristicMBlurTemporalSplit<embree::PrimRefMB, embree::avx::SubdivRecalculatePrimRef, 2ul>::split(embree::avx::BinSplit<32ul> const&, embree::SetMB const&, embree::SetMB&, embree::SetMB&)::'lambda0'(embree::PrimRefMB const&)>(embree::PrimRefMB*, unsigned long, unsigned long, unsigned long, embree::avx::HeuristicMBlurTemporalSplit<embree::PrimRefMB, embree::avx::SubdivRecalculatePrimRef, 2ul>::split(embree::avx::BinSplit<32ul> const&, embree::SetMB const&, embree::SetMB&, embree::SetMB&)::'lambda0'(embree::PrimRefMB const&) const&)
|
inline Index parallel_filter( Ty* data, const Index begin, const Index end, const Index minStepSize, const Predicate& predicate)
{
/* sequential fallback */
if (end-begin <= minStepSize)
return sequential_filter(data,begin,end,predicate);
/* calculate number of tasks to use */
enum { MAX_TASKS = 64 };
const Index numThreads = TaskScheduler::threadCount();
const Index numBlocks = (end-begin+minStepSize-1)/minStepSize;
const Index taskCount = min(numThreads,numBlocks,(Index)MAX_TASKS);
/* filter blocks */
Index nused[MAX_TASKS];
Index nfree[MAX_TASKS];
parallel_for(taskCount, [&](const Index taskIndex)
{
const Index i0 = begin+(taskIndex+0)*(end-begin)/taskCount;
const Index i1 = begin+(taskIndex+1)*(end-begin)/taskCount;
const Index i2 = sequential_filter(data,i0,i1,predicate);
nused[taskIndex] = i2-i0;
nfree[taskIndex] = i1-i2;
});
/* calculate offsets */
Index sused=0;
Index sfree=0;
Index pfree[MAX_TASKS];
for (Index i=0; i<taskCount; i++)
{
sused+=nused[i];
Index cfree = nfree[i]; pfree[i] = sfree; sfree+=cfree;
}
/* return if we did not filter out any element */
assert(sfree <= end-begin);
assert(sused <= end-begin);
if (sused == end-begin)
return end;
/* otherwise we have to copy misplaced elements around */
parallel_for(taskCount, [&](const Index taskIndex)
{
/* destination to write elements to */
Index dst = begin+(taskIndex+0)*(end-begin)/taskCount+nused[taskIndex];
Index dst_end = min(dst+nfree[taskIndex],begin+sused);
if (dst_end <= dst) return;
/* range of misplaced elements to copy to destination */
Index r0 = pfree[taskIndex];
Index r1 = r0+dst_end-dst;
/* find range in misplaced elements in back to front order */
Index k0=0;
for (Index i=taskCount-1; i>0; i--)
{
if (k0 > r1) break;
Index k1 = k0+nused[i];
Index src = begin+(i+0)*(end-begin)/taskCount+nused[i];
for (Index i=max(r0,k0); i<min(r1,k1); i++) {
Index isrc = src-i+k0-1;
assert(dst >= begin && dst < end);
assert(isrc >= begin && isrc < end);
data[dst++] = data[isrc];
}
k0 = k1;
}
});
return begin+sused;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x708, %rsp # imm = 0x708
movq %r8, %rbx
movq %rdi, 0x28(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdx, 0x20(%rsp)
movq %rdx, %rax
subq %rsi, %rax
cmpq %rcx, %rax
jbe 0x1441e0b
movq %rcx, %r14
xorl %edi, %edi
callq 0x6ab80
movslq %eax, %rcx
leaq 0x10(%rsp), %rsi
movq (%rsi), %rax
leaq 0x20(%rsp), %r13
movq (%r13), %rdx
addq %r14, %rdx
notq %rax
addq %rdx, %rax
xorl %edx, %edx
divq %r14
cmpq %rax, %rcx
cmovaeq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r14d
cmovbq %rcx, %r14
leaq 0x30(%rsp), %rbp
movq %r14, (%rbp)
leaq 0x80(%rsp), %r12
movq %rsi, (%r12)
movq %r13, 0x8(%r12)
movq %rbp, 0x10(%r12)
leaq 0x28(%rsp), %rax
movq %rax, 0x18(%r12)
movq %rbx, 0x20(%r12)
leaq 0x500(%rsp), %rax
movq %rax, 0x28(%r12)
leaq 0x300(%rsp), %rax
movq %rax, 0x30(%r12)
leaq 0x100(%rsp), %r15
movw $0x401, 0xc(%r15) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r15)
movq $0x8, 0x40(%r15)
movq %r15, %rdi
callq 0x6a660
leaq 0x40(%rsp), %rbx
movq %r12, (%rbx)
leaq 0x38(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r14, %rsi
movq %rbx, %rcx
movq %r15, %r9
callq 0x1445214
leaq 0x100(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1441f91
leaq 0x100(%rsp), %r15
movq %r15, %rdi
callq 0x6aab0
movq $0x0, 0x18(%rsp)
movq 0x30(%rsp), %r14
testq %r14, %r14
je 0x1441ea8
xorl %ecx, %ecx
xorl %edx, %edx
xorl %eax, %eax
addq 0x500(%rsp,%rcx,8), %rax
movq %rdx, 0x100(%rsp,%rcx,8)
addq 0x300(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %r14
jne 0x1441de6
jmp 0x1441eaa
cmpq %rsi, %rdx
jbe 0x1441f7c
leaq (%rsi,%rsi,4), %rcx
shlq $0x4, %rcx
addq %rdi, %rcx
addq $0x44, %rcx
vmovss 0xabd239(%rip), %xmm0 # 0x1eff064
vmovss 0xabd235(%rip), %xmm1 # 0x1eff068
movq (%rbx), %rdx
vmovss (%rcx), %xmm2
vmulss %xmm0, %xmm2, %xmm3
vmovss (%rdx), %xmm4
vucomiss %xmm3, %xmm4
jae 0x1441e9a
vmovss -0x4(%rcx), %xmm3
vmulss %xmm1, %xmm3, %xmm4
vucomiss 0x4(%rdx), %xmm4
jae 0x1441e9a
leaq (%rsi,%rsi,4), %rdx
incq %rsi
shlq $0x4, %rdx
vmovaps -0x44(%rcx), %xmm4
vmovaps %xmm4, (%rdi,%rdx)
vmovaps -0x34(%rcx), %xmm4
vmovaps %xmm4, 0x10(%rdi,%rdx)
vmovaps -0x24(%rcx), %xmm4
vmovaps %xmm4, 0x20(%rdi,%rdx)
vmovaps -0x14(%rcx), %xmm4
vmovaps %xmm4, 0x30(%rdi,%rdx)
vmovss %xmm3, 0x40(%rdi,%rdx)
vmovss %xmm2, 0x44(%rdi,%rdx)
addq $0x50, %rcx
decq %rax
jne 0x1441e33
jmp 0x1441f7c
xorl %eax, %eax
movq %rax, 0x18(%rsp)
movq 0x20(%rsp), %rsi
movq %rsi, %rcx
subq 0x10(%rsp), %rcx
cmpq %rcx, %rax
je 0x1441f7c
leaq 0x10(%rsp), %rax
movq %rax, 0x40(%rsp)
movq %r13, 0x48(%rsp)
movq %rbp, 0x50(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0x58(%rsp)
leaq 0x300(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x68(%rsp)
movq %r15, 0x70(%rsp)
leaq 0x28(%rsp), %rax
movq %rax, 0x78(%rsp)
leaq 0x80(%rsp), %r15
movw $0x401, 0xc(%r15) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r15)
movq $0x8, 0x40(%r15)
movq %r15, %rdi
callq 0x6a660
leaq 0x38(%rsp), %rcx
movq %rbx, (%rcx)
leaq 0xf(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r14, %rsi
movq %r15, %r9
callq 0x1445aa8
leaq 0x80(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1441fc3
leaq 0x80(%rsp), %rdi
callq 0x6aab0
movq 0x18(%rsp), %rsi
addq 0x10(%rsp), %rsi
movq %rsi, %rax
addq $0x708, %rsp # imm = 0x708
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xaa9b4b(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xce2c54(%rip), %rsi # 0x2124c08
movq 0xce2a0d(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xaa9b19(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xce2c22(%rip), %rsi # 0x2124c08
movq 0xce29db(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1442016
jmp 0x1442043
jmp 0x1442013
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x144202e
movq %rax, %rbx
leaq 0x80(%rsp), %rdi
callq 0x6aab0
jmp 0x144203b
jmp 0x1442043
jmp 0x1442043
jmp 0x144202b
movq %rax, %rbx
leaq 0x100(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
nop
|
/embree[P]embree/kernels/bvh/../builders/../../common/algorithms/parallel_filter.h
|
embree::avx::BVH4Triangle4iMeshBuilderMortonGeneral(void*, embree::TriangleMesh*, unsigned int, unsigned long)
|
Builder* BVH4Triangle4iMeshBuilderMortonGeneral (void* bvh, TriangleMesh* mesh, unsigned int geomID, size_t mode) { return new class BVHNMeshBuilderMorton<4,TriangleMesh,Triangle4i>((BVH4*)bvh,mesh,geomID,4,4); }
|
pushq %r15
pushq %r14
pushq %rbx
movl %edx, %ebx
movq %rsi, %r14
movq %rdi, %r15
movl $0x78, %edi
callq 0x6a170
xorl %ecx, %ecx
movq %rcx, 0x8(%rax)
leaq 0xcd3930(%rip), %rdx # 0x211b1d8
movq %rdx, (%rax)
movq %r15, 0x10(%rax)
movq %r14, 0x18(%rax)
movq 0x60(%r15), %rdx
leaq 0x550(%rdx), %rsi
testq %rdx, %rdx
cmoveq %rdx, %rsi
movq %rsi, 0x20(%rax)
movb $0x0, 0x28(%rax)
movq %rcx, 0x40(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x30(%rax)
vmovaps 0xb06d3e(%rip), %ymm0 # 0x1f4e620
vmovups %ymm0, 0x48(%rax)
movq $0x400, 0x68(%rax) # imm = 0x400
movl %ebx, 0x70(%rax)
movl $0x0, 0x74(%rax)
popq %rbx
popq %r14
popq %r15
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_builder_morton.cpp
|
embree::avx::BVHNMeshBuilderMorton<8, embree::QuadMesh, embree::QuadMv<4>>::build()
|
/* Builds the BVH for a single mesh using Morton codes.
   Resets allocator state when the primitive count changed, sizes the fast
   allocator from an estimate of node + leaf bytes, generates one Morton code
   per primitive, and hands tree construction to BVHBuilderMorton. */
void build()
{
/* we reset the allocator when the mesh size changed */
if (mesh->numPrimitives != numPreviousPrimitives) {
bvh->alloc.clear();
morton.clear();
}
size_t numPrimitives = mesh->size();
numPreviousPrimitives = numPrimitives;
/* skip build for empty scene */
if (numPrimitives == 0) {
bvh->set(BVH::emptyNode,empty,0);
return;
}
/* preallocate arrays */
morton.resize(numPrimitives);
/* node bytes are estimated assuming ~4*N primitives per node; leaf bytes get a 1.2x
   safety factor on the block count. NOTE(review): 1.2f is a heuristic fudge factor. */
size_t bytesEstimated = numPrimitives*sizeof(AABBNode)/(4*N) + size_t(1.2f*Primitive::blocks(numPrimitives)*sizeof(Primitive));
size_t bytesMortonCodes = numPrimitives*sizeof(BVHBuilderMorton::BuildPrim);
bytesEstimated = max(bytesEstimated,bytesMortonCodes); // the first allocation block is reused to sort the morton codes
bvh->alloc.init(bytesMortonCodes,bytesMortonCodes,bytesEstimated);
/* create morton code array */
BVHBuilderMorton::BuildPrim* dest = (BVHBuilderMorton::BuildPrim*) bvh->alloc.specialAlloc(bytesMortonCodes);
/* numPrimitivesGen may be smaller than numPrimitives if invalid primitives were skipped */
size_t numPrimitivesGen = createMortonCodeArray<Mesh>(mesh,morton,bvh->scene->progressInterface);
/* create BVH */
SetBVHNBounds<N> setBounds(bvh);
CreateMortonLeaf<N,Primitive> createLeaf(mesh,geomID_,morton.data());
CalculateMeshBounds<Mesh> calculateBounds(mesh);
auto root = BVHBuilderMorton::build<NodeRecord>(
typename BVH::CreateAlloc(bvh),
typename BVH::AABBNode::Create(),
setBounds,createLeaf,calculateBounds,bvh->scene->progressInterface,
morton.data(),dest,numPrimitivesGen,settings);
/* publish the root; note the scene-visible count is the full numPrimitives */
bvh->set(root.ref,LBBox3fa(root.bounds),numPrimitives);
#if ROTATE_TREE
/* optional post-pass: tree rotations to improve SAH quality (BVH4 only) */
if (N == 4)
{
for (int i=0; i<ROTATE_TREE; i++)
BVHNRotate<N>::rotate(bvh->root);
bvh->clearBarrier(bvh->root);
}
#endif
/* clear temporary data for static geometry */
if (bvh->scene->isStaticAccel()) {
morton.clear();
}
bvh->cleanup();
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xe8, %rsp
movq %rdi, %rbx
movq 0x18(%rdi), %rax
movl 0x20(%rax), %eax
cmpl 0x74(%rdi), %eax
je 0x146522e
movq 0x10(%rbx), %r15
leaq 0x78(%r15), %r14
movq %r14, %rdi
callq 0x90af76
movq 0x1a8(%r15), %r12
movq 0x1b0(%r15), %r13
cmpq %r13, %r12
je 0x146508f
movq (%r12), %rdi
movq %r14, %rsi
callq 0x90afd2
addq $0x8, %r12
jmp 0x1465078
movq 0x1a8(%r15), %rax
cmpq %rax, 0x1b0(%r15)
je 0x14650a6
movq %rax, 0x1b0(%r15)
xorl %eax, %eax
xchgq %rax, 0x190(%r15)
xorl %eax, %eax
xchgq %rax, 0x198(%r15)
xorl %eax, %eax
xchgq %rax, 0x1a0(%r15)
movq 0x170(%r15), %rax
testq %rax, %rax
je 0x14650f6
movq 0x170(%r15), %rdi
movq 0x78(%r15), %r14
movzbl 0x180(%r15), %ebp
movq 0x18(%rdi), %r12
movq %r14, %rsi
movl %ebp, %edx
callq 0x90b55c
movq %r12, %rdi
testq %r12, %r12
jne 0x14650e0
xorl %eax, %eax
xchgq %rax, 0x170(%r15)
movq 0x178(%r15), %rax
testq %rax, %rax
je 0x1465134
movq 0x178(%r15), %rdi
movq 0x78(%r15), %r14
movzbl 0x180(%r15), %ebp
movq 0x18(%rdi), %r12
movq %r14, %rsi
movl %ebp, %edx
callq 0x90b55c
movq %r12, %rdi
testq %r12, %r12
jne 0x146511e
xorl %eax, %eax
xchgq %rax, 0x178(%r15)
movq $-0x8, %rax
xorl %ecx, %ecx
xchgq %rcx, 0x130(%r15,%rax,8)
xorl %ecx, %ecx
xchgq %rcx, 0x170(%r15,%rax,8)
incq %rax
jne 0x1465144
movq 0x1e0(%r15), %r14
movq 0x1e8(%r15), %rdi
testq %rdi, %rdi
je 0x1465194
movq %r14, %rsi
shlq $0x5, %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x146518f
movzbl 0x1d0(%r15), %edx
callq 0x1ee67ca
jmp 0x1465194
callq 0x1ee612d
leaq 0x1d8(%r15), %r12
testq %r14, %r14
je 0x14651bb
movq 0x1c8(%r15), %rdi
shlq $0x5, %r14
negq %r14
movq (%rdi), %rax
movq %r14, %rsi
movl $0x1, %edx
callq *(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%r12)
movq $0x0, 0x10(%r12)
movq 0x38(%rbx), %r14
movq 0x40(%rbx), %rdi
testq %rdi, %rdi
je 0x14651fc
leaq (,%r14,8), %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x14651f7
movzbl 0x28(%rbx), %edx
callq 0x1ee67ca
jmp 0x14651fc
callq 0x1ee612d
leaq 0x30(%rbx), %r15
testq %r14, %r14
je 0x146521d
movq 0x20(%rbx), %rdi
shlq $0x3, %r14
negq %r14
movq (%rdi), %rax
movq %r14, %rsi
movl $0x1, %edx
callq *(%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, (%r15)
movq $0x0, 0x10(%r15)
movq 0x18(%rbx), %rax
movl 0x20(%rax), %r14d
movl %r14d, 0x74(%rbx)
testq %r14, %r14
je 0x1465291
movq 0x38(%rbx), %rax
cmpq %r14, %rax
jae 0x146526a
movq %r14, %rbp
testq %rax, %rax
je 0x146526d
cmpq %r14, %rax
jae 0x146526a
movq %rax, %rbp
addq %rbp, %rbp
cmpq $0x1, %rbp
adcq $0x0, %rbp
cmpq %r14, %rbp
jb 0x1465258
jmp 0x146526d
movq %rax, %rbp
leaq 0x30(%rbx), %rax
cmpq %r14, 0x30(%rbx)
jbe 0x146527a
movq %r14, (%rax)
leaq 0x20(%rbx), %r15
cmpq %rbp, 0x38(%rbx)
movq %rax, 0x8(%rsp)
jne 0x14652d0
movq %r14, (%rax)
jmp 0x1465387
movq 0x10(%rbx), %rdi
vbroadcastss 0xa86782(%rip), %xmm0 # 0x1eeba20
leaq 0x50(%rsp), %rdx
vmovaps %xmm0, (%rdx)
vbroadcastss 0xa878d4(%rip), %xmm1 # 0x1eecb84
vmovaps %xmm1, 0x10(%rdx)
vmovaps %xmm0, 0x20(%rdx)
vmovaps %xmm1, 0x30(%rdx)
movl $0x8, %esi
xorl %ecx, %ecx
callq 0x14ce916
jmp 0x1465576
movq 0x40(%rbx), %r12
testq %rbp, %rbp
je 0x14652eb
movq (%r15), %rdi
leaq (,%rbp,8), %rsi
movq (%rdi), %rax
xorl %edx, %edx
callq *(%rax)
leaq (,%rbp,8), %rdi
cmpq $0x1c00000, %rdi # imm = 0x1C00000
jb 0x1465307
leaq 0x28(%rbx), %rsi
callq 0x1ee665d
jmp 0x1465311
movl $0x8, %esi
callq 0x1ee60ac
movq %rax, 0x40(%rbx)
cmpq $0x0, 0x30(%rbx)
je 0x1465333
xorl %eax, %eax
movq 0x40(%rbx), %rcx
movq (%r12,%rax,8), %rdx
movq %rdx, (%rcx,%rax,8)
incq %rax
cmpq 0x30(%rbx), %rax
jb 0x146531e
movq 0x38(%rbx), %r13
testq %r12, %r12
je 0x1465363
leaq (,%r13,8), %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x146535b
movzbl 0x28(%rbx), %edx
movq %r12, %rdi
callq 0x1ee67ca
jmp 0x1465363
movq %r12, %rdi
callq 0x1ee612d
testq %r13, %r13
je 0x146537f
movq (%r15), %rdi
shlq $0x3, %r13
negq %r13
movq (%rdi), %rax
movq %r13, %rsi
movl $0x1, %edx
callq *(%rax)
movq %r14, 0x30(%rbx)
movq %rbp, 0x38(%rbx)
leaq 0x3(%r14), %rax
shrq $0x2, %rax
vcvtsi2ss %eax, %xmm1, %xmm0
vmulss 0xa9bda1(%rip), %xmm0, %xmm0 # 0x1f0113c
vmulss 0xa9bda5(%rip), %xmm0, %xmm0 # 0x1f01148
leaq (,%r14,8), %rdx
vcvttss2si %xmm0, %rax
movq %rax, %rcx
vsubss 0xa86a89(%rip), %xmm0, %xmm0 # 0x1eebe44
vcvttss2si %xmm0, %rsi
sarq $0x3f, %rcx
andq %rcx, %rsi
orq %rax, %rsi
leaq (%rsi,%r14,8), %rcx
cmpq %rdx, %rcx
cmovbeq %rdx, %rcx
movq 0x10(%rbx), %rdi
addq $0x78, %rdi
movq %rdx, %rsi
callq 0xbbf3b4
movq 0x10(%rbx), %rax
movq 0x178(%rax), %rax
movq (%rax), %rcx
leaq (%rax,%rcx), %r12
addq $0x40, %r12
movq 0x10(%rbx), %rax
movq 0x18(%rbx), %rdi
movl $0x260, %edx # imm = 0x260
addq 0x68(%rax), %rdx
movq %r15, %rsi
callq 0x149394f
movq 0x10(%rbx), %rcx
movq 0x18(%rbx), %rsi
movl 0x70(%rbx), %edi
movq 0x40(%rbx), %rdx
leaq 0x78(%rcx), %r8
movq 0x68(%rcx), %r9
leaq 0xc9d53e(%rip), %r10 # 0x2102970
leaq 0x28(%rsp), %r11
movq %r10, (%r11)
movq 0x268(%r9), %r9
movq %r9, 0x8(%r11)
leaq 0x38(%rsp), %r9
movq %rsi, (%r9)
movq %rdx, 0x8(%r9)
movl %edi, 0x10(%r9)
leaq 0x20(%rsp), %rdi
movq %r8, (%rdi)
leaq 0x18(%rsp), %r8
movq %rcx, (%r8)
leaq 0x10(%rsp), %rcx
movq %rsi, (%rcx)
movq 0x68(%rbx), %rsi
leaq 0x50(%rsp), %r13
movq %rsi, 0x20(%r13)
vmovups 0x48(%rbx), %ymm0
vmovups %ymm0, (%r13)
movq %rdi, 0x28(%r13)
leaq 0x7(%rsp), %rsi
movq %rsi, 0x30(%r13)
movq %r8, 0x38(%r13)
movq %r9, 0x40(%r13)
movq %rcx, 0x48(%r13)
movq %r11, 0x50(%r13)
movq $0x0, 0x58(%r13)
leaq 0xb0(%rsp), %rbp
movq %rbp, %rdi
movq %r13, %rsi
movq %r12, %rcx
movq %rax, %r8
vzeroupper
callq 0x14655f4
movq 0x10(%rbx), %rdi
movq (%rbp), %rsi
vmovaps 0x10(%rbp), %xmm0
vmovaps 0x20(%rbp), %xmm1
vmovaps %xmm0, (%r13)
vmovaps %xmm1, 0x10(%r13)
vmovaps %xmm0, 0x20(%r13)
vmovaps %xmm1, 0x30(%r13)
leaq 0x50(%rsp), %rdx
movq %r14, %rcx
callq 0x14ce916
movq 0x10(%rbx), %rax
movq 0x68(%rax), %rax
testb $0x1, 0x238(%rax)
jne 0x146556d
movq 0x38(%rbx), %r14
movq 0x40(%rbx), %rdi
testq %rdi, %rdi
je 0x146553c
leaq (,%r14,8), %rsi
cmpq $0x1c00000, %rsi # imm = 0x1C00000
jb 0x1465537
movzbl 0x28(%rbx), %edx
callq 0x1ee67ca
jmp 0x146553c
callq 0x1ee612d
testq %r14, %r14
je 0x1465558
movq (%r15), %rdi
shlq $0x3, %r14
negq %r14
movq (%rdi), %rax
movq %r14, %rsi
movl $0x1, %edx
callq *(%rax)
vxorps %xmm0, %xmm0, %xmm0
movq 0x8(%rsp), %rax
vmovups %xmm0, (%rax)
movq $0x0, 0x10(%rax)
movq 0x10(%rbx), %rdi
callq 0x1242658
addq $0xe8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/bvh_builder_morton.cpp
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<4>>, embree::FastAllocator::CachedAllocator, embree::BVHN<4>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<4>, 4>::Create, embree::avx::SetBVHNBounds<4>, embree::avx::CreateMortonLeaf<4, embree::Object>, embree::avx::CalculateMeshBounds<embree::UserGeometry>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(unsigned long, embree::range<unsigned int> const&, embree::FastAllocator::CachedAllocator)
|
/* Builds a shallow subtree for a range that is too large for a single leaf.
   Greedily splits the largest splittable child until either branchingFactor
   children exist or every child fits in a leaf, then recurses into each child. */
ReductionTy createLargeLeaf(size_t depth, const range<unsigned>& current, Allocator alloc)
{
  /* exceeding the maximal allowed depth should never happen and is fatal */
  if (depth > maxDepth)
    throw_RTCError(RTC_ERROR_UNKNOWN,"depth limit reached");

  /* a sufficiently small range becomes a single leaf */
  if (current.size() <= maxLeafSize)
    return createLeaf(current,alloc);

  /* grow the child list by repeatedly halving the largest child */
  range<unsigned> children[MAX_BRANCHING_FACTOR];
  children[0] = current;
  size_t numChildren = 1;
  do {
    /* locate the splittable child holding the most primitives */
    size_t largest = size_t(-1);
    size_t largestSize = 0;
    for (size_t c=0; c<numChildren; c++)
    {
      const size_t childSize = children[c].size();
      /* children that already fit in a leaf cannot be split */
      if (childSize <= maxLeafSize)
        continue;
      if (childSize > largestSize) {
        largestSize = childSize;
        largest = c;
      }
    }
    /* stop when no child can be split any further */
    if (largest == size_t(-1)) break;

    /* replace the chosen child by its two halves; the last child moves
       into the vacated slot and both halves are appended at the end */
    auto halves = children[largest].split();
    children[largest] = children[numChildren-1];
    children[numChildren-1] = halves.first;
    children[numChildren+0] = halves.second;
    numChildren++;
  } while (numChildren < branchingFactor);

  /* allocate the interior node for the collected children */
  auto node = createNode(alloc,numChildren);

  /* recurse into every child and gather the reductions */
  ReductionTy bounds[MAX_BRANCHING_FACTOR];
  for (size_t c=0; c<numChildren; c++)
    bounds[c] = createLargeLeaf(depth+1,children[c],alloc);

  return setBounds(node,bounds,numChildren);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x258, %rsp # imm = 0x258
cmpq %rdx, 0x8(%rsi)
jb 0x146a11e
movq %rsi, %r12
movq %rdi, %r15
leaq 0x290(%rsp), %rbx
movl 0x4(%rcx), %esi
subl (%rcx), %esi
movq 0x18(%r12), %rax
cmpq %rsi, %rax
jae 0x1469daa
movq %rdx, 0x40(%rsp)
movq (%rcx), %rcx
movq %rcx, 0x90(%rsp)
movq (%r12), %rcx
movl $0x1, %r14d
testq %r14, %r14
je 0x1469c7e
movq $-0x1, %rdi
xorl %esi, %esi
xorl %r8d, %r8d
movl 0x94(%rsp,%rsi,8), %edx
subl 0x90(%rsp,%rsi,8), %edx
movl %edx, %r9d
cmpq %r9, %rax
setb %r10b
cmpq %r9, %r8
setb %r11b
movq %rsi, %rdx
testb %r11b, %r10b
jne 0x1469c6e
movq %rdi, %rdx
movq %r8, %r9
incq %rsi
movq %r9, %r8
movq %rdx, %rdi
cmpq %rsi, %r14
jne 0x1469c41
jmp 0x1469c85
movq $-0x1, %rdx
cmpq $-0x1, %rdx
je 0x1469cd1
movl 0x90(%rsp,%rdx,8), %esi
movl 0x94(%rsp,%rdx,8), %edi
leal (%rdi,%rsi), %r8d
shrl %r8d
shlq $0x20, %rdi
orq %r8, %rdi
shlq $0x20, %r8
orq %rsi, %r8
movq 0x88(%rsp,%r14,8), %rsi
movq %rsi, 0x90(%rsp,%rdx,8)
movq %r8, 0x88(%rsp,%r14,8)
movq %rdi, 0x90(%rsp,%r14,8)
incq %r14
cmpq $-0x1, %rdx
je 0x1469ce0
cmpq %rcx, %r14
jb 0x1469c30
movq (%rbx), %rbp
movq 0x8(%rbx), %rbx
movq $0x80, 0x18(%rsp)
movq (%rbx), %r13
movq 0x8(%r13), %rax
cmpq %rax, %rbp
je 0x1469e5a
movq %r13, 0x60(%rsp)
movb $0x1, 0x68(%rsp)
movq %r13, %rdi
callq 0x1ee7bb6
movq 0x8(%r13), %rax
testq %rax, %rax
je 0x1469d6e
movq 0xa8(%r13), %rax
addq 0x68(%r13), %rax
movq 0x8(%r13), %rcx
lock
addq %rax, 0x118(%rcx)
movq 0x58(%r13), %rax
addq 0x98(%r13), %rax
movq 0x50(%r13), %rcx
addq 0x90(%r13), %rcx
subq %rcx, %rax
movq 0x8(%r13), %rcx
lock
addq %rax, 0x120(%rcx)
movq 0xb0(%r13), %rax
addq 0x70(%r13), %rax
movq 0x8(%r13), %rcx
lock
addq %rax, 0x128(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, 0x58(%r13)
vmovups %ymm0, 0x48(%r13)
testq %rbp, %rbp
je 0x1469dc2
movq 0x10(%rbp), %rax
movq %rax, 0x60(%r13)
vmovups %ymm0, 0x88(%r13)
vmovups %ymm0, 0x98(%r13)
movq 0x10(%rbp), %rax
movq %rax, 0xa0(%r13)
jmp 0x1469dd4
movq 0x40(%r12), %rsi
movq %r15, %rdi
movq %rcx, %rdx
movq %rbx, %rcx
callq 0x146a288
jmp 0x146a073
vmovups %ymm0, 0x98(%r13)
vmovups %ymm0, 0x88(%r13)
movq %rbp, %rax
xchgq %rax, 0x8(%r13)
movq %r13, 0x38(%rsp)
leaq 0xcbca91(%rip), %rdi # 0x2126878
movq %rdi, 0xd0(%rsp)
movb $0x1, 0xd8(%rsp)
vzeroupper
callq 0x1ee7bb6
movq 0x138(%rbp), %rsi
cmpq 0x140(%rbp), %rsi
je 0x1469e21
movq 0x38(%rsp), %rax
movq %rax, (%rsi)
addq $0x8, 0x138(%rbp)
jmp 0x1469e32
leaq 0x130(%rbp), %rdi
leaq 0x38(%rsp), %rdx
callq 0x90b95a
cmpb $0x1, 0xd8(%rsp)
jne 0x1469e49
movq 0xd0(%rsp), %rdi
callq 0x1ee7c24
cmpb $0x1, 0x68(%rsp)
jne 0x1469e5a
movq 0x60(%rsp), %rdi
callq 0x1ee7c24
movq 0x18(%rsp), %rax
addq %rax, 0x28(%rbx)
movq 0x10(%rbx), %rcx
movl %ecx, %edx
negl %edx
andl $0xf, %edx
leaq (%rcx,%rax), %r13
addq %rdx, %r13
movq %r13, 0x10(%rbx)
cmpq 0x18(%rbx), %r13
movq %r15, 0x58(%rsp)
ja 0x146a08b
addq %rdx, 0x30(%rbx)
subq %rax, %r13
addq 0x8(%rbx), %r13
vbroadcastss 0xa81b84(%rip), %xmm2 # 0x1eeba20
vmovaps %xmm2, 0x60(%r13)
vmovaps %xmm2, 0x40(%r13)
vmovaps %xmm2, 0x20(%r13)
vbroadcastss 0xa82ccd(%rip), %xmm3 # 0x1eecb84
vmovaps %xmm3, 0x70(%r13)
vmovaps %xmm3, 0x50(%r13)
vmovaps %xmm3, 0x30(%r13)
vbroadcastsd 0xab7fa6(%rip), %ymm0 # 0x1f21e78
vmovups %ymm0, (%r13)
testq %r14, %r14
je 0x1469fd2
incq 0x40(%rsp)
leaq 0x90(%rsp), %rbp
leaq 0xf0(%rsp), %r15
movq %r12, 0x20(%rsp)
movq %r14, %r12
leaq 0x290(%rsp), %rbx
movq 0x10(%rbx), %rax
movq %rax, 0x10(%rsp)
vmovups (%rbx), %xmm0
vmovups %xmm0, (%rsp)
leaq 0x60(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x40(%rsp), %rdx
movq %rbp, %rcx
vzeroupper
callq 0x1469bda
movq 0x60(%rsp), %rax
movq %rax, -0x20(%r15)
vmovaps 0x70(%rsp), %xmm0
vmovaps %xmm0, -0x10(%r15)
vmovaps 0x80(%rsp), %xmm0
vmovaps %xmm0, (%r15)
addq $0x30, %r15
addq $0x8, %rbp
decq %r12
jne 0x1469f06
vbroadcastss 0xa81ab5(%rip), %xmm2 # 0x1eeba20
vbroadcastss 0xa82c10(%rip), %xmm3 # 0x1eecb84
leaq 0xf0(%rsp), %rax
xorl %ecx, %ecx
vmovaps -0x10(%rax), %xmm0
vmovaps (%rax), %xmm1
movq -0x20(%rax), %rdx
movq %rdx, (%r13,%rcx,8)
vmovss %xmm0, 0x20(%r13,%rcx,4)
vextractps $0x1, %xmm0, 0x40(%r13,%rcx,4)
vextractps $0x2, %xmm0, 0x60(%r13,%rcx,4)
vmovss %xmm1, 0x30(%r13,%rcx,4)
vextractps $0x1, %xmm1, 0x50(%r13,%rcx,4)
vminps %xmm0, %xmm2, %xmm2
vextractps $0x2, %xmm1, 0x70(%r13,%rcx,4)
vmaxps %xmm1, %xmm3, %xmm3
incq %rcx
addq $0x30, %rax
cmpq %rcx, %r14
jne 0x1469f7e
testq %r14, %r14
je 0x1469ff5
leaq 0xec(%rsp), %rax
xorl %ebx, %ebx
movq %r14, %rcx
movslq (%rax), %rdx
addq %rdx, %rbx
addq $0x30, %rax
decq %rcx
jne 0x1469fe4
jmp 0x1469ff7
xorl %ebx, %ebx
cmpq $0x1000, %rbx # imm = 0x1000
jb 0x146a055
testq %r14, %r14
je 0x146a055
leaq 0xec(%rsp), %r15
xorl %r12d, %r12d
vmovaps %xmm2, 0x40(%rsp)
vmovaps %xmm3, 0x20(%rsp)
cmpl $0xfff, (%r15) # imm = 0xFFF
jg 0x146a049
movq (%r13,%r12,8), %rdi
movl $0x1, %esi
vzeroupper
callq 0x1485014
vmovaps 0x20(%rsp), %xmm3
vmovaps 0x40(%rsp), %xmm2
orb $-0x80, 0x7(%r13,%r12,8)
incq %r12
addq $0x30, %r15
cmpq %r12, %r14
jne 0x146a01c
vmovd %ebx, %xmm0
vinsertps $0x30, %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0,1,2],xmm0[0]
movq 0x58(%rsp), %r15
movq %r13, (%r15)
vmovaps %xmm0, 0x10(%r15)
vmovaps %xmm3, 0x20(%r15)
movq %r15, %rax
addq $0x258, %rsp # imm = 0x258
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rcx, 0x10(%rbx)
shlq $0x2, %rax
movq 0x20(%rbx), %rcx
cmpq %rcx, %rax
jbe 0x146a0b8
leaq 0x18(%rsp), %rsi
movl $0x40, %edx
movq %rbp, %rdi
xorl %ecx, %ecx
callq 0x90b5ee
movq %rax, %r13
jmp 0x1469e93
leaq 0xd0(%rsp), %rsi
movq %rcx, (%rsi)
movl $0x40, %edx
movq %rbp, %rdi
movl $0x1, %ecx
callq 0x90b5ee
movq %rax, %r13
movq %rax, 0x8(%rbx)
movq 0x18(%rbx), %rax
subq 0x10(%rbx), %rax
addq 0x30(%rbx), %rax
movq %rax, 0x30(%rbx)
movq $0x0, 0x10(%rbx)
leaq 0xd0(%rsp), %rcx
movq (%rcx), %rcx
movq %rcx, 0x18(%rbx)
movq 0x18(%rsp), %rdx
movq %rdx, 0x10(%rbx)
cmpq %rcx, %rdx
ja 0x146a1a0
movq %rax, 0x30(%rbx)
jmp 0x1469e93
movl $0x30, %edi
callq 0x6a3b0
movq %rax, %rbx
leaq 0xe0(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0xa82afa(%rip), %rsi # 0x1eecc38
leaq 0xa82b06(%rip), %rdx # 0x1eecc4b
leaq 0xd0(%rsp), %rdi
callq 0x8d7230
leaq 0xc97757(%rip), %rax # 0x21018b0
movq %rax, (%rbx)
movl $0x1, 0x8(%rbx)
leaq 0x10(%rbx), %rdi
movq %rbx, %rax
addq $0x20, %rax
movq %rax, 0x10(%rbx)
movq 0xd0(%rsp), %rsi
movq 0xd8(%rsp), %rdx
addq %rsi, %rdx
callq 0x8d7100
leaq 0xc976bf(%rip), %rsi # 0x2101850
leaq -0xb9337c(%rip), %rdx # 0x8d6e1c
movq %rbx, %rdi
callq 0x6a5d0
movq %r12, 0x20(%rsp)
xorl %r15d, %r15d
movq %r15, 0x10(%rbx)
movq 0x20(%rbx), %rax
leaq 0xd0(%rsp), %rsi
movq %rax, (%rsi)
movl $0x40, %edx
movq %rbp, %rdi
movq %rsi, %rbp
xorl %ecx, %ecx
callq 0x90b5ee
movq %rax, %r13
movq %rax, 0x8(%rbx)
movq 0x18(%rbx), %rax
subq 0x10(%rbx), %rax
addq 0x30(%rbx), %rax
movq %rax, 0x30(%rbx)
movq %r15, 0x10(%rbx)
movq (%rbp), %rcx
movq %rcx, 0x18(%rbx)
movq 0x18(%rsp), %rdx
movq %rdx, 0x10(%rbx)
cmpq %rcx, %rdx
ja 0x146a20c
movq %rax, 0x30(%rbx)
movq 0x20(%rsp), %r12
jmp 0x1469e93
movq $0x0, 0x10(%rbx)
xorl %r13d, %r13d
jmp 0x146a202
movq %rax, %r14
leaq 0xd0(%rsp), %rdi
callq 0x8d6eda
jmp 0x146a261
jmp 0x146a22d
movq %rax, %rdi
callq 0x8d6de8
movq %rax, %r14
xorl %ebp, %ebp
jmp 0x146a24a
movq %rax, %r14
movq %rbx, %rdi
callq 0x6a0e0
movb $0x1, %bpl
movq 0xd0(%rsp), %rdi
cmpq %r15, %rdi
je 0x146a273
callq 0x6a4f0
jmp 0x146a273
movq %rax, %r14
leaq 0x60(%rsp), %rdi
callq 0x8d6eda
jmp 0x146a280
movq %rax, %r14
movb $0x1, %bpl
testb %bpl, %bpl
je 0x146a280
movq %rbx, %rdi
callq 0x6a8a0
movq %r14, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_morton.h
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<8>>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create, embree::avx::SetBVHNBounds<8>, embree::avx::CreateMortonLeaf<8, embree::Object>, embree::avx::CalculateMeshBounds<embree::UserGeometry>, embree::Scene::BuildProgressMonitorInterface>::build(embree::avx::BVHBuilderMorton::BuildPrim*, embree::avx::BVHBuilderMorton::BuildPrim*, unsigned long)
|
/* Top-level entry: radix-sorts the Morton codes (tmp is scratch of equal size),
   then recursively builds the tree over the full sorted range.
   Returns the root record (node reference + bounds). */
ReductionTy build(BuildPrim* src, BuildPrim* tmp, size_t numPrimitives)
{
/* sort morton codes */
morton = src;
radix_sort_u32(src,tmp,numPrimitives,singleThreadThreshold);
/* build BVH */
const ReductionTy root = recurse(1, range<unsigned>(0,(unsigned)numPrimitives), nullptr, true);
_mm_mfence(); // to allow non-temporal stores during build
return root;
}
|
pushq %r15
pushq %r14
pushq %rbx
subq $0x40, %rsp
movq %r8, %r14
movq %rsi, %r15
movq %rdi, %rbx
movq %rdx, 0x58(%rsi)
movq 0x20(%rsi), %rax
movq %rdx, %rdi
movq %rcx, %rsi
movq %r8, %rdx
movq %rax, %rcx
callq 0x14490df
leaq 0x38(%rsp), %rcx
movl $0x0, (%rcx)
movl %r14d, 0x4(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x20(%rsp)
movq $0x0, 0x30(%rsp)
movq 0x30(%rsp), %rax
movq %rax, 0x10(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmovups %xmm0, (%rsp)
movl $0x1, %edx
movq %rbx, %rdi
movq %r15, %rsi
movl $0x1, %r8d
callq 0x146cb16
mfence
movq %rbx, %rax
addq $0x40, %rsp
popq %rbx
popq %r14
popq %r15
retq
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_morton.h
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<8>>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create, embree::avx::SetBVHNBounds<8>, embree::avx::CreateMortonLeaf<8, embree::Object>, embree::avx::CalculateMeshBounds<embree::UserGeometry>, embree::Scene::BuildProgressMonitorInterface>::recreateMortonCodes(embree::range<unsigned int> const&) const
|
/* Recomputes (and re-sorts) the Morton codes of the primitives in `current`.
   Needed when a sub-range must be re-quantized against its own centroid bounds.
   Small ranges (<1024) run sequentially; larger ranges use parallel_reduce /
   parallel_for and a parallel sort. __noinline keeps this cold path out of
   the hot recursion. */
__noinline void recreateMortonCodes(const range<unsigned>& current) const
{
/* fast path for small ranges */
if (likely(current.size() < 1024))
{
/*! recalculate centroid bounds */
BBox3fa centBounds(empty);
for (size_t i=current.begin(); i<current.end(); i++)
centBounds.extend(center2(calculateBounds(morton[i])));
/* recalculate morton codes */
MortonCodeMapping mapping(centBounds);
for (size_t i=current.begin(); i<current.end(); i++)
morton[i].code = mapping.code(calculateBounds(morton[i]));
/* sort morton codes */
std::sort(morton+current.begin(),morton+current.end());
}
else
{
/*! recalculate centroid bounds */
auto calculateCentBounds = [&] ( const range<unsigned>& r ) {
BBox3fa centBounds = empty;
for (size_t i=r.begin(); i<r.end(); i++)
centBounds.extend(center2(calculateBounds(morton[i])));
return centBounds;
};
const BBox3fa centBounds = parallel_reduce(current.begin(), current.end(), unsigned(1024),
BBox3fa(empty), calculateCentBounds, BBox3fa::merge);
/* recalculate morton codes */
MortonCodeMapping mapping(centBounds);
parallel_for(current.begin(), current.end(), unsigned(1024), [&] ( const range<unsigned>& r ) {
for (size_t i=r.begin(); i<r.end(); i++) {
morton[i].code = mapping.code(calculateBounds(morton[i]));
}
});
/*! sort morton codes */
#if defined(TASKING_TBB)
tbb::parallel_sort(morton+current.begin(),morton+current.end());
#else
/* fallback when TBB tasking is not enabled */
radixsort32(morton+current.begin(),current.size());
#endif
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x148, %rsp # imm = 0x148
movq %rsi, %r14
movq %rdi, %rbx
movl (%rsi), %r12d
movl 0x4(%rsi), %ebp
movl %ebp, %eax
subl %r12d, %eax
cmpl $0x3ff, %eax # imm = 0x3FF
ja 0x146e21e
cmpl 0x4(%r14), %r12d
jae 0x146e03e
vbroadcastss 0xa7da74(%rip), %xmm4 # 0x1eeba20
vbroadcastss 0xa7ebcf(%rip), %xmm0 # 0x1eecb84
leaq 0xa0(%rsp), %r13
leaq 0x40(%rsp), %r15
vmovaps %xmm0, 0x10(%rsp)
vmovaps %xmm4, 0x20(%rsp)
movq 0x48(%rbx), %rax
movq 0x58(%rbx), %rcx
movq (%rax), %rax
movl 0x4(%rcx,%r12,8), %ecx
movq 0x18(%rax), %rdx
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl $0x0, 0x4c(%rsp)
movq %r13, 0x50(%rsp)
movq %r15, %rdi
callq *0x58(%rax)
vmovaps 0x20(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm0
vaddps 0xb0(%rsp), %xmm0, %xmm0
vminps %xmm0, %xmm4, %xmm4
vmovaps 0x10(%rsp), %xmm1
vmaxps %xmm0, %xmm1, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmovaps 0x10(%rsp), %xmm0
incq %r12
movl 0x4(%r14), %eax
cmpq %rax, %r12
jb 0x146dfc2
jmp 0x146e050
vbroadcastss 0xa7eb3d(%rip), %xmm0 # 0x1eecb84
vbroadcastss 0xa7d9d0(%rip), %xmm4 # 0x1eeba20
movl (%r14), %r12d
movl 0x4(%r14), %eax
cmpl %eax, %r12d
jae 0x146e1c3
vsubps %xmm4, %xmm0, %xmm0
vbroadcastss 0xab3df3(%rip), %xmm1 # 0x1f21e60
vcmpnleps %xmm1, %xmm0, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa7e691(%rip), %xmm3 # 0x1eec714
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vbroadcastss 0xae0608(%rip), %xmm2 # 0x1f4e6a0
vmulps %xmm2, %xmm0, %xmm0
vandps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x10(%rsp)
leaq 0xa0(%rsp), %r13
leaq 0x40(%rsp), %r15
vmovaps %xmm4, 0x20(%rsp)
movq 0x48(%rbx), %rax
movq 0x58(%rbx), %rcx
movq (%rax), %rax
movl 0x4(%rcx,%r12,8), %ecx
movq 0x18(%rax), %rdx
movq %rdx, 0x40(%rsp)
movl %ecx, 0x48(%rsp)
movl $0x0, 0x4c(%rsp)
movq %r13, 0x50(%rsp)
movq %r15, %rdi
callq *0x58(%rax)
vmovaps 0x20(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm0
vaddps 0xb0(%rsp), %xmm0, %xmm0
vsubps %xmm4, %xmm0, %xmm0
vmulps 0x10(%rsp), %xmm0, %xmm0
vcvtps2dq %xmm0, %xmm0
vextractps $0x1, %xmm0, %eax
movl %eax, %ecx
shll $0x10, %ecx
orl %eax, %ecx
andl $0x30000ff, %ecx # imm = 0x30000FF
movl %ecx, %eax
shll $0x8, %eax
orl %ecx, %eax
andl $0x300f00f, %eax # imm = 0x300F00F
movl %eax, %ecx
shll $0x4, %ecx
orl %eax, %ecx
vshufps $0xe8, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,2,3]
andl $0x30c30c3, %ecx # imm = 0x30C30C3
vpslld $0x10, %xmm0, %xmm1
vpor %xmm0, %xmm1, %xmm0
vbroadcastss 0xab9eca(%rip), %xmm1 # 0x1f2801c
vpand %xmm1, %xmm0, %xmm0
addl %ecx, %ecx
vpslld $0x8, %xmm0, %xmm1
vpor %xmm1, %xmm0, %xmm0
vbroadcastss 0xab9eb6(%rip), %xmm1 # 0x1f28020
vpand %xmm1, %xmm0, %xmm0
leal (%rcx,%rcx,4), %eax
vpslld $0x4, %xmm0, %xmm1
vpor %xmm1, %xmm0, %xmm0
vbroadcastss 0xab9ea1(%rip), %xmm1 # 0x1f28024
vpand %xmm1, %xmm0, %xmm0
andl $0x12492492, %eax # imm = 0x12492492
vpmulld 0xab9edb(%rip), %xmm0, %xmm0 # 0x1f28070
vpand 0xab9ee3(%rip), %xmm0, %xmm0 # 0x1f28080
vmovd %xmm0, %ecx
orl %eax, %ecx
vpextrd $0x1, %xmm0, %eax
movq 0x58(%rbx), %rdx
orl %ecx, %eax
movl %eax, (%rdx,%r12,8)
incq %r12
movl 0x4(%r14), %eax
cmpq %rax, %r12
jb 0x146e0b9
movl (%r14), %ecx
cmpl %eax, %ecx
je 0x146e20c
movq 0x58(%rbx), %rdx
leaq (%rdx,%rax,8), %rbx
shlq $0x3, %rax
leaq (,%rcx,8), %rsi
subq %rsi, %rax
leaq (%rdx,%rcx,8), %r14
sarq $0x3, %rax
bsrq %rax, %rdx
xorl $0x3f, %edx
addl %edx, %edx
xorq $0x7e, %rdx
movq %r14, %rdi
movq %rbx, %rsi
callq 0x144be00
movq %r14, %rdi
movq %rbx, %rsi
callq 0x144befb
addq $0x148, %rsp # imm = 0x148
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
leaq 0x38(%rsp), %rax
movq %rbx, (%rax)
vbroadcastss 0xa7d7f1(%rip), %xmm0 # 0x1eeba20
leaq 0x120(%rsp), %r15
vmovaps %xmm0, (%r15)
vbroadcastss 0xa7e93f(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, 0x10(%r15)
leaq 0xa0(%rsp), %r13
movw $0x401, 0xc(%r13) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r13)
movq $0x8, 0x40(%r13)
movq %r13, %rdi
callq 0x6a660
leaq 0x80(%rsp), %rdx
movl %ebp, (%rdx)
movl %r12d, 0x4(%rdx)
movq $0x400, 0x8(%rdx) # imm = 0x400
leaq -0xb638a1(%rip), %rax # 0x90a9f0
leaq 0x90(%rsp), %rcx
movq %rax, (%rcx)
leaq 0x38(%rsp), %rsi
movq %rsi, 0x8(%rcx)
leaq 0x40(%rsp), %r12
movq %r15, (%r12)
movq %rcx, 0x8(%r12)
movq %rax, 0x10(%r12)
vmovdqu (%r15), %ymm0
movq %rdx, %r15
vmovdqu %ymm0, 0x20(%r12)
leaq 0xf(%rsp), %rdx
movq %r15, %rdi
movq %r12, %rsi
movq %r13, %rcx
vzeroupper
callq 0x146e4e6
vmovaps 0x60(%rsp), %xmm0
vmovaps %xmm0, 0x10(%rsp)
vmovdqa 0x70(%rsp), %xmm0
vmovdqa %xmm0, 0x20(%rsp)
leaq 0xa0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x146e42c
leaq 0xa0(%rsp), %rdi
callq 0x6aab0
vmovaps 0x10(%rsp), %xmm0
vmovaps %xmm0, 0x40(%rsp)
vmovaps 0x20(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vbroadcastss 0xab3b2a(%rip), %xmm1 # 0x1f21e60
vcmpnleps %xmm1, %xmm0, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa7e3c8(%rip), %xmm3 # 0x1eec714
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vbroadcastss 0xae033f(%rip), %xmm2 # 0x1f4e6a0
vmulps %xmm2, %xmm0, %xmm0
vandps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x50(%rsp)
movq %rbx, 0x80(%rsp)
movq %r12, 0x88(%rsp)
leaq 0xa0(%rsp), %r12
movw $0x401, 0xc(%r12) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r12)
movq $0x8, 0x40(%r12)
vmovsd (%r14), %xmm0
vmovaps %xmm0, 0x10(%rsp)
movq %r12, %rdi
callq 0x6a660
vpshufd $0xe1, 0x10(%rsp), %xmm0 # xmm0 = mem[1,0,2,3]
leaq 0x120(%rsp), %rdi
vmovq %xmm0, (%rdi)
movq $0x400, 0x8(%rdi) # imm = 0x400
leaq 0x90(%rsp), %rsi
movq %r15, (%rsi)
leaq 0xf(%rsp), %rdx
movq %r12, %rcx
callq 0x146ed98
leaq 0xa0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x146e45e
leaq 0xa0(%rsp), %rdi
callq 0x6aab0
movq 0x58(%rbx), %rax
movl (%r14), %ecx
movl 0x4(%r14), %edx
leaq (%rax,%rcx,8), %rdi
leaq (%rax,%rdx,8), %rsi
leaq 0xa0(%rsp), %rdx
callq 0x144d1ee
jmp 0x146e20c
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa7d6b0(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xcb67b9(%rip), %rsi # 0x2124c08
movq 0xcb6572(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa7d67e(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xcb6787(%rip), %rsi # 0x2124c08
movq 0xcb6540(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x146e4b5
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x146e4c9
jmp 0x146e4de
jmp 0x146e4b2
jmp 0x146e4de
jmp 0x146e4c6
movq %rax, %rbx
leaq 0xa0(%rsp), %rdi
callq 0x6aab0
jmp 0x146e4d6
jmp 0x146e4de
movq %rax, %rbx
leaq 0xa0(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_morton.h
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<8>>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create, embree::avx::SetBVHNBounds<8>, embree::avx::CreateMortonLeaf<8, embree::InstancePrimitive>, embree::avx::CalculateMeshBounds<embree::Instance>, embree::Scene::BuildProgressMonitorInterface>::recreateMortonCodes(embree::range<unsigned int> const&) const::'lambda0'(embree::range<unsigned int> const&)::operator()(embree::range<unsigned int> const&) const
|
/* Tuning parameters of the Morton builder.
   @param branchingFactor       maximal number of children per node
   @param maxDepth              maximal allowed tree depth (exceeding it is fatal)
   @param minLeafSize           minimal number of primitives per leaf
   @param maxLeafSize           maximal number of primitives per leaf
   @param singleThreadThreshold below this size the build runs single-threaded */
Settings (size_t branchingFactor, size_t maxDepth, size_t minLeafSize, size_t maxLeafSize, size_t singleThreadThreshold)
: branchingFactor(branchingFactor), maxDepth(maxDepth), minLeafSize(minLeafSize), maxLeafSize(maxLeafSize), singleThreadThreshold(singleThreadThreshold)
{
  /* BUGFIX: the original body assigned to the shadowing *parameter*
     (`minLeafSize = min(minLeafSize,maxLeafSize);`), which is a dead store —
     the member had already been initialized with the raw value, so a caller
     passing minLeafSize > maxLeafSize kept an inconsistent setting.
     Clamp the member itself. */
  this->minLeafSize = min(this->minLeafSize,maxLeafSize);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x28, %rsp
movl (%rsi), %r13d
cmpl 0x4(%rsi), %r13d
jae 0x1478253
movq %rsi, %rbx
movq %rdi, %r14
movq (%rdi), %rbp
movq %rsp, %rcx
vbroadcastss 0xaaffe0(%rip), %xmm12 # 0x1f2801c
vbroadcastss 0xaaffdb(%rip), %xmm13 # 0x1f28020
vbroadcastss 0xaaffd6(%rip), %xmm14 # 0x1f28024
vpmovsxbd 0xad664d(%rip), %xmm15 # 0x1f4e6a4
movq 0x8(%r14), %r12
movq 0x48(%rbp), %rax
movq 0x58(%rbp), %r15
movq (%rax), %rsi
movzbl 0x3d(%rsi), %eax
shll $0x8, %eax
movq 0x60(%rsi), %rdi
cmpl $0x100, %eax # imm = 0x100
je 0x1478212
vmovaps (%rdi), %xmm0
vmovaps 0x10(%rdi), %xmm1
vmovaps 0x20(%rdi), %xmm2
vmovaps 0x30(%rdi), %xmm3
movq 0x58(%rsi), %rax
vmovaps 0x10(%rax), %xmm4
vmovaps 0x20(%rax), %xmm5
vminps 0x30(%rax), %xmm4, %xmm4
vmaxps 0x40(%rax), %xmm5, %xmm5
vshufps $0x0, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[0,0,0,0]
vshufps $0x55, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmulps %xmm4, %xmm2, %xmm4
vaddps %xmm4, %xmm3, %xmm4
vmulps %xmm7, %xmm1, %xmm7
vaddps %xmm4, %xmm7, %xmm8
vmulps %xmm6, %xmm0, %xmm6
vaddps %xmm6, %xmm8, %xmm9
vbroadcastss 0xa73949(%rip), %xmm10 # 0x1eeba20
vminps %xmm9, %xmm10, %xmm10
vbroadcastss 0xa74a9f(%rip), %xmm11 # 0x1eecb84
vmaxps %xmm9, %xmm11, %xmm9
vshufps $0xaa, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[2,2,2,2]
vmulps %xmm2, %xmm11, %xmm2
vaddps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm7, %xmm3
vaddps %xmm3, %xmm6, %xmm7
vminps %xmm7, %xmm10, %xmm10
vmaxps %xmm7, %xmm9, %xmm7
vshufps $0x55, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1,1,1]
vmulps %xmm1, %xmm9, %xmm1
vaddps %xmm1, %xmm4, %xmm4
vaddps %xmm4, %xmm6, %xmm9
vminps %xmm9, %xmm10, %xmm10
vmaxps %xmm9, %xmm7, %xmm7
vaddps %xmm2, %xmm1, %xmm1
vaddps %xmm1, %xmm6, %xmm2
vminps %xmm2, %xmm10, %xmm6
vmaxps %xmm2, %xmm7, %xmm2
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm0, %xmm0
vaddps %xmm0, %xmm8, %xmm5
vminps %xmm5, %xmm6, %xmm6
vmaxps %xmm5, %xmm2, %xmm2
vaddps %xmm3, %xmm0, %xmm3
vminps %xmm3, %xmm6, %xmm5
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm4, %xmm0, %xmm3
vminps %xmm3, %xmm5, %xmm4
vmaxps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm0, %xmm1
vminps %xmm1, %xmm4, %xmm0
vmaxps %xmm1, %xmm2, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vsubps (%r12), %xmm0, %xmm0
vmulps 0x10(%r12), %xmm0, %xmm0
vcvtps2dq %xmm0, %xmm0
vextractps $0x1, %xmm0, %eax
movl %eax, %edx
shll $0x10, %edx
orl %eax, %edx
andl $0x30000ff, %edx # imm = 0x30000FF
movl %edx, %eax
shll $0x8, %eax
orl %edx, %eax
andl $0x300f00f, %eax # imm = 0x300F00F
movl %eax, %edx
shll $0x4, %edx
orl %eax, %edx
andl $0x30c30c3, %edx # imm = 0x30C30C3
addl %edx, %edx
leal (%rdx,%rdx,4), %eax
andl $0x12492492, %eax # imm = 0x12492492
vshufps $0xe8, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,2,3]
vpslld $0x10, %xmm0, %xmm1
vpor %xmm0, %xmm1, %xmm0
vpand %xmm0, %xmm12, %xmm0
vpslld $0x8, %xmm0, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpand %xmm0, %xmm13, %xmm0
vpslld $0x4, %xmm0, %xmm1
vpor %xmm1, %xmm0, %xmm0
vpand %xmm0, %xmm14, %xmm0
vpmulld %xmm15, %xmm0, %xmm0
vpand 0xaafe91(%rip), %xmm0, %xmm0 # 0x1f28080
vmovd %xmm0, %edx
orl %eax, %edx
vpextrd $0x1, %xmm0, %eax
orl %edx, %eax
movl %eax, (%r15,%r13,8)
incq %r13
movl 0x4(%rbx), %eax
cmpq %rax, %r13
jb 0x1478057
jmp 0x1478253
leaq 0x10(%rsp), %rdx
callq 0x200bee
vpmovsxbd 0xad647f(%rip), %xmm15 # 0x1f4e6a4
vbroadcastss 0xaafdf6(%rip), %xmm14 # 0x1f28024
vbroadcastss 0xaafde9(%rip), %xmm13 # 0x1f28020
vbroadcastss 0xaafddc(%rip), %xmm12 # 0x1f2801c
movq %rsp, %rcx
vmovaps 0x10(%rsp), %xmm0
vmovaps (%rsp), %xmm1
jmp 0x147816b
addq $0x28, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_morton.h
|
embree::avx::BVHBuilderMorton::BuilderT<embree::BVHNodeRecord<embree::NodeRefPtr<8>>, embree::FastAllocator::CachedAllocator, embree::BVHN<8>::CreateAlloc, embree::AABBNode_t<embree::NodeRefPtr<8>, 8>::Create, embree::avx::SetBVHNBounds<8>, embree::avx::CreateMortonLeaf<8, embree::InstanceArrayPrimitive>, embree::avx::CalculateMeshBounds<embree::InstanceArray>, embree::Scene::BuildProgressMonitorInterface>::createLargeLeaf(unsigned long, embree::range<unsigned int> const&, embree::FastAllocator::CachedAllocator)
|
/* Builds a shallow subtree for a range that is too large for a single leaf:
   repeatedly splits the largest splittable child until branchingFactor
   children exist (or none can be split), then recurses into each child.
   Returns the reduction (node reference + bounds) produced by setBounds. */
ReductionTy createLargeLeaf(size_t depth, const range<unsigned>& current, Allocator alloc)
{
/* this should never occur but is a fatal error */
if (depth > maxDepth)
throw_RTCError(RTC_ERROR_UNKNOWN,"depth limit reached");
/* create leaf for few primitives */
if (current.size() <= maxLeafSize)
return createLeaf(current,alloc);
/* fill all children by always splitting the largest one */
range<unsigned> children[MAX_BRANCHING_FACTOR];
size_t numChildren = 1;
children[0] = current;
do {
/* find best child with largest number of primitives */
size_t bestChild = -1;
size_t bestSize = 0;
for (size_t i=0; i<numChildren; i++)
{
/* ignore leaves as they cannot get split */
if (children[i].size() <= maxLeafSize)
continue;
/* remember child with largest size */
if (children[i].size() > bestSize) {
bestSize = children[i].size();
bestChild = i;
}
}
/* no splittable child left -> accept the current child set */
if (bestChild == size_t(-1)) break;
/*! split best child into left and right child */
auto split = children[bestChild].split();
/* add new children left and right: the last child moves into the vacated
   slot, and the two halves are appended at positions numChildren-1/numChildren */
children[bestChild] = children[numChildren-1];
children[numChildren-1] = split.first;
children[numChildren+0] = split.second;
numChildren++;
} while (numChildren < branchingFactor);
/* create node */
auto node = createNode(alloc,numChildren);
/* recurse into each child */
ReductionTy bounds[MAX_BRANCHING_FACTOR];
for (size_t i=0; i<numChildren; i++)
bounds[i] = createLargeLeaf(depth+1,children[i],alloc);
return setBounds(node,bounds,numChildren);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x238, %rsp # imm = 0x238
cmpq %rdx, 0x8(%rsi)
jb 0x148042e
movq %rsi, %r12
movq %rdi, %r15
leaq 0x270(%rsp), %rbx
movl 0x4(%rcx), %esi
subl (%rcx), %esi
movq 0x18(%r12), %rax
cmpq %rsi, %rax
jae 0x148011f
movq %rdx, 0x28(%rsp)
movq (%rcx), %rcx
movq %rcx, 0x70(%rsp)
movq (%r12), %rcx
movl $0x1, %r14d
testq %r14, %r14
je 0x1480009
movq $-0x1, %rdi
xorl %esi, %esi
xorl %r8d, %r8d
movl 0x74(%rsp,%rsi,8), %edx
subl 0x70(%rsp,%rsi,8), %edx
movl %edx, %r9d
cmpq %r9, %rax
setb %r10b
cmpq %r9, %r8
setb %r11b
movq %rsi, %rdx
testb %r11b, %r10b
jne 0x147fff9
movq %rdi, %rdx
movq %r8, %r9
incq %rsi
movq %r9, %r8
movq %rdx, %rdi
cmpq %rsi, %r14
jne 0x147ffd2
jmp 0x1480010
movq $-0x1, %rdx
cmpq $-0x1, %rdx
je 0x148004a
movl 0x70(%rsp,%rdx,8), %esi
movl 0x74(%rsp,%rdx,8), %edi
leal (%rdi,%rsi), %r8d
shrl %r8d
shlq $0x20, %rdi
orq %r8, %rdi
shlq $0x20, %r8
orq %rsi, %r8
movq 0x68(%rsp,%r14,8), %rsi
movq %rsi, 0x70(%rsp,%rdx,8)
movq %r8, 0x68(%rsp,%r14,8)
movq %rdi, 0x70(%rsp,%r14,8)
incq %r14
cmpq $-0x1, %rdx
je 0x1480059
cmpq %rcx, %r14
jb 0x147ffc1
movq (%rbx), %r13
movq 0x8(%rbx), %rbx
movq $0x100, 0x20(%rsp) # imm = 0x100
movq (%rbx), %rbp
movq 0x8(%rbp), %rax
cmpq %rax, %r13
je 0x14801cd
movq %rbp, 0x40(%rsp)
movb $0x1, 0x48(%rsp)
movq %rbp, %rdi
callq 0x1ee7bb6
movq 0x8(%rbp), %rax
testq %rax, %rax
je 0x14800e7
movq 0xa8(%rbp), %rax
addq 0x68(%rbp), %rax
movq 0x8(%rbp), %rcx
lock
addq %rax, 0x118(%rcx)
movq 0x58(%rbp), %rax
addq 0x98(%rbp), %rax
movq 0x50(%rbp), %rcx
addq 0x90(%rbp), %rcx
subq %rcx, %rax
movq 0x8(%rbp), %rcx
lock
addq %rax, 0x120(%rcx)
movq 0xb0(%rbp), %rax
addq 0x70(%rbp), %rax
movq 0x8(%rbp), %rcx
lock
addq %rax, 0x128(%rcx)
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, 0x58(%rbp)
vmovups %ymm0, 0x48(%rbp)
testq %r13, %r13
je 0x1480137
movq 0x10(%r13), %rax
movq %rax, 0x60(%rbp)
vmovups %ymm0, 0x88(%rbp)
vmovups %ymm0, 0x98(%rbp)
movq 0x10(%r13), %rax
movq %rax, 0xa0(%rbp)
jmp 0x1480147
movq 0x40(%r12), %rsi
movq %r15, %rdi
movq %rcx, %rdx
movq %rbx, %rcx
callq 0x148059e
jmp 0x1480383
vmovups %ymm0, 0x98(%rbp)
vmovups %ymm0, 0x88(%rbp)
movq %r13, %rax
xchgq %rax, 0x8(%rbp)
movq %rbp, 0x30(%rsp)
leaq 0xca671e(%rip), %rdi # 0x2126878
movq %rdi, 0xb0(%rsp)
movb $0x1, 0xb8(%rsp)
vzeroupper
callq 0x1ee7bb6
movq 0x138(%r13), %rsi
cmpq 0x140(%r13), %rsi
je 0x1480194
movq 0x30(%rsp), %rax
movq %rax, (%rsi)
addq $0x8, 0x138(%r13)
jmp 0x14801a5
leaq 0x130(%r13), %rdi
leaq 0x30(%rsp), %rdx
callq 0x90b95a
cmpb $0x1, 0xb8(%rsp)
jne 0x14801bc
movq 0xb0(%rsp), %rdi
callq 0x1ee7c24
cmpb $0x1, 0x48(%rsp)
jne 0x14801cd
movq 0x40(%rsp), %rdi
callq 0x1ee7c24
movq 0x20(%rsp), %rax
addq %rax, 0x28(%rbx)
movq 0x10(%rbx), %rcx
movl %ecx, %edx
negl %edx
andl $0x1f, %edx
leaq (%rcx,%rax), %rbp
addq %rdx, %rbp
movq %rbp, 0x10(%rbx)
cmpq 0x18(%rbx), %rbp
ja 0x148039b
addq %rdx, 0x30(%rbx)
subq %rax, %rbp
addq 0x8(%rbx), %rbp
vbroadcastss 0xa6b816(%rip), %ymm0 # 0x1eeba20
vmovaps %ymm0, 0xc0(%rbp)
vmovaps %ymm0, 0x80(%rbp)
vmovaps %ymm0, 0x40(%rbp)
vbroadcastss 0xa6c95c(%rip), %ymm0 # 0x1eecb84
vmovaps %ymm0, 0xe0(%rbp)
vmovaps %ymm0, 0xa0(%rbp)
vmovaps %ymm0, 0x60(%rbp)
xorl %eax, %eax
vbroadcastsd 0xaa1c30(%rip), %ymm0 # 0x1f21e78
vmovups %ymm0, (%rbp,%rax,8)
addq $0x4, %rax
cmpq $0x8, %rax
jne 0x1480248
testq %r14, %r14
je 0x1480362
movq %r15, 0x38(%rsp)
incq 0x28(%rsp)
leaq 0x70(%rsp), %r13
leaq 0xd0(%rsp), %r15
movq %r12, 0x18(%rsp)
movq %r14, %r12
leaq 0x270(%rsp), %rbx
movq 0x10(%rbx), %rax
movq %rax, 0x10(%rsp)
vmovups (%rbx), %xmm0
vmovups %xmm0, (%rsp)
leaq 0x40(%rsp), %rdi
movq 0x18(%rsp), %rsi
movq 0x28(%rsp), %rdx
movq %r13, %rcx
vzeroupper
callq 0x147ff6e
movq 0x40(%rsp), %rax
movq %rax, -0x20(%r15)
vmovaps 0x50(%rsp), %xmm0
vmovaps %xmm0, -0x10(%r15)
vmovaps 0x60(%rsp), %xmm0
vmovaps %xmm0, (%r15)
addq $0x30, %r15
addq $0x8, %r13
decq %r12
jne 0x1480288
vbroadcastss 0xa6b736(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0xa6c891(%rip), %xmm0 # 0x1eecb84
leaq 0xd0(%rsp), %rax
xorl %ecx, %ecx
movq 0x38(%rsp), %r15
vmovaps -0x10(%rax), %xmm2
vmovaps (%rax), %xmm3
movq -0x20(%rax), %rdx
movq %rdx, (%rbp,%rcx,8)
vmovss %xmm2, 0x40(%rbp,%rcx,4)
vextractps $0x1, %xmm2, 0x80(%rbp,%rcx,4)
vextractps $0x2, %xmm2, 0xc0(%rbp,%rcx,4)
vmovss %xmm3, 0x60(%rbp,%rcx,4)
vextractps $0x1, %xmm3, 0xa0(%rbp,%rcx,4)
vminps %xmm2, %xmm1, %xmm1
vextractps $0x2, %xmm3, 0xe0(%rbp,%rcx,4)
vmaxps %xmm3, %xmm0, %xmm0
incq %rcx
addq $0x30, %rax
cmpq %rcx, %r14
jne 0x1480302
jmp 0x1480374
vbroadcastss 0xa6c819(%rip), %xmm0 # 0x1eecb84
vbroadcastss 0xa6b6ac(%rip), %xmm1 # 0x1eeba20
movq %rbp, (%r15)
vmovaps %xmm1, 0x10(%r15)
vmovaps %xmm0, 0x20(%r15)
movq %r15, %rax
addq $0x238, %rsp # imm = 0x238
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rcx, 0x10(%rbx)
shlq $0x2, %rax
movq 0x20(%rbx), %rcx
cmpq %rcx, %rax
jbe 0x14803c8
leaq 0x20(%rsp), %rsi
movl $0x40, %edx
movq %r13, %rdi
xorl %ecx, %ecx
callq 0x90b5ee
movq %rax, %rbp
jmp 0x1480201
leaq 0xb0(%rsp), %rsi
movq %rcx, (%rsi)
movl $0x40, %edx
movq %r13, %rdi
movl $0x1, %ecx
callq 0x90b5ee
movq %rax, %rbp
movq %rax, 0x8(%rbx)
movq 0x18(%rbx), %rax
subq 0x10(%rbx), %rax
addq 0x30(%rbx), %rax
movq %rax, 0x30(%rbx)
movq $0x0, 0x10(%rbx)
leaq 0xb0(%rsp), %rcx
movq (%rcx), %rcx
movq %rcx, 0x18(%rbx)
movq 0x20(%rsp), %rdx
movq %rdx, 0x10(%rbx)
cmpq %rcx, %rdx
ja 0x14804b0
movq %rax, 0x30(%rbx)
jmp 0x1480201
movl $0x30, %edi
callq 0x6a3b0
movq %rax, %rbx
leaq 0xc0(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0xa6c7ea(%rip), %rsi # 0x1eecc38
leaq 0xa6c7f6(%rip), %rdx # 0x1eecc4b
leaq 0xb0(%rsp), %rdi
callq 0x8d7230
leaq 0xc81447(%rip), %rax # 0x21018b0
movq %rax, (%rbx)
movl $0x1, 0x8(%rbx)
leaq 0x10(%rbx), %rdi
movq %rbx, %rax
addq $0x20, %rax
movq %rax, 0x10(%rbx)
movq 0xb0(%rsp), %rsi
movq 0xb8(%rsp), %rdx
addq %rsi, %rdx
callq 0x8d7100
leaq 0xc813af(%rip), %rsi # 0x2101850
leaq -0xba968c(%rip), %rdx # 0x8d6e1c
movq %rbx, %rdi
callq 0x6a5d0
movq %r12, 0x18(%rsp)
movq %r15, %r12
xorl %r15d, %r15d
movq %r15, 0x10(%rbx)
movq 0x20(%rbx), %rax
leaq 0xb0(%rsp), %rsi
movq %rax, (%rsi)
movl $0x40, %edx
movq %r13, %rdi
movq %rsi, %r13
xorl %ecx, %ecx
callq 0x90b5ee
movq %rax, %rbp
movq %rax, 0x8(%rbx)
movq 0x18(%rbx), %rax
subq 0x10(%rbx), %rax
addq 0x30(%rbx), %rax
movq %rax, 0x30(%rbx)
movq %r15, 0x10(%rbx)
movq (%r13), %rcx
movq %rcx, 0x18(%rbx)
movq 0x20(%rsp), %rdx
movq %rdx, 0x10(%rbx)
cmpq %rcx, %rdx
ja 0x1480522
movq %rax, 0x30(%rbx)
movq %r12, %r15
movq 0x18(%rsp), %r12
jmp 0x1480201
movq $0x0, 0x10(%rbx)
xorl %ebp, %ebp
jmp 0x1480515
movq %rax, %r14
leaq 0xb0(%rsp), %rdi
callq 0x8d6eda
jmp 0x1480576
jmp 0x1480542
movq %rax, %rdi
callq 0x8d6de8
movq %rax, %r14
xorl %ebp, %ebp
jmp 0x148055f
movq %rax, %r14
movq %rbx, %rdi
callq 0x6a0e0
movb $0x1, %bpl
movq 0xb0(%rsp), %rdi
cmpq %r15, %rdi
je 0x1480588
callq 0x6a4f0
jmp 0x1480588
movq %rax, %r14
leaq 0x40(%rsp), %rdi
callq 0x8d6eda
jmp 0x1480595
movq %rax, %r14
movb $0x1, %bpl
testb %bpl, %bpl
je 0x1480595
movq %rbx, %rdi
callq 0x6a8a0
movq %r14, %rdi
callq 0x6a600
nop
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_morton.h
|
embree::avx::BVHNRotate<4>::rotate(embree::NodeRefPtr<4>, unsigned long)
|
size_t BVHNRotate<4>::rotate(NodeRef parentRef, size_t depth)
{
/*! nothing to rotate if we reached a leaf node. */
if (parentRef.isBarrier()) return 0;
if (parentRef.isLeaf()) return 0;
AABBNode* parent = parentRef.getAABBNode();
/*! rotate all children first */
vint4 cdepth;
for (size_t c=0; c<4; c++)
cdepth[c] = (int)rotate(parent->child(c),depth+1);
/* compute current areas of all children */
vfloat4 sizeX = parent->upper_x-parent->lower_x;
vfloat4 sizeY = parent->upper_y-parent->lower_y;
vfloat4 sizeZ = parent->upper_z-parent->lower_z;
vfloat4 childArea = madd(sizeX,(sizeY + sizeZ),sizeY*sizeZ);
/*! get node bounds */
BBox<vfloat4> child1_0,child1_1,child1_2,child1_3;
parent->bounds(child1_0,child1_1,child1_2,child1_3);
/*! Find best rotation. We pick a first child (child1) and a sub-child
(child2child) of a different second child (child2), and swap child1
and child2child. We perform the best such swap. */
float bestArea = 0;
size_t bestChild1 = -1, bestChild2 = -1, bestChild2Child = -1;
for (size_t c2=0; c2<4; c2++)
{
/*! ignore leaf nodes as we cannot descent into them */
if (parent->child(c2).isBarrier()) continue;
if (parent->child(c2).isLeaf()) continue;
AABBNode* child2 = parent->child(c2).getAABBNode();
/*! transpose child bounds */
BBox<vfloat4> child2c0,child2c1,child2c2,child2c3;
child2->bounds(child2c0,child2c1,child2c2,child2c3);
/*! put child1_0 at each child2 position */
float cost00 = halfArea3f(merge(child1_0,child2c1,child2c2,child2c3));
float cost01 = halfArea3f(merge(child2c0,child1_0,child2c2,child2c3));
float cost02 = halfArea3f(merge(child2c0,child2c1,child1_0,child2c3));
float cost03 = halfArea3f(merge(child2c0,child2c1,child2c2,child1_0));
vfloat4 cost0 = vfloat4(cost00,cost01,cost02,cost03);
vfloat4 min0 = vreduce_min(cost0);
int pos0 = (int)bsf(movemask(min0 == cost0));
/*! put child1_1 at each child2 position */
float cost10 = halfArea3f(merge(child1_1,child2c1,child2c2,child2c3));
float cost11 = halfArea3f(merge(child2c0,child1_1,child2c2,child2c3));
float cost12 = halfArea3f(merge(child2c0,child2c1,child1_1,child2c3));
float cost13 = halfArea3f(merge(child2c0,child2c1,child2c2,child1_1));
vfloat4 cost1 = vfloat4(cost10,cost11,cost12,cost13);
vfloat4 min1 = vreduce_min(cost1);
int pos1 = (int)bsf(movemask(min1 == cost1));
/*! put child1_2 at each child2 position */
float cost20 = halfArea3f(merge(child1_2,child2c1,child2c2,child2c3));
float cost21 = halfArea3f(merge(child2c0,child1_2,child2c2,child2c3));
float cost22 = halfArea3f(merge(child2c0,child2c1,child1_2,child2c3));
float cost23 = halfArea3f(merge(child2c0,child2c1,child2c2,child1_2));
vfloat4 cost2 = vfloat4(cost20,cost21,cost22,cost23);
vfloat4 min2 = vreduce_min(cost2);
int pos2 = (int)bsf(movemask(min2 == cost2));
/*! put child1_3 at each child2 position */
float cost30 = halfArea3f(merge(child1_3,child2c1,child2c2,child2c3));
float cost31 = halfArea3f(merge(child2c0,child1_3,child2c2,child2c3));
float cost32 = halfArea3f(merge(child2c0,child2c1,child1_3,child2c3));
float cost33 = halfArea3f(merge(child2c0,child2c1,child2c2,child1_3));
vfloat4 cost3 = vfloat4(cost30,cost31,cost32,cost33);
vfloat4 min3 = vreduce_min(cost3);
int pos3 = (int)bsf(movemask(min3 == cost3));
/*! find best other child */
vfloat4 area0123 = vfloat4(extract<0>(min0),extract<0>(min1),extract<0>(min2),extract<0>(min3)) - vfloat4(childArea[c2]);
int pos[4] = { pos0,pos1,pos2,pos3 };
const size_t mbd = BVH4::maxBuildDepth;
vbool4 valid = vint4(int(depth+1))+cdepth <= vint4(mbd); // only select swaps that fulfill depth constraints
valid &= vint4(int(c2)) != vint4(step);
if (none(valid)) continue;
size_t c1 = select_min(valid,area0123);
float area = area0123[c1];
if (c1 == c2) continue; // can happen if bounds are NANs
/*! accept a swap when it reduces cost and is not swapping a node with itself */
if (area < bestArea) {
bestArea = area;
bestChild1 = c1;
bestChild2 = c2;
bestChild2Child = pos[c1];
}
}
/*! if we did not find a swap that improves the SAH then do nothing */
if (bestChild1 == size_t(-1)) return 1+reduce_max(cdepth);
/*! perform the best found tree rotation */
AABBNode* child2 = parent->child(bestChild2).getAABBNode();
AABBNode::swap(parent,bestChild1,child2,bestChild2Child);
parent->setBounds(bestChild2,child2->bounds());
AABBNode::compact(parent);
AABBNode::compact(child2);
/*! This returned depth is conservative as the child that was
* pulled up in the tree could have been on the critical path. */
cdepth[bestChild1]++; // bestChild1 was pushed down one level
return 1+reduce_max(cdepth);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x190, %rsp # imm = 0x190
movabsq $-0x7ffffffffffffff8, %r12 # imm = 0x8000000000000008
testq %r12, %rdi
je 0x148503a
xorl %eax, %eax
jmp 0x1485ae9
movq %rsi, %r14
movq %rdi, %rbx
leaq 0x1(%rsi), %r15
xorl %r13d, %r13d
movq (%rbx,%r13,8), %rdi
movq %r15, %rsi
callq 0x1485014
movl %eax, 0x20(%rsp,%r13,4)
incq %r13
cmpq $0x4, %r13
jne 0x1485047
vmovaps 0x20(%rbx), %xmm0
vmovaps 0x30(%rbx), %xmm1
vmovaps 0x40(%rbx), %xmm2
vmovaps 0x50(%rbx), %xmm3
vsubps %xmm0, %xmm1, %xmm4
vsubps %xmm2, %xmm3, %xmm5
vmovaps 0x70(%rbx), %xmm6
vmovaps 0x60(%rbx), %xmm7
vsubps %xmm7, %xmm6, %xmm8
vaddps %xmm5, %xmm8, %xmm9
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm5, %xmm8, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmovaps %xmm4, 0x180(%rsp)
vunpcklps %xmm7, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
vunpckhps %xmm7, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
vxorps %xmm7, %xmm7, %xmm7
vunpcklps %xmm7, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm7[0],xmm2[1],xmm7[1]
vunpckhps %xmm7, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm7[2],xmm2[3],xmm7[3]
vunpcklps %xmm5, %xmm4, %xmm8 # xmm8 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
vmovaps %xmm8, 0xf0(%rsp)
vunpckhps %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vmovaps %xmm4, 0xe0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm4, 0xd0(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0xc0(%rsp)
vunpcklps %xmm6, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
vunpckhps %xmm6, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
vunpcklps %xmm7, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
vunpckhps %xmm7, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
vunpcklps %xmm2, %xmm0, %xmm4 # xmm4 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovaps %xmm4, 0xb0(%rsp)
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0xa0(%rsp)
vunpcklps %xmm3, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vmovaps %xmm0, 0x90(%rsp)
vunpckhps %xmm3, %xmm1, %xmm0 # xmm0 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
vmovaps %xmm0, 0x80(%rsp)
incl %r14d
vmovd %r14d, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0x20(%rsp), %xmm0, %xmm0
movq $-0x1, %rax
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm1, 0xc(%rsp)
xorl %esi, %esi
vbroadcastss 0xacf841(%rip), %xmm1 # 0x1f549a0
vpcmpgtd %xmm1, %xmm0, %xmm0
vmovdqa %xmm0, 0x70(%rsp)
movq $-0x1, %rcx
movq $-0x1, %rdx
movq (%rbx,%rsi,8), %rdi
testq %r12, %rdi
je 0x148518e
incq %rsi
cmpq $0x4, %rsi
jne 0x1485177
jmp 0x148575b
vmovaps 0x60(%rdi), %xmm0
vmovaps 0x20(%rdi), %xmm1
vmovaps 0x30(%rdi), %xmm2
vmovaps 0x40(%rdi), %xmm3
vmovaps 0x50(%rdi), %xmm4
vunpcklps %xmm0, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
vunpckhps %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
vxorps %xmm6, %xmm6, %xmm6
vunpcklps %xmm6, %xmm3, %xmm1 # xmm1 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
vunpckhps %xmm6, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
vunpcklps %xmm1, %xmm5, %xmm8 # xmm8 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
vunpckhps %xmm1, %xmm5, %xmm10 # xmm10 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
vunpcklps %xmm3, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpckhps %xmm3, %xmm0, %xmm7 # xmm7 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
vmovaps 0x70(%rdi), %xmm0
vunpcklps %xmm0, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vunpcklps %xmm6, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
vunpckhps %xmm6, %xmm4, %xmm3 # xmm3 = xmm4[2],xmm6[2],xmm4[3],xmm6[3]
vunpcklps %xmm2, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vunpckhps %xmm2, %xmm1, %xmm11 # xmm11 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
vunpcklps %xmm3, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
vunpckhps %xmm3, %xmm0, %xmm1 # xmm1 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
vmovaps 0xf0(%rsp), %xmm14
vminps %xmm10, %xmm14, %xmm4
vmovaps 0xb0(%rsp), %xmm15
vmaxps %xmm11, %xmm15, %xmm2
vminps %xmm7, %xmm5, %xmm0
vmovaps %xmm7, %xmm3
vmovaps %xmm7, 0x50(%rsp)
vmovaps %xmm5, %xmm13
vmovaps %xmm5, 0x100(%rsp)
vmaxps %xmm1, %xmm6, %xmm7
vmovaps %xmm1, 0x130(%rsp)
vmovaps %xmm6, %xmm9
vminps %xmm0, %xmm4, %xmm4
vmovaps %xmm0, %xmm6
vmovaps %xmm0, 0x30(%rsp)
vmaxps %xmm7, %xmm2, %xmm2
vsubps %xmm4, %xmm2, %xmm0
vshufps $0xc9, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,2,0,3]
vmulps %xmm2, %xmm0, %xmm5
vmovaps %xmm8, 0x40(%rsp)
vminps %xmm14, %xmm8, %xmm0
vmovaps %xmm12, %xmm4
vmaxps %xmm15, %xmm12, %xmm2
vminps %xmm6, %xmm0, %xmm0
vmaxps %xmm7, %xmm2, %xmm2
vmovaps %xmm7, %xmm12
vsubps %xmm0, %xmm2, %xmm0
vshufps $0xc9, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,2,0,3]
vmulps %xmm2, %xmm0, %xmm6
vminps %xmm10, %xmm8, %xmm2
vmaxps %xmm11, %xmm4, %xmm0
vmovaps %xmm11, 0x110(%rsp)
vmovaps %xmm4, 0x60(%rsp)
vminps %xmm3, %xmm14, %xmm7
vmaxps %xmm1, %xmm15, %xmm8
vminps %xmm7, %xmm2, %xmm7
vmaxps %xmm8, %xmm0, %xmm8
vsubps %xmm7, %xmm8, %xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3]
vmulps %xmm7, %xmm8, %xmm7
vminps %xmm14, %xmm13, %xmm8
vminps %xmm8, %xmm2, %xmm8
vmaxps %xmm15, %xmm9, %xmm14
vmovaps %xmm9, %xmm13
vmaxps %xmm14, %xmm0, %xmm14
vsubps %xmm8, %xmm14, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm14 # xmm14 = xmm8[1,2,0,3]
vmulps %xmm14, %xmm8, %xmm8
vunpcklps %xmm6, %xmm5, %xmm14 # xmm14 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
vmovlhps %xmm7, %xmm14, %xmm14 # xmm14 = xmm14[0],xmm7[0]
vinsertps $0x30, %xmm8, %xmm14, %xmm14 # xmm14 = xmm14[0,1,2],xmm8[0]
vinsertps $0x4c, %xmm5, %xmm6, %xmm15 # xmm15 = xmm5[1],xmm6[1],zero,zero
vshufps $0xd4, %xmm7, %xmm15, %xmm15 # xmm15 = xmm15[0,1],xmm7[1,3]
vinsertps $0x70, %xmm8, %xmm15, %xmm15 # xmm15 = xmm15[0,1,2],xmm8[1]
vaddps %xmm15, %xmm14, %xmm14
vunpckhps %xmm6, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
vmovsd %xmm5, %xmm7, %xmm5 # xmm5 = xmm5[0],xmm7[1]
vinsertps $0xb0, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm8[2]
vaddps %xmm5, %xmm14, %xmm6
vshufps $0xb1, %xmm6, %xmm6, %xmm5 # xmm5 = xmm6[1,0,3,2]
vminps %xmm6, %xmm5, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,0]
vminps %xmm5, %xmm7, %xmm1
vmovaps %xmm1, 0x160(%rsp)
vcmpeqps %xmm6, %xmm1, %xmm6
vmovmskps %xmm6, %edi
vmovaps 0xe0(%rsp), %xmm5
vminps %xmm10, %xmm5, %xmm6
vmovaps %xmm10, %xmm3
vmovaps %xmm10, 0x140(%rsp)
vmovaps 0xa0(%rsp), %xmm15
vmaxps %xmm11, %xmm15, %xmm7
vmovaps 0x30(%rsp), %xmm1
vminps %xmm1, %xmm6, %xmm6
vmaxps %xmm12, %xmm7, %xmm7
vsubps %xmm6, %xmm7, %xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vmulps %xmm7, %xmm6, %xmm6
vmovaps 0x40(%rsp), %xmm7
vminps %xmm5, %xmm7, %xmm7
vmaxps %xmm15, %xmm4, %xmm8
vminps %xmm1, %xmm7, %xmm7
vmaxps %xmm12, %xmm8, %xmm8
vmovaps %xmm12, %xmm9
vsubps %xmm7, %xmm8, %xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3]
vmulps %xmm7, %xmm8, %xmm7
vmovaps 0x50(%rsp), %xmm4
vminps %xmm4, %xmm5, %xmm8
vminps %xmm8, %xmm2, %xmm8
vmovaps 0x130(%rsp), %xmm10
vmaxps %xmm10, %xmm15, %xmm14
vmaxps %xmm14, %xmm0, %xmm14
vsubps %xmm8, %xmm14, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm14 # xmm14 = xmm8[1,2,0,3]
vmulps %xmm14, %xmm8, %xmm8
vmovaps 0x100(%rsp), %xmm11
vminps %xmm5, %xmm11, %xmm14
vminps %xmm14, %xmm2, %xmm14
vmovaps %xmm13, %xmm1
vmovaps %xmm13, 0x120(%rsp)
vmaxps %xmm15, %xmm13, %xmm15
vmaxps %xmm15, %xmm0, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vshufps $0xc9, %xmm14, %xmm14, %xmm15 # xmm15 = xmm14[1,2,0,3]
vmulps %xmm15, %xmm14, %xmm14
vunpcklps %xmm7, %xmm6, %xmm15 # xmm15 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
vmovlhps %xmm8, %xmm15, %xmm15 # xmm15 = xmm15[0],xmm8[0]
vinsertps $0x30, %xmm14, %xmm15, %xmm15 # xmm15 = xmm15[0,1,2],xmm14[0]
vinsertps $0x4c, %xmm6, %xmm7, %xmm5 # xmm5 = xmm6[1],xmm7[1],zero,zero
vshufps $0xd4, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm8[1,3]
vinsertps $0x70, %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm14[1]
vaddps %xmm5, %xmm15, %xmm5
vunpckhps %xmm7, %xmm6, %xmm6 # xmm6 = xmm6[2],xmm7[2],xmm6[3],xmm7[3]
vmovsd %xmm6, %xmm8, %xmm6 # xmm6 = xmm6[0],xmm8[1]
vinsertps $0xb0, %xmm14, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm14[2]
vaddps %xmm5, %xmm6, %xmm5
vshufps $0xb1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0,3,2]
vminps %xmm5, %xmm6, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0]
vminps %xmm6, %xmm7, %xmm6
vmovaps %xmm6, 0x150(%rsp)
vcmpeqps %xmm5, %xmm6, %xmm5
vmovmskps %xmm5, %r8d
vmovaps 0xd0(%rsp), %xmm6
vminps %xmm3, %xmm6, %xmm5
vmovaps 0x90(%rsp), %xmm15
vmovaps 0x110(%rsp), %xmm3
vmaxps %xmm3, %xmm15, %xmm7
vmovaps 0x30(%rsp), %xmm12
vminps %xmm12, %xmm5, %xmm5
vmaxps %xmm9, %xmm7, %xmm7
vsubps %xmm5, %xmm7, %xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[1,2,0,3]
vmulps %xmm7, %xmm5, %xmm5
vmovaps 0x40(%rsp), %xmm13
vminps %xmm6, %xmm13, %xmm7
vminps %xmm12, %xmm7, %xmm7
vmovaps 0x60(%rsp), %xmm8
vmaxps %xmm15, %xmm8, %xmm8
vmaxps %xmm9, %xmm8, %xmm8
vsubps %xmm7, %xmm8, %xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3]
vmulps %xmm7, %xmm8, %xmm7
vminps %xmm4, %xmm6, %xmm8
vminps %xmm8, %xmm2, %xmm8
vmaxps %xmm10, %xmm15, %xmm14
vmaxps %xmm14, %xmm0, %xmm14
vsubps %xmm8, %xmm14, %xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm14 # xmm14 = xmm8[1,2,0,3]
vmulps %xmm14, %xmm8, %xmm8
vminps %xmm6, %xmm11, %xmm14
vminps %xmm14, %xmm2, %xmm14
vmaxps %xmm15, %xmm1, %xmm15
vmaxps %xmm15, %xmm0, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vshufps $0xc9, %xmm14, %xmm14, %xmm15 # xmm15 = xmm14[1,2,0,3]
vmulps %xmm15, %xmm14, %xmm14
vunpcklps %xmm7, %xmm5, %xmm15 # xmm15 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
vmovlhps %xmm8, %xmm15, %xmm15 # xmm15 = xmm15[0],xmm8[0]
vinsertps $0x30, %xmm14, %xmm15, %xmm15 # xmm15 = xmm15[0,1,2],xmm14[0]
vinsertps $0x4c, %xmm5, %xmm7, %xmm6 # xmm6 = xmm5[1],xmm7[1],zero,zero
vshufps $0xd4, %xmm8, %xmm6, %xmm6 # xmm6 = xmm6[0,1],xmm8[1,3]
vinsertps $0x70, %xmm14, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm14[1]
vaddps %xmm6, %xmm15, %xmm6
vunpckhps %xmm7, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
vmovsd %xmm5, %xmm8, %xmm5 # xmm5 = xmm5[0],xmm8[1]
vinsertps $0xb0, %xmm14, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm14[2]
vaddps %xmm6, %xmm5, %xmm5
vshufps $0xb1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0,3,2]
vminps %xmm5, %xmm6, %xmm6
vshufpd $0x1, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,0]
vminps %xmm6, %xmm7, %xmm7
vcmpeqps %xmm5, %xmm7, %xmm5
vmovmskps %xmm5, %r9d
vmovaps 0xc0(%rsp), %xmm6
vminps 0x140(%rsp), %xmm6, %xmm5
vmovaps 0x80(%rsp), %xmm8
vmaxps %xmm3, %xmm8, %xmm4
vminps %xmm12, %xmm5, %xmm5
vmaxps %xmm9, %xmm4, %xmm4
vsubps %xmm5, %xmm4, %xmm4
vshufps $0xc9, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm4
vminps %xmm6, %xmm13, %xmm5
vminps %xmm12, %xmm5, %xmm3
vmovaps 0x60(%rsp), %xmm1
vmaxps %xmm8, %xmm1, %xmm5
vmaxps %xmm9, %xmm5, %xmm1
vsubps %xmm3, %xmm1, %xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vminps 0x50(%rsp), %xmm6, %xmm3
vmaxps %xmm10, %xmm8, %xmm5
vminps %xmm3, %xmm2, %xmm3
vmaxps %xmm5, %xmm0, %xmm5
vsubps %xmm3, %xmm5, %xmm3
vshufps $0xc9, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,2,0,3]
vmulps %xmm5, %xmm3, %xmm3
vminps %xmm6, %xmm11, %xmm5
vminps %xmm5, %xmm2, %xmm2
vmovaps 0x120(%rsp), %xmm5
vmaxps %xmm8, %xmm5, %xmm5
vmaxps %xmm5, %xmm0, %xmm0
vsubps %xmm2, %xmm0, %xmm0
vshufps $0xc9, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,2,0,3]
vmulps %xmm2, %xmm0, %xmm0
vunpcklps %xmm1, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vmovlhps %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
vinsertps $0x30, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm0[0]
vinsertps $0x4c, %xmm4, %xmm1, %xmm5 # xmm5 = xmm4[1],xmm1[1],zero,zero
vshufps $0xd4, %xmm3, %xmm5, %xmm5 # xmm5 = xmm5[0,1],xmm3[1,3]
vinsertps $0x70, %xmm0, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm0[1]
vaddps %xmm5, %xmm2, %xmm2
vunpckhps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
vpcmpeqd %xmm4, %xmm4, %xmm4
vmovsd %xmm1, %xmm3, %xmm1 # xmm1 = xmm1[0],xmm3[1]
vinsertps $0xb0, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,1,2],xmm0[2]
vaddps %xmm2, %xmm0, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2]
vminps %xmm0, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vminps %xmm1, %xmm2, %xmm1
vcmpeqps %xmm0, %xmm1, %xmm0
vmovmskps %xmm0, %r10d
vmovaps 0x160(%rsp), %xmm0
vunpcklps 0x150(%rsp), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
vmovlhps %xmm7, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm7[0]
bsfq %rdi, %rdi
bsfq %r8, %r8
bsfq %r9, %r9
bsfq %r10, %r10
vinsertps $0x30, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm1[0]
vbroadcastss 0x180(%rsp,%rsi,4), %xmm1
vsubps %xmm1, %xmm0, %xmm1
vmovaps %xmm1, 0x170(%rsp)
movl %edi, 0x10(%rsp)
movl %r8d, 0x14(%rsp)
movl %r9d, 0x18(%rsp)
movl %r10d, 0x1c(%rsp)
vmovd %esi, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpcmpeqd 0xa6b61b(%rip), %xmm0, %xmm0 # 0x1ef0cf0
vpor 0x70(%rsp), %xmm0, %xmm0
vtestps %xmm4, %xmm0
jb 0x1485180
vxorps %xmm4, %xmm0, %xmm0
vbroadcastss 0xa6632d(%rip), %xmm2 # 0x1eeba20
vblendvps %xmm0, %xmm1, %xmm2, %xmm1
vshufps $0xb1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0,3,2]
vminps %xmm1, %xmm2, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,0]
vminps %xmm2, %xmm3, %xmm2
vcmpeqps %xmm2, %xmm1, %xmm1
vtestps %xmm0, %xmm1
je 0x148571b
vandps %xmm0, %xmm1, %xmm0
vmovmskps %xmm0, %edi
bsfq %rdi, %rdi
cmpq %rdi, %rsi
je 0x1485180
vmovss 0x170(%rsp,%rdi,4), %xmm0
vmovss 0xc(%rsp), %xmm1
vucomiss %xmm0, %xmm1
jbe 0x1485180
movslq 0x10(%rsp,%rdi,4), %rdx
movq %rsi, %rcx
movq %rdi, %rax
vmovss %xmm0, 0xc(%rsp)
jmp 0x1485180
cmpq $-0x1, %rax
je 0x1485ac7
movq (%rbx,%rcx,8), %rsi
movq (%rbx,%rax,8), %rdi
movq (%rsi,%rdx,8), %r8
movq %r8, (%rbx,%rax,8)
movq %rdi, (%rsi,%rdx,8)
vmovss 0x20(%rbx,%rax,4), %xmm0
vmovss 0x20(%rsi,%rdx,4), %xmm1
vmovss %xmm1, 0x20(%rbx,%rax,4)
vmovss %xmm0, 0x20(%rsi,%rdx,4)
vmovss 0x40(%rbx,%rax,4), %xmm0
vmovss 0x40(%rsi,%rdx,4), %xmm1
vmovss %xmm1, 0x40(%rbx,%rax,4)
vmovss %xmm0, 0x40(%rsi,%rdx,4)
vmovss 0x60(%rbx,%rax,4), %xmm0
vmovss 0x60(%rsi,%rdx,4), %xmm1
vmovss %xmm1, 0x60(%rbx,%rax,4)
vmovss %xmm0, 0x60(%rsi,%rdx,4)
vmovss 0x30(%rbx,%rax,4), %xmm0
vmovss 0x30(%rsi,%rdx,4), %xmm1
vmovss %xmm1, 0x30(%rbx,%rax,4)
vmovss %xmm0, 0x30(%rsi,%rdx,4)
vmovss 0x50(%rbx,%rax,4), %xmm0
vmovss 0x50(%rsi,%rdx,4), %xmm1
vmovss %xmm1, 0x50(%rbx,%rax,4)
vmovss %xmm0, 0x50(%rsi,%rdx,4)
vmovss 0x70(%rbx,%rax,4), %xmm0
vmovss 0x70(%rsi,%rdx,4), %xmm1
vmovss %xmm1, 0x70(%rbx,%rax,4)
vmovss %xmm0, 0x70(%rsi,%rdx,4)
vmovaps 0x20(%rsi), %xmm0
vmovaps 0x30(%rsi), %xmm1
vmovaps 0x40(%rsi), %xmm2
vmovaps 0x50(%rsi), %xmm3
vshufps $0xb1, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,0,3,2]
vminps %xmm0, %xmm4, %xmm0
vshufpd $0x1, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,0]
vminss %xmm0, %xmm4, %xmm0
vshufps $0xb1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0,3,2]
vminps %xmm2, %xmm4, %xmm2
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vminss %xmm2, %xmm4, %xmm2
vmovaps 0x60(%rsi), %xmm4
vshufps $0xb1, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,0,3,2]
vminps %xmm4, %xmm5, %xmm4
vshufpd $0x1, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,0]
vminss %xmm4, %xmm5, %xmm4
vshufps $0xb1, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,0,3,2]
vmaxps %xmm1, %xmm5, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,0]
vmaxss %xmm1, %xmm5, %xmm1
vshufps $0xb1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0,3,2]
vmaxps %xmm3, %xmm5, %xmm3
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vmaxss %xmm3, %xmm5, %xmm3
vmovaps 0x70(%rsi), %xmm5
vshufps $0xb1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0,3,2]
vmaxps %xmm5, %xmm6, %xmm5
vshufpd $0x1, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,0]
vmaxss %xmm5, %xmm6, %xmm5
vmovss %xmm0, 0x20(%rbx,%rcx,4)
vmovss %xmm2, 0x40(%rbx,%rcx,4)
vmovss %xmm4, 0x60(%rbx,%rcx,4)
vmovss %xmm1, 0x30(%rbx,%rcx,4)
vmovss %xmm3, 0x50(%rbx,%rcx,4)
vmovss %xmm5, 0x70(%rbx,%rcx,4)
movl $0x3, %ecx
cmpq $0x8, (%rbx,%rcx,8)
jne 0x14858d0
addq $-0x1, %rcx
jb 0x14858bc
movq $-0x1, %rcx
testq %rcx, %rcx
jle 0x14859bd
xorl %edx, %edx
movq (%rbx,%rdx,8), %rdi
cmpq $0x8, %rdi
jne 0x14859a6
movq (%rbx,%rcx,8), %r8
movq %r8, (%rbx,%rdx,8)
movq %rdi, (%rbx,%rcx,8)
vmovss 0x20(%rbx,%rdx,4), %xmm0
vmovss 0x20(%rbx,%rcx,4), %xmm1
vmovss %xmm1, 0x20(%rbx,%rdx,4)
vmovss %xmm0, 0x20(%rbx,%rcx,4)
vmovss 0x40(%rbx,%rdx,4), %xmm0
vmovss 0x40(%rbx,%rcx,4), %xmm1
vmovss %xmm1, 0x40(%rbx,%rdx,4)
vmovss %xmm0, 0x40(%rbx,%rcx,4)
vmovss 0x60(%rbx,%rdx,4), %xmm0
vmovss 0x60(%rbx,%rcx,4), %xmm1
vmovss %xmm1, 0x60(%rbx,%rdx,4)
vmovss %xmm0, 0x60(%rbx,%rcx,4)
vmovss 0x30(%rbx,%rdx,4), %xmm0
vmovss 0x30(%rbx,%rcx,4), %xmm1
vmovss %xmm1, 0x30(%rbx,%rdx,4)
vmovss %xmm0, 0x30(%rbx,%rcx,4)
vmovss 0x50(%rbx,%rdx,4), %xmm0
vmovss 0x50(%rbx,%rcx,4), %xmm1
vmovss %xmm1, 0x50(%rbx,%rdx,4)
vmovss %xmm0, 0x50(%rbx,%rcx,4)
vmovss 0x70(%rbx,%rdx,4), %xmm0
vmovss 0x70(%rbx,%rcx,4), %xmm1
vmovss %xmm1, 0x70(%rbx,%rdx,4)
vmovss %xmm0, 0x70(%rbx,%rcx,4)
leaq -0x1(%rcx), %r8
cmpq %r8, %rdx
cmovlq %rdx, %r8
leaq -0x1(%rcx), %rdi
cmpq %rdx, %rdi
jle 0x14859ab
cmpq $0x8, -0x8(%rbx,%rcx,8)
movq %rdi, %rcx
je 0x1485990
jmp 0x14859ae
movq %rcx, %rdi
jmp 0x14859ae
movq %r8, %rdi
incq %rdx
movq %rdi, %rcx
cmpq %rdi, %rdx
jl 0x14858db
movl $0x3, %ecx
cmpq $0x8, (%rsi,%rcx,8)
jne 0x14859d6
addq $-0x1, %rcx
jb 0x14859c2
movq $-0x1, %rcx
testq %rcx, %rcx
jle 0x1485ac3
xorl %edx, %edx
movq (%rsi,%rdx,8), %rdi
cmpq $0x8, %rdi
jne 0x1485aac
movq (%rsi,%rcx,8), %r8
movq %r8, (%rsi,%rdx,8)
movq %rdi, (%rsi,%rcx,8)
vmovss 0x20(%rsi,%rdx,4), %xmm0
vmovss 0x20(%rsi,%rcx,4), %xmm1
vmovss %xmm1, 0x20(%rsi,%rdx,4)
vmovss %xmm0, 0x20(%rsi,%rcx,4)
vmovss 0x40(%rsi,%rdx,4), %xmm0
vmovss 0x40(%rsi,%rcx,4), %xmm1
vmovss %xmm1, 0x40(%rsi,%rdx,4)
vmovss %xmm0, 0x40(%rsi,%rcx,4)
vmovss 0x60(%rsi,%rdx,4), %xmm0
vmovss 0x60(%rsi,%rcx,4), %xmm1
vmovss %xmm1, 0x60(%rsi,%rdx,4)
vmovss %xmm0, 0x60(%rsi,%rcx,4)
vmovss 0x30(%rsi,%rdx,4), %xmm0
vmovss 0x30(%rsi,%rcx,4), %xmm1
vmovss %xmm1, 0x30(%rsi,%rdx,4)
vmovss %xmm0, 0x30(%rsi,%rcx,4)
vmovss 0x50(%rsi,%rdx,4), %xmm0
vmovss 0x50(%rsi,%rcx,4), %xmm1
vmovss %xmm1, 0x50(%rsi,%rdx,4)
vmovss %xmm0, 0x50(%rsi,%rcx,4)
vmovss 0x70(%rsi,%rdx,4), %xmm0
vmovss 0x70(%rsi,%rcx,4), %xmm1
vmovss %xmm1, 0x70(%rsi,%rdx,4)
vmovss %xmm0, 0x70(%rsi,%rcx,4)
leaq -0x1(%rcx), %r8
cmpq %r8, %rdx
cmovlq %rdx, %r8
leaq -0x1(%rcx), %rdi
cmpq %rdx, %rdi
jle 0x1485ab1
cmpq $0x8, -0x8(%rsi,%rcx,8)
movq %rdi, %rcx
je 0x1485a96
jmp 0x1485ab4
movq %rcx, %rdi
jmp 0x1485ab4
movq %r8, %rdi
incq %rdx
movq %rdi, %rcx
cmpq %rdi, %rdx
jl 0x14859e1
incl 0x20(%rsp,%rax,4)
vmovdqa 0x20(%rsp), %xmm0
vpshufd $0xb1, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2]
vpmaxsd %xmm0, %xmm1, %xmm0
vpshufd $0x4e, %xmm0, %xmm1 # xmm1 = xmm0[2,3,0,1]
vpmaxsd %xmm0, %xmm1, %xmm0
vmovd %xmm0, %eax
incl %eax
cltq
addq $0x190, %rsp # imm = 0x190
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_rotate.cpp
|
unsigned long embree::avx::createMortonCodeArray<embree::QuadMesh>(embree::QuadMesh*, embree::vector_t<embree::avx::BVHBuilderMorton::BuildPrim, embree::aligned_monitored_allocator<embree::avx::BVHBuilderMorton::BuildPrim, 8ul>>&, embree::BuildProgressMonitor&)
|
/* Fills the pre-sized 'morton' array with one morton code per valid primitive
 * of 'mesh' and returns the number of codes actually generated. Primitives for
 * which mesh->buildBounds() returns false are skipped, so the return value may
 * be smaller than morton.size(); in that case the generated codes are stored
 * compacted at the front of the array. */
size_t createMortonCodeArray(Mesh* mesh, mvector<BVHBuilderMorton::BuildPrim>& morton, BuildProgressMonitor& progressMonitor)
{
size_t numPrimitives = morton.size();
/* compute scene bounds: parallel reduction that counts valid primitives and
 * merges the center2() of each valid primitive's bounds */
std::pair<size_t,BBox3fa> cb_empty(0,empty);
auto cb = parallel_reduce
( size_t(0), numPrimitives, size_t(1024), cb_empty, [&](const range<size_t>& r) -> std::pair<size_t,BBox3fa>
{
size_t num = 0;
BBox3fa bounds = empty;
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa prim_bounds = empty;
/* invalid primitives contribute neither to the count nor to the bounds */
if (unlikely(!mesh->buildBounds(j,&prim_bounds))) continue;
bounds.extend(center2(prim_bounds));
num++;
}
return std::make_pair(num,bounds);
}, [] (const std::pair<size_t,BBox3fa>& a, const std::pair<size_t,BBox3fa>& b) {
/* reduction step: sum valid counts, merge the partial center bounds */
return std::make_pair(a.first + b.first,merge(a.second,b.second));
});
size_t numPrimitivesGen = cb.first;   // number of primitives with valid bounds
const BBox3fa centBounds = cb.second; // bounds over all valid primitive centers
/* compute morton codes */
if (likely(numPrimitivesGen == numPrimitives))
{
/* fast path if all primitives were valid */
BVHBuilderMorton::MortonCodeMapping mapping(centBounds);
parallel_for( size_t(0), numPrimitives, size_t(1024), [&](const range<size_t>& r) -> void {
/* every range writes at its own offset r.begin(): a 1:1 mapping of
 * primitive index to output slot, no compaction needed */
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[r.begin()]);
for (size_t j=r.begin(); j<r.end(); j++)
generator(mesh->bounds(j),unsigned(j));
});
}
else
{
/* slow path, fallback in case some primitives were invalid */
ParallelPrefixSumState<size_t> pstate;
BVHBuilderMorton::MortonCodeMapping mapping(centBounds);
/* first prefix-sum pass: determines the per-range counts of valid
 * primitives (return value); the codes written here at r.begin() are
 * provisional and are rewritten compacted by the second pass */
parallel_prefix_sum( pstate, size_t(0), numPrimitives, size_t(1024), size_t(0), [&](const range<size_t>& r, const size_t base) -> size_t {
size_t num = 0;
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[r.begin()]);
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa bounds = empty;
if (unlikely(!mesh->buildBounds(j,&bounds))) continue;
generator(bounds,unsigned(j));
num++;
}
return num;
}, std::plus<size_t>());
/* second pass: 'base' is now the exclusive prefix sum of valid counts, so
 * each range writes its codes compacted at offset 'base' */
parallel_prefix_sum( pstate, size_t(0), numPrimitives, size_t(1024), size_t(0), [&](const range<size_t>& r, const size_t base) -> size_t {
size_t num = 0;
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[base]);
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa bounds = empty;
if (!mesh->buildBounds(j,&bounds)) continue;
generator(bounds,unsigned(j));
num++;
}
return num;
}, std::plus<size_t>());
}
return numPrimitivesGen;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x568, %rsp # imm = 0x568
movq %rsi, %r15
leaq 0x18(%rsp), %rax
movq %rdi, (%rax)
movq 0x10(%rsi), %rbp
xorl %r12d, %r12d
leaq 0x130(%rsp), %r14
movq %r12, (%r14)
vbroadcastss 0xa5809a(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, 0x10(%r14)
vbroadcastss 0xa591ef(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, 0x20(%r14)
leaq 0x30(%rsp), %r13
movq %rax, (%r13)
leaq 0x168(%rsp), %rbx
movw $0x401, 0xc(%rbx) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%rbx)
movq $0x8, 0x40(%rbx)
movq %rbx, %rdi
callq 0x6a660
leaq 0x48(%rsp), %rcx
movq %rbp, (%rcx)
movq %r12, 0x8(%rcx)
movq $0x400, 0x10(%rcx) # imm = 0x400
leaq 0x8(%rsp), %rax
leaq 0x70(%rsp), %r12
movq %rax, (%r12)
movq %r13, 0x8(%r12)
leaq 0xb0(%rsp), %r13
movq %r14, (%r13)
movq %r12, 0x8(%r13)
movq %rax, 0x10(%r13)
movq (%r14), %rax
movq %rax, 0x20(%r13)
vmovups 0x10(%r14), %ymm0
movq %rcx, %r14
vmovups %ymm0, 0x30(%r13)
leaq 0x28(%rsp), %rdx
movq %rcx, %rdi
movq %r13, %rsi
movq %rbx, %rcx
vzeroupper
callq 0x1497fde
movq 0xd0(%rsp), %rbx
vmovaps 0xe0(%rsp), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
vmovaps 0xf0(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x168(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1493e25
leaq 0x168(%rsp), %rdi
callq 0x6aab0
vmovaps 0xa0(%rsp), %xmm1
vmovaps 0x90(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm0
cmpq %rbp, %rbx
jne 0x1493b91
vmovaps %xmm1, 0xb0(%rsp)
vbroadcastss 0xa8e3af(%rip), %xmm1 # 0x1f21e60
vcmpnleps %xmm1, %xmm0, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa58c4d(%rip), %xmm3 # 0x1eec714
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vbroadcastss 0xababc4(%rip), %xmm2 # 0x1f4e6a0
vmulps %xmm2, %xmm0, %xmm0
vandps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0xc0(%rsp)
movq %r13, 0x70(%rsp)
movq %r15, 0x78(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x80(%rsp)
leaq 0x168(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x48(%rsp), %rdi
movq %rbp, (%rdi)
movq $0x0, 0x8(%rdi)
movq $0x400, 0x10(%rdi) # imm = 0x400
leaq 0x30(%rsp), %rsi
movq %r12, (%rsi)
leaq 0x28(%rsp), %rdx
movq %r14, %rcx
callq 0x1498a22
leaq 0x168(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1493e57
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rax
addq $0x568, %rsp # imm = 0x568
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps %xmm1, 0x70(%rsp)
vbroadcastss 0xa8e2c0(%rip), %xmm1 # 0x1f21e60
vcmpnleps %xmm1, %xmm0, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa58b5e(%rip), %xmm3 # 0x1eec714
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vbroadcastss 0xabaad5(%rip), %xmm2 # 0x1f4e6a0
vmulps %xmm2, %xmm0, %xmm0
vandps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%rsp)
movq %r12, 0x30(%rsp)
movq %r15, 0x38(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
leaq 0x20(%rsp), %r13
movq %rbp, (%r13)
xorl %edi, %edi
callq 0x6ab80
cltq
movq (%r13), %rcx
movq %r13, %rdx
subq 0x8(%rsp), %rcx
addq $0x3ff, %rcx # imm = 0x3FF
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r13d
cmovbq %rcx, %r13
leaq 0x10(%rsp), %rcx
movq %r13, (%rcx)
leaq 0x8(%rsp), %rax
movq %rax, 0x48(%rsp)
movq %rdx, 0x50(%rsp)
movq %rcx, 0x58(%rsp)
leaq 0x168(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0x30(%rsp), %rax
movq %rax, 0x68(%rsp)
leaq 0xb0(%rsp), %rdi
movw $0x401, 0xc(%rdi) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%rdi)
movq $0x8, 0x40(%rdi)
callq 0x6a660
leaq 0x28(%rsp), %rcx
movq %r14, (%rcx)
leaq 0x7(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r13, %rsi
leaq 0xb0(%rsp), %r9
callq 0x1499474
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1493e89
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1493cfa
xorl %ecx, %ecx
xorl %edx, %edx
movq %rdx, 0x368(%rsp,%rcx,8)
addq 0x168(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %rax
jne 0x1493ce2
movq %r12, 0x30(%rsp)
movq %r15, 0x38(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
movq %rbp, 0x20(%rsp)
xorl %edi, %edi
callq 0x6ab80
cltq
movq 0x20(%rsp), %rcx
subq 0x8(%rsp), %rcx
addq $0x3ff, %rcx # imm = 0x3FF
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r15d
cmovbq %rcx, %r15
movq %r15, 0x10(%rsp)
leaq 0x8(%rsp), %rax
movq %rax, 0x48(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x50(%rsp)
leaq 0x10(%rsp), %rax
movq %rax, 0x58(%rsp)
leaq 0x168(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0x30(%rsp), %rax
movq %rax, 0x68(%rsp)
leaq 0xb0(%rsp), %r12
movw $0x401, 0xc(%r12) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r12)
movq $0x8, 0x40(%r12)
movq %r12, %rdi
callq 0x6a660
leaq 0x28(%rsp), %rcx
movq %r14, (%rcx)
leaq 0x7(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r15, %rsi
movq %r12, %r9
callq 0x149a16c
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1493ebb
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1493b7c
xorl %ecx, %ecx
xorl %edx, %edx
movq %rdx, 0x368(%rsp,%rcx,8)
addq 0x168(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %rax
jne 0x1493e08
jmp 0x1493b7c
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa57cb7(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc90dc0(%rip), %rsi # 0x2124c08
movq 0xc90b79(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa57c85(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc90d8e(%rip), %rsi # 0x2124c08
movq 0xc90b47(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa57c53(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc90d5c(%rip), %rsi # 0x2124c08
movq 0xc90b15(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa57c21(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc90d2a(%rip), %rsi # 0x2124c08
movq 0xc90ae3(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1493f12
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1493f26
jmp 0x1493f85
jmp 0x1493f0f
jmp 0x1493f85
jmp 0x1493f23
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x1493f7d
jmp 0x1493f85
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x1493f7d
jmp 0x1493f85
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1493f5c
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1493f70
jmp 0x1493f85
jmp 0x1493f59
jmp 0x1493f85
jmp 0x1493f6d
movq %rax, %rbx
leaq 0x168(%rsp), %rdi
callq 0x6aab0
jmp 0x1493f7d
jmp 0x1493f85
movq %rax, %rbx
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/builders/primrefgen.cpp
|
unsigned long embree::avx::createMortonCodeArray<embree::Instance>(embree::Instance*, embree::vector_t<embree::avx::BVHBuilderMorton::BuildPrim, embree::aligned_monitored_allocator<embree::avx::BVHBuilderMorton::BuildPrim, 8ul>>&, embree::BuildProgressMonitor&)
|
/* Fills the pre-sized 'morton' array with one morton code per valid primitive
 * of 'mesh' and returns the number of codes actually generated. Primitives for
 * which mesh->buildBounds() returns false are skipped, so the return value may
 * be smaller than morton.size(); in that case the generated codes are stored
 * compacted at the front of the array. */
size_t createMortonCodeArray(Mesh* mesh, mvector<BVHBuilderMorton::BuildPrim>& morton, BuildProgressMonitor& progressMonitor)
{
size_t numPrimitives = morton.size();
/* compute scene bounds: parallel reduction that counts valid primitives and
 * merges the center2() of each valid primitive's bounds */
std::pair<size_t,BBox3fa> cb_empty(0,empty);
auto cb = parallel_reduce
( size_t(0), numPrimitives, size_t(1024), cb_empty, [&](const range<size_t>& r) -> std::pair<size_t,BBox3fa>
{
size_t num = 0;
BBox3fa bounds = empty;
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa prim_bounds = empty;
/* invalid primitives contribute neither to the count nor to the bounds */
if (unlikely(!mesh->buildBounds(j,&prim_bounds))) continue;
bounds.extend(center2(prim_bounds));
num++;
}
return std::make_pair(num,bounds);
}, [] (const std::pair<size_t,BBox3fa>& a, const std::pair<size_t,BBox3fa>& b) {
/* reduction step: sum valid counts, merge the partial center bounds */
return std::make_pair(a.first + b.first,merge(a.second,b.second));
});
size_t numPrimitivesGen = cb.first;   // number of primitives with valid bounds
const BBox3fa centBounds = cb.second; // bounds over all valid primitive centers
/* compute morton codes */
if (likely(numPrimitivesGen == numPrimitives))
{
/* fast path if all primitives were valid */
BVHBuilderMorton::MortonCodeMapping mapping(centBounds);
parallel_for( size_t(0), numPrimitives, size_t(1024), [&](const range<size_t>& r) -> void {
/* every range writes at its own offset r.begin(): a 1:1 mapping of
 * primitive index to output slot, no compaction needed */
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[r.begin()]);
for (size_t j=r.begin(); j<r.end(); j++)
generator(mesh->bounds(j),unsigned(j));
});
}
else
{
/* slow path, fallback in case some primitives were invalid */
ParallelPrefixSumState<size_t> pstate;
BVHBuilderMorton::MortonCodeMapping mapping(centBounds);
/* first prefix-sum pass: determines the per-range counts of valid
 * primitives (return value); the codes written here at r.begin() are
 * provisional and are rewritten compacted by the second pass */
parallel_prefix_sum( pstate, size_t(0), numPrimitives, size_t(1024), size_t(0), [&](const range<size_t>& r, const size_t base) -> size_t {
size_t num = 0;
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[r.begin()]);
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa bounds = empty;
if (unlikely(!mesh->buildBounds(j,&bounds))) continue;
generator(bounds,unsigned(j));
num++;
}
return num;
}, std::plus<size_t>());
/* second pass: 'base' is now the exclusive prefix sum of valid counts, so
 * each range writes its codes compacted at offset 'base' */
parallel_prefix_sum( pstate, size_t(0), numPrimitives, size_t(1024), size_t(0), [&](const range<size_t>& r, const size_t base) -> size_t {
size_t num = 0;
BVHBuilderMorton::MortonCodeGenerator generator(mapping,&morton.data()[base]);
for (size_t j=r.begin(); j<r.end(); j++)
{
BBox3fa bounds = empty;
if (!mesh->buildBounds(j,&bounds)) continue;
generator(bounds,unsigned(j));
num++;
}
return num;
}, std::plus<size_t>());
}
return numPrimitivesGen;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x568, %rsp # imm = 0x568
movq %rsi, %r15
leaq 0x18(%rsp), %rax
movq %rdi, (%rax)
movq 0x10(%rsi), %rbp
xorl %r12d, %r12d
leaq 0x130(%rsp), %r14
movq %r12, (%r14)
vbroadcastss 0xa5741e(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, 0x10(%r14)
vbroadcastss 0xa58573(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, 0x20(%r14)
leaq 0x30(%rsp), %r13
movq %rax, (%r13)
leaq 0x168(%rsp), %rbx
movw $0x401, 0xc(%rbx) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%rbx)
movq $0x8, 0x40(%rbx)
movq %rbx, %rdi
callq 0x6a660
leaq 0x48(%rsp), %rcx
movq %rbp, (%rcx)
movq %r12, 0x8(%rcx)
movq $0x400, 0x10(%rcx) # imm = 0x400
leaq 0x8(%rsp), %rax
leaq 0x70(%rsp), %r12
movq %rax, (%r12)
movq %r13, 0x8(%r12)
leaq 0xb0(%rsp), %r13
movq %r14, (%r13)
movq %r12, 0x8(%r13)
movq %rax, 0x10(%r13)
movq (%r14), %rax
movq %rax, 0x20(%r13)
vmovups 0x10(%r14), %ymm0
movq %rcx, %r14
vmovups %ymm0, 0x30(%r13)
leaq 0x28(%rsp), %rdx
movq %rcx, %rdi
movq %r13, %rsi
movq %rbx, %rcx
vzeroupper
callq 0x149de48
movq 0xd0(%rsp), %rbx
vmovaps 0xe0(%rsp), %xmm0
vmovaps %xmm0, 0xa0(%rsp)
vmovaps 0xf0(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x168(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1494aa1
leaq 0x168(%rsp), %rdi
callq 0x6aab0
vmovaps 0xa0(%rsp), %xmm1
vmovaps 0x90(%rsp), %xmm0
vsubps %xmm1, %xmm0, %xmm0
cmpq %rbp, %rbx
jne 0x149480d
vmovaps %xmm1, 0xb0(%rsp)
vbroadcastss 0xa8d733(%rip), %xmm1 # 0x1f21e60
vcmpnleps %xmm1, %xmm0, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa57fd1(%rip), %xmm3 # 0x1eec714
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vbroadcastss 0xab9f48(%rip), %xmm2 # 0x1f4e6a0
vmulps %xmm2, %xmm0, %xmm0
vandps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0xc0(%rsp)
movq %r13, 0x70(%rsp)
movq %r15, 0x78(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x80(%rsp)
leaq 0x168(%rsp), %r14
movw $0x401, 0xc(%r14) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r14)
movq $0x8, 0x40(%r14)
movq %r14, %rdi
callq 0x6a660
leaq 0x48(%rsp), %rdi
movq %rbp, (%rdi)
movq $0x0, 0x8(%rdi)
movq $0x400, 0x10(%rdi) # imm = 0x400
leaq 0x30(%rsp), %rsi
movq %r12, (%rsi)
leaq 0x28(%rsp), %rdx
movq %r14, %rcx
callq 0x149e8b4
leaq 0x168(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1494ad3
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rax
addq $0x568, %rsp # imm = 0x568
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vmovaps %xmm1, 0x70(%rsp)
vbroadcastss 0xa8d644(%rip), %xmm1 # 0x1f21e60
vcmpnleps %xmm1, %xmm0, %xmm1
vrcpps %xmm0, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vbroadcastss 0xa57ee2(%rip), %xmm3 # 0x1eec714
vsubps %xmm0, %xmm3, %xmm0
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm0, %xmm2, %xmm0
vbroadcastss 0xab9e59(%rip), %xmm2 # 0x1f4e6a0
vmulps %xmm2, %xmm0, %xmm0
vandps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%rsp)
movq %r12, 0x30(%rsp)
movq %r15, 0x38(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
leaq 0x20(%rsp), %r13
movq %rbp, (%r13)
xorl %edi, %edi
callq 0x6ab80
cltq
movq (%r13), %rcx
movq %r13, %rdx
subq 0x8(%rsp), %rcx
addq $0x3ff, %rcx # imm = 0x3FF
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r13d
cmovbq %rcx, %r13
leaq 0x10(%rsp), %rcx
movq %r13, (%rcx)
leaq 0x8(%rsp), %rax
movq %rax, 0x48(%rsp)
movq %rdx, 0x50(%rsp)
movq %rcx, 0x58(%rsp)
leaq 0x168(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0x30(%rsp), %rax
movq %rax, 0x68(%rsp)
leaq 0xb0(%rsp), %rdi
movw $0x401, 0xc(%rdi) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%rdi)
movq $0x8, 0x40(%rdi)
callq 0x6a660
leaq 0x28(%rsp), %rcx
movq %r14, (%rcx)
leaq 0x7(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r13, %rsi
leaq 0xb0(%rsp), %r9
callq 0x149f438
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1494b05
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x1494976
xorl %ecx, %ecx
xorl %edx, %edx
movq %rdx, 0x368(%rsp,%rcx,8)
addq 0x168(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %rax
jne 0x149495e
movq %r12, 0x30(%rsp)
movq %r15, 0x38(%rsp)
leaq 0x18(%rsp), %rax
movq %rax, 0x40(%rsp)
movq $0x0, 0x8(%rsp)
movq %rbp, 0x20(%rsp)
xorl %edi, %edi
callq 0x6ab80
cltq
movq 0x20(%rsp), %rcx
subq 0x8(%rsp), %rcx
addq $0x3ff, %rcx # imm = 0x3FF
shrq $0xa, %rcx
cmpq %rcx, %rax
cmovbq %rax, %rcx
cmpq $0x40, %rcx
movl $0x40, %r15d
cmovbq %rcx, %r15
movq %r15, 0x10(%rsp)
leaq 0x8(%rsp), %rax
movq %rax, 0x48(%rsp)
leaq 0x20(%rsp), %rax
movq %rax, 0x50(%rsp)
leaq 0x10(%rsp), %rax
movq %rax, 0x58(%rsp)
leaq 0x168(%rsp), %rax
movq %rax, 0x60(%rsp)
leaq 0x30(%rsp), %rax
movq %rax, 0x68(%rsp)
leaq 0xb0(%rsp), %r12
movw $0x401, 0xc(%r12) # imm = 0x401
vxorps %xmm0, %xmm0, %xmm0
vmovups %xmm0, 0x20(%r12)
movq $0x8, 0x40(%r12)
movq %r12, %rdi
callq 0x6a660
leaq 0x28(%rsp), %rcx
movq %r14, (%rcx)
leaq 0x7(%rsp), %r8
movl $0x1, %edx
xorl %edi, %edi
movq %r15, %rsi
movq %r12, %r9
callq 0x14a016a
leaq 0xb0(%rsp), %rdi
callq 0x6a770
testb %al, %al
jne 0x1494b37
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
movq 0x10(%rsp), %rax
testq %rax, %rax
je 0x14947f8
xorl %ecx, %ecx
xorl %edx, %edx
movq %rdx, 0x368(%rsp,%rcx,8)
addq 0x168(%rsp,%rcx,8), %rdx
incq %rcx
cmpq %rcx, %rax
jne 0x1494a84
jmp 0x14947f8
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa5703b(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc90144(%rip), %rsi # 0x2124c08
movq 0xc8fefd(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa57009(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc90112(%rip), %rsi # 0x2124c08
movq 0xc8fecb(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa56fd7(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc900e0(%rip), %rsi # 0x2124c08
movq 0xc8fe99(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movl $0x10, %edi
callq 0x6a3b0
movq %rax, %r14
leaq 0xa56fa5(%rip), %rsi # 0x1eebaf0
movq %rax, %rdi
callq 0x6a230
movq 0xc900ae(%rip), %rsi # 0x2124c08
movq 0xc8fe67(%rip), %rdx # 0x21249c8
movq %r14, %rdi
callq 0x6a5d0
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1494b8e
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1494ba2
jmp 0x1494c01
jmp 0x1494b8b
jmp 0x1494c01
jmp 0x1494b9f
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x1494bf9
jmp 0x1494c01
movq %rax, %rbx
leaq 0xb0(%rsp), %rdi
callq 0x6aab0
jmp 0x1494bf9
jmp 0x1494c01
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1494bd8
movq %rax, %rbx
movq %r14, %rdi
callq 0x6a8a0
jmp 0x1494bec
jmp 0x1494c01
jmp 0x1494bd5
jmp 0x1494c01
jmp 0x1494be9
movq %rax, %rbx
leaq 0x168(%rsp), %rdi
callq 0x6aab0
jmp 0x1494bf9
jmp 0x1494c01
movq %rax, %rbx
leaq 0x168(%rsp), %rdi
callq 0x6aab0
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/builders/primrefgen.cpp
|
embree::avx::BVHNIntersector1<8, 16781328, false, embree::avx::VirtualCurveIntersector1>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
|
/* Single-ray closest-hit BVH traversal. Walks the N-wide BVH with an explicit
 * node stack, descending into the nearest intersected child and pushing the
 * remaining hit children; leaves are handed to PrimitiveIntersector1, which
 * updates ray.tfar on a hit so later stack entries can be distance-culled. */
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
RayHit& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf; // neg_inf: the root entry is never distance-culled
/* NOTE(review): second emptyNode check — appears redundant with the one
 * above unless constructing 'pre' can invalidate the root; confirm */
if (bvh->root == BVH::emptyNode)
return;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers; near/far are clamped to >= 0 */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node; empty stack terminates the traversal */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one: a closer hit found since this
 * node was pushed has shrunk ray.tfar below its entry distance */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes,1,1,1);
/* returns false when 'cur' is not an inner node, i.e. a leaf was reached */
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children (sorted by distance) */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
/* propagate a possibly shortened ray.tfar into the traversal ray so
 * subsequent node tests use the tightened interval */
tray.tfar = ray.tfar;
/* push lazy node onto stack; dist = neg_inf guarantees it is processed
 * regardless of the current ray.tfar */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x2600, %rsp # imm = 0x2600
movq %rdx, 0xb8(%rsp)
movq %rdi, 0xb0(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x14a6bfb
vmovaps 0x10(%rsi), %xmm0
vdpps $0x7f, %xmm0, %xmm0, %xmm3
vrsqrtss %xmm3, %xmm3, %xmm4
vmovss 0xa45c6d(%rip), %xmm1 # 0x1eec718
vmulss %xmm1, %xmm4, %xmm5
vmovss 0xa460c9(%rip), %xmm2 # 0x1eecb80
vmulss %xmm2, %xmm3, %xmm3
vmulss %xmm4, %xmm3, %xmm3
vmulss %xmm4, %xmm4, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vsubss %xmm3, %xmm5, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm3 # xmm3 = xmm6[0,0,0,0]
vmulps %xmm3, %xmm0, %xmm4
vshufpd $0x1, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,0]
vmovshdup %xmm4, %xmm5 # xmm5 = xmm4[1,1,3,3]
vbroadcastss 0xa7a3da(%rip), %xmm8 # 0x1f20ec0
vxorps %xmm5, %xmm8, %xmm9
vxorps %xmm5, %xmm5, %xmm5
vunpckhps %xmm5, %xmm4, %xmm10 # xmm10 = xmm4[2],xmm5[2],xmm4[3],xmm5[3]
vmovss %xmm9, %xmm5, %xmm9 # xmm9 = xmm9[0],xmm5[1,2,3]
vshufps $0x41, %xmm9, %xmm10, %xmm9 # xmm9 = xmm10[1,0],xmm9[0,1]
vxorpd %xmm7, %xmm8, %xmm7
vinsertps $0x2a, %xmm4, %xmm7, %xmm7 # xmm7 = xmm7[0],zero,xmm4[0],zero
vdpps $0x7f, %xmm9, %xmm9, %xmm8
vdpps $0x7f, %xmm7, %xmm7, %xmm10
vcmpltps %xmm8, %xmm10, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vblendvps %xmm8, %xmm9, %xmm7, %xmm7
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm6, 0x160(%rsp)
vrsqrtss %xmm8, %xmm8, %xmm6
vmulss %xmm1, %xmm6, %xmm9
vmulss %xmm2, %xmm8, %xmm8
vmulss %xmm6, %xmm8, %xmm8
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm8, %xmm6
vsubss %xmm6, %xmm9, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm7, %xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,2,0,3]
vmulps %xmm6, %xmm8, %xmm8
vmulps %xmm7, %xmm4, %xmm7
vsubps %xmm8, %xmm7, %xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss %xmm1, %xmm9, %xmm1
vmulss %xmm2, %xmm8, %xmm2
vmulss %xmm2, %xmm9, %xmm2
vmulss %xmm9, %xmm9, %xmm8
vmulss %xmm2, %xmm8, %xmm2
vsubss %xmm2, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm7, %xmm1, %xmm1
vmulps %xmm4, %xmm3, %xmm2
vunpcklps %xmm2, %xmm6, %xmm3 # xmm3 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
vunpckhps %xmm2, %xmm6, %xmm2 # xmm2 = xmm6[2],xmm2[2],xmm6[3],xmm2[3]
vunpcklps %xmm5, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vunpckhps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm5[2],xmm1[3],xmm5[3]
vunpcklps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vunpcklps %xmm4, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
vunpckhps %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
vmovaps %xmm2, 0x170(%rsp)
vmovaps %xmm3, 0x180(%rsp)
vmovaps %xmm1, 0x190(%rsp)
movq 0x70(%rax), %rax
movq %rax, 0x2a0(%rsp)
movl $0x0, 0x2a8(%rsp)
cmpq $0x8, %rax
jne 0x14a6c0d
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
leaq 0x2b0(%rsp), %r11
vxorps %xmm6, %xmm6, %xmm6
vmaxss 0xc(%rsi), %xmm6, %xmm2
vmaxss 0x20(%rsi), %xmm6, %xmm1
vbroadcastss 0xa7a298(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm0, %xmm3
vbroadcastss 0xa4a3af(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm0, %xmm3
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0xa45abf(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %ymm7
vbroadcastss 0x4(%rsi), %ymm8
vbroadcastss 0x8(%rsi), %ymm9
vshufps $0x0, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovaps %ymm4, 0x1e0(%rsp)
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmovaps %ymm4, 0x1c0(%rsp)
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
xorl %r8d, %r8d
vucomiss %xmm6, %xmm3
setb %r8b
vshufps $0x0, %xmm3, %xmm3, %xmm0 # xmm0 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm10
vmovshdup %xmm3, %xmm0 # xmm0 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm11
vshufpd $0x1, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm12
shll $0x5, %r8d
xorl %r10d, %r10d
vucomiss %xmm6, %xmm0
setb %r10b
shll $0x5, %r10d
orq $0x40, %r10
xorl %r13d, %r13d
vucomiss %xmm6, %xmm4
setb %r13b
shll $0x5, %r13d
orq $0x80, %r13
movq %r8, %r12
xorq $0x20, %r12
movq %r10, %rbx
xorq $0x20, %rbx
movq %r13, %r15
xorq $0x20, %r15
vshufps $0x0, %xmm2, %xmm2, %xmm0 # xmm0 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm13
vshufps $0x0, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vmovaps %ymm7, 0x80(%rsp)
vmovaps %ymm8, 0x60(%rsp)
vmovaps %ymm9, 0x40(%rsp)
movq %r8, 0x18(%rsp)
vmovaps %ymm10, 0x140(%rsp)
vmovaps %ymm11, 0x120(%rsp)
vmovaps %ymm12, 0x100(%rsp)
movq %r10, 0x10(%rsp)
vmovaps %ymm13, 0xe0(%rsp)
movq %rsi, 0xa8(%rsp)
vmovss 0x20(%rsi), %xmm1
leaq 0x2a0(%rsp), %rax
cmpq %rax, %r11
je 0x14a6bfb
vmovss -0x8(%r11), %xmm2
addq $-0x10, %r11
vucomiss %xmm1, %xmm2
ja 0x14a6d8b
movq (%r11), %rcx
testb $0x8, %cl
jne 0x14a6ec0
vmovss 0x1c(%rsi), %xmm1
movl %ecx, %edx
andl $0x7, %edx
movq %rcx, %rax
andq $-0x10, %rax
cmpq $0x3, %rdx
je 0x14a6f15
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmulps 0x100(%rax,%r8), %ymm1, %ymm2
vaddps 0x40(%rax,%r8), %ymm2, %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vmulps 0x100(%rax,%r10), %ymm1, %ymm3
vaddps 0x40(%rax,%r10), %ymm3, %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vmulps 0x100(%rax,%r13), %ymm1, %ymm4
vaddps 0x40(%rax,%r13), %ymm4, %ymm4
vsubps %ymm9, %ymm4, %ymm4
vmulps %ymm4, %ymm12, %ymm4
vmulps 0x100(%rax,%r12), %ymm1, %ymm5
vmaxps %ymm4, %ymm3, %ymm3
vaddps 0x40(%rax,%r12), %ymm5, %ymm4
vsubps %ymm7, %ymm4, %ymm4
vmulps 0x100(%rax,%rbx), %ymm1, %ymm5
vaddps 0x40(%rax,%rbx), %ymm5, %ymm5
vmulps %ymm4, %ymm10, %ymm4
vsubps %ymm8, %ymm5, %ymm5
vmulps 0x100(%rax,%r15), %ymm1, %ymm6
vmulps %ymm5, %ymm11, %ymm5
vaddps 0x40(%rax,%r15), %ymm6, %ymm6
vsubps %ymm9, %ymm6, %ymm6
vmulps %ymm6, %ymm12, %ymm6
vminps %ymm6, %ymm5, %ymm5
vmaxps %ymm2, %ymm13, %ymm2
vmaxps %ymm3, %ymm2, %ymm2
vminps %ymm4, %ymm0, %ymm3
vminps %ymm5, %ymm3, %ymm3
vcmpleps %ymm3, %ymm2, %ymm3
cmpl $0x6, %edx
je 0x14a72c0
vextractf128 $0x1, %ymm3, %xmm1
vpackssdw %xmm1, %xmm3, %xmm1
vpsllw $0xf, %xmm1, %xmm1
vpacksswb %xmm1, %xmm1, %xmm1
vpmovmskb %xmm1, %eax
movzbl %al, %r14d
vmovaps %ymm2, 0xc0(%rsp)
testb $0x8, %cl
jne 0x14a6f0e
testq %r14, %r14
je 0x14a72b6
andq $-0x10, %rcx
bsfq %r14, %rdi
leaq -0x1(%r14), %r9
xorl %eax, %eax
movq (%rcx,%rdi,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
andq %r14, %r9
jne 0x14a72ed
movq %rdx, %rcx
testl %eax, %eax
je 0x14a6daf
jmp 0x14a7602
movl $0x6, %eax
jmp 0x14a6f01
vmovaps 0x40(%rax), %ymm4
vmovaps %ymm4, 0x240(%rsp)
vmovaps 0xa0(%rax), %ymm5
vmovaps 0xc0(%rax), %ymm8
vmovaps 0xe0(%rax), %ymm15
vmovaps %ymm15, 0x220(%rsp)
vmovaps 0x100(%rax), %ymm10
vmovaps 0x120(%rax), %ymm6
vmovaps 0x140(%rax), %ymm7
vshufps $0x0, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm13
vmulps 0x1c0(%rax), %ymm13, %ymm3
vmulps 0x1e0(%rax), %ymm13, %ymm9
vmovss 0xa45795(%rip), %xmm2 # 0x1eec714
vsubss %xmm1, %xmm2, %xmm2
vmulps 0x200(%rax), %ymm13, %ymm11
vmulss 0xa44a91(%rip), %xmm2, %xmm12 # 0x1eeba24
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vinsertf128 $0x1, %xmm12, %ymm12, %ymm12
vaddps %ymm3, %ymm12, %ymm1
vmovaps %ymm1, 0x20(%rsp)
vaddps %ymm9, %ymm12, %ymm1
vmovaps %ymm1, 0x280(%rsp)
vaddps %ymm11, %ymm12, %ymm1
vmovaps %ymm1, 0x260(%rsp)
vmovaps 0x1a0(%rsp), %ymm1
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm6, %ymm1, %ymm11
vmulps %ymm7, %ymm1, %ymm12
vmovaps 0x1c0(%rsp), %ymm1
vmulps %ymm5, %ymm1, %ymm14
vaddps %ymm3, %ymm14, %ymm3
vmulps %ymm1, %ymm8, %ymm14
vaddps %ymm11, %ymm14, %ymm14
vmulps %ymm1, %ymm15, %ymm11
vaddps %ymm12, %ymm11, %ymm15
vmovaps 0x1e0(%rsp), %ymm1
vmulps %ymm4, %ymm1, %ymm11
vaddps %ymm3, %ymm11, %ymm3
vmovaps 0x60(%rax), %ymm11
vmulps %ymm1, %ymm11, %ymm12
vaddps %ymm14, %ymm12, %ymm14
vmovaps 0x80(%rax), %ymm12
vmulps %ymm1, %ymm12, %ymm9
vaddps %ymm15, %ymm9, %ymm9
vbroadcastss 0xa79e8e(%rip), %ymm1 # 0x1f20ec4
vandps %ymm1, %ymm3, %ymm15
vbroadcastss 0xa49fa5(%rip), %ymm4 # 0x1ef0fe8
vcmpltps %ymm4, %ymm15, %ymm15
vblendvps %ymm15, %ymm4, %ymm3, %ymm3
vandps %ymm1, %ymm14, %ymm15
vcmpltps %ymm4, %ymm15, %ymm15
vblendvps %ymm15, %ymm4, %ymm14, %ymm14
vandps %ymm1, %ymm9, %ymm15
vcmpltps %ymm4, %ymm15, %ymm15
vblendvps %ymm15, %ymm4, %ymm9, %ymm4
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vmulps 0x220(%rax), %ymm13, %ymm9
vmulps 0x240(%rax), %ymm13, %ymm15
vmulps 0x260(%rax), %ymm13, %ymm1
vaddps %ymm2, %ymm9, %ymm13
vaddps %ymm2, %ymm15, %ymm9
vmovaps %ymm9, 0x200(%rsp)
vaddps %ymm1, %ymm2, %ymm15
vrcpps %ymm3, %ymm1
vmulps %ymm1, %ymm3, %ymm2
vbroadcastss 0xa4565f(%rip), %ymm3 # 0x1eec714
vsubps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm1, %ymm2
vaddps %ymm2, %ymm1, %ymm9
vrcpps %ymm14, %ymm1
vmulps %ymm1, %ymm14, %ymm2
vsubps %ymm2, %ymm3, %ymm2
vrcpps %ymm4, %ymm14
vmulps %ymm2, %ymm1, %ymm2
vaddps %ymm2, %ymm1, %ymm1
vmulps %ymm4, %ymm14, %ymm2
vsubps %ymm2, %ymm3, %ymm2
vmulps %ymm2, %ymm14, %ymm2
vmulps 0x40(%rsp), %ymm10, %ymm4
vaddps 0x160(%rax), %ymm4, %ymm4
vaddps %ymm2, %ymm14, %ymm10
vmulps 0x60(%rsp), %ymm5, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vmulps 0x40(%rsp), %ymm6, %ymm4
vaddps 0x180(%rax), %ymm4, %ymm4
vmulps 0x60(%rsp), %ymm8, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps 0x40(%rsp), %ymm7, %ymm5
vaddps 0x1a0(%rax), %ymm5, %ymm5
vmovaps 0x220(%rsp), %ymm3
vmulps 0x60(%rsp), %ymm3, %ymm6
vaddps %ymm5, %ymm6, %ymm5
vmovaps 0x240(%rsp), %ymm3
vmulps 0x80(%rsp), %ymm3, %ymm6
vaddps %ymm2, %ymm6, %ymm6
vmulps 0x80(%rsp), %ymm11, %ymm2
vaddps %ymm4, %ymm2, %ymm7
vmulps 0x80(%rsp), %ymm12, %ymm2
vaddps %ymm5, %ymm2, %ymm8
vmovaps 0x20(%rsp), %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmovaps 0x280(%rsp), %ymm3
vsubps %ymm7, %ymm3, %ymm4
vmovaps 0x260(%rsp), %ymm3
vsubps %ymm8, %ymm3, %ymm11
vmulps %ymm2, %ymm9, %ymm5
vmulps %ymm1, %ymm4, %ymm4
vmulps %ymm10, %ymm11, %ymm2
vsubps %ymm6, %ymm13, %ymm6
vmovaps 0x200(%rsp), %ymm3
vsubps %ymm7, %ymm3, %ymm7
vsubps %ymm8, %ymm15, %ymm8
vmulps %ymm6, %ymm9, %ymm3
vmulps %ymm1, %ymm7, %ymm6
vmulps %ymm10, %ymm8, %ymm1
vextractf128 $0x1, %ymm3, %xmm7
vextractf128 $0x1, %ymm5, %xmm8
vpminsd %xmm7, %xmm8, %xmm9
vpminsd %xmm3, %xmm5, %xmm10
vinsertf128 $0x1, %xmm9, %ymm10, %ymm9
vmovaps %ymm9, 0x20(%rsp)
vextractf128 $0x1, %ymm6, %xmm10
vextractf128 $0x1, %ymm4, %xmm11
vpminsd %xmm10, %xmm11, %xmm12
vpminsd %xmm6, %xmm4, %xmm13
vinsertf128 $0x1, %xmm12, %ymm13, %ymm12
vextractf128 $0x1, %ymm1, %xmm13
vextractf128 $0x1, %ymm2, %xmm14
vpminsd %xmm13, %xmm14, %xmm15
vpminsd %xmm1, %xmm2, %xmm9
vinsertf128 $0x1, %xmm15, %ymm9, %ymm9
vmaxps %ymm9, %ymm12, %ymm9
vmovaps 0x100(%rsp), %ymm12
vpmaxsd %xmm7, %xmm8, %xmm7
vmovaps 0x60(%rsp), %ymm8
vpmaxsd %xmm3, %xmm5, %xmm3
vinsertf128 $0x1, %xmm7, %ymm3, %ymm3
vmovaps 0x80(%rsp), %ymm7
vpmaxsd %xmm10, %xmm11, %xmm5
vmovaps 0x120(%rsp), %ymm11
vmovaps 0x140(%rsp), %ymm10
vpmaxsd %xmm6, %xmm4, %xmm4
vinsertf128 $0x1, %xmm5, %ymm4, %ymm4
vpmaxsd %xmm13, %xmm14, %xmm5
vmovaps 0xe0(%rsp), %ymm13
vpmaxsd %xmm1, %xmm2, %xmm1
vinsertf128 $0x1, %xmm5, %ymm1, %ymm1
vminps %ymm1, %ymm4, %ymm1
vmaxps 0x20(%rsp), %ymm13, %ymm2
vmaxps %ymm9, %ymm2, %ymm2
vmovaps 0x40(%rsp), %ymm9
vminps %ymm3, %ymm0, %ymm3
vminps %ymm1, %ymm3, %ymm1
vmovaps %ymm2, 0xc0(%rsp)
vcmpleps %ymm1, %ymm2, %ymm1
vmovmskps %ymm1, %r14d
jmp 0x14a6ec0
movl $0x4, %eax
jmp 0x14a6f01
vmovaps 0x1c0(%rax), %ymm4
vcmpleps %ymm1, %ymm4, %ymm4
vcmpltps 0x1e0(%rax), %ymm1, %ymm1
vandps %ymm1, %ymm4, %ymm1
vandps %ymm3, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm3
vpackssdw %xmm3, %xmm1, %xmm1
jmp 0x14a6ea6
movq %r11, 0x20(%rsp)
movl 0xc0(%rsp,%rdi,4), %edi
bsfq %r9, %r10
leaq -0x1(%r9), %r11
movq (%rcx,%r10,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
movl 0xc0(%rsp,%r10,4), %r10d
andq %r9, %r11
jne 0x14a7364
movq 0x20(%rsp), %r9
leaq 0x10(%r9), %rcx
cmpl %r10d, %edi
jae 0x14a7348
movq %r8, (%r9)
movl %r10d, 0x8(%r9)
movq %rcx, %r11
movq %rdx, %rcx
jmp 0x14a7355
movq %rdx, (%r9)
movl %edi, 0x8(%r9)
movq %rcx, %r11
movq %r8, %rcx
movq 0x18(%rsp), %r8
movq 0x10(%rsp), %r10
jmp 0x14a6f01
vmovq %rdx, %xmm1
vmovd %edi, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
vmovq %r8, %xmm2
vmovd %r10d, %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
bsfq %r11, %rdx
leaq -0x1(%r11), %rdi
movq (%rcx,%rdx,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm3
vmovd 0xc0(%rsp,%rdx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r11, %rdi
jne 0x14a7418
vpcmpgtd %xmm1, %xmm2, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm2, %xmm5
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm5, %xmm3, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm5, %xmm3, %xmm4
vblendvps %xmm2, %xmm3, %xmm5, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm5
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
movq 0x20(%rsp), %r11
vmovaps %xmm1, (%r11)
vmovaps %xmm5, 0x10(%r11)
vmovq %xmm4, %rcx
addq $0x20, %r11
jmp 0x14a7355
movq %rsi, %r10
bsfq %rdi, %rsi
movq (%rcx,%rsi,8), %rdx
prefetcht0 (%rdx)
prefetcht0 0x40(%rdx)
prefetcht0 0x80(%rdx)
prefetcht0 0xc0(%rdx)
vmovq %rdx, %xmm4
leaq -0x1(%rdi), %rdx
vmovd 0xc0(%rsp,%rsi,4), %xmm5
vpunpcklqdq %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm5[0]
andq %rdi, %rdx
jne 0x14a752a
vpcmpgtd %xmm1, %xmm2, %xmm5
vpshufd $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vblendvps %xmm5, %xmm1, %xmm2, %xmm6
vblendvps %xmm5, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm3, %xmm4, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm3, %xmm4, %xmm5
vblendvps %xmm2, %xmm4, %xmm3, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm4
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm6, %xmm5, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm6, %xmm5, %xmm3
vblendvps %xmm2, %xmm5, %xmm6, %xmm2
vpcmpgtd %xmm2, %xmm4, %xmm5
vpshufd $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vblendvps %xmm5, %xmm2, %xmm4, %xmm6
vblendvps %xmm5, %xmm4, %xmm2, %xmm2
movq 0x20(%rsp), %r11
vmovaps %xmm1, (%r11)
vmovaps %xmm2, 0x10(%r11)
vmovaps %xmm6, 0x20(%r11)
vmovq %xmm3, %rcx
addq $0x30, %r11
movq %r10, %rsi
vmovaps 0x80(%rsp), %ymm7
vmovaps 0x60(%rsp), %ymm8
vmovaps 0x40(%rsp), %ymm9
movq 0x18(%rsp), %r8
vmovaps 0x140(%rsp), %ymm10
vmovaps 0x120(%rsp), %ymm11
vmovaps 0x100(%rsp), %ymm12
movq 0x10(%rsp), %r10
vmovaps 0xe0(%rsp), %ymm13
jmp 0x14a6f01
movq 0x20(%rsp), %rsi
vmovdqa %xmm1, (%rsi)
vmovdqa %xmm2, 0x10(%rsi)
vmovdqa %xmm3, 0x20(%rsi)
vmovdqa %xmm4, 0x30(%rsi)
movl $0x30, %esi
bsfq %rdx, %rdi
leaq -0x1(%rdx), %r8
movq (%rcx,%rdi,8), %r9
prefetcht0 (%r9)
prefetcht0 0x40(%r9)
prefetcht0 0x80(%r9)
prefetcht0 0xc0(%r9)
vmovq %r9, %xmm1
vmovd 0xc0(%rsp,%rdi,4), %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
movq 0x20(%rsp), %rdi
vmovdqa %xmm1, 0x10(%rdi,%rsi)
addq $0x10, %rsi
andq %r8, %rdx
jne 0x14a7547
movq 0x20(%rsp), %rcx
leaq (%rcx,%rsi), %rdx
testq %rsi, %rsi
je 0x14a75f7
movl $0x10, %ecx
movq 0x20(%rsp), %rsi
vmovdqa 0x10(%rsi), %xmm1
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rcx, %rdi
movq 0x20(%rsp), %r9
cmpl %r8d, -0x8(%r9,%rdi)
jae 0x14a75e5
movq 0x20(%rsp), %r9
vmovdqa -0x10(%r9,%rdi), %xmm2
vmovdqa %xmm2, (%r9,%rdi)
addq $-0x10, %rdi
jne 0x14a75ba
movq 0x20(%rsp), %rdi
jmp 0x14a75ea
addq 0x20(%rsp), %rdi
vmovdqa %xmm1, (%rdi)
addq $0x10, %rcx
cmpq %rsi, %rdx
jne 0x14a75aa
movq (%rdx), %rcx
movq %rdx, %r11
jmp 0x14a74df
cmpl $0x6, %eax
jne 0x14a6d86
andq $-0x10, %rcx
movzbl (%rcx), %eax
movq 0xb0(%rsp), %rdx
movq 0x8(%rdx), %r8
shll $0x6, %eax
leaq 0x160(%rsp), %rdi
movq 0xb8(%rsp), %rdx
movq %r11, 0x20(%rsp)
vzeroupper
callq *(%r8,%rax)
vmovaps 0xe0(%rsp), %ymm13
movq 0x10(%rsp), %r10
vmovaps 0x100(%rsp), %ymm12
vmovaps 0x120(%rsp), %ymm11
vmovaps 0x140(%rsp), %ymm10
movq 0x18(%rsp), %r8
vmovaps 0x40(%rsp), %ymm9
vmovaps 0x60(%rsp), %ymm8
vmovaps 0x80(%rsp), %ymm7
movq 0x20(%rsp), %r11
movq 0xa8(%rsp), %rsi
vbroadcastss 0x20(%rsi), %ymm0
jmp 0x14a6d86
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
|
/// Single-ray, closest-hit, stack-based depth-first BVH traversal.
/// On a hit, the primitive intersector updates `ray` (hit fields and tfar) in place.
/// @param This     accel interface; This->ptr holds the BVH being traversed
/// @param ray      ray in, hit record out (tfar shrinks as closer hits are found)
/// @param context  per-query context forwarded to the primitive intersector
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
RayHit& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf; // -inf distance: the root entry is never culled by the tfar test below
// NOTE(review): this emptyNode check duplicates the one above; the root cannot
// have changed in between — looks removable, confirm before touching.
if (bvh->root == BVH::emptyNode)
return;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers */
// tnear/tfar are clamped to >= 0 so traversal never accepts negative distances
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
// dist was stored as the raw float bits of the node entry distance;
// a closer hit found since the push may have shrunk ray.tfar below it
if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes,1,1,1);
// intersect() returns false when `cur` is a leaf; then we fall through to leaf handling
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
// advances `cur` to the nearest hit child, pushes remaining hit children sorted by tNear
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
// propagate a possibly shrunken tfar into the traversal ray so later nodes are culled
tray.tfar = ray.tfar;
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf; // never culled by the tfar test
stackPtr++;
}
}
}
|
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x14a8f42
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x2640, %rsp # imm = 0x2640
movq %rdx, 0x18(%rsp)
movq 0x70(%rax), %rax
movq %rax, 0x2e0(%rsp)
movl $0x0, 0x2e8(%rsp)
cmpq $0x8, %rax
jne 0x14a8f46
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r14
vmovaps 0x10(%rsi), %xmm3
vxorps %xmm2, %xmm2, %xmm2
vmaxss 0xc(%rsi), %xmm2, %xmm1
vmaxss 0x20(%rsi), %xmm2, %xmm0
vbroadcastss 0xa77f5f(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm3, %xmm4
vbroadcastss 0xa48076(%rip), %xmm5 # 0x1ef0fe8
vcmpltps %xmm5, %xmm4, %xmm4
vblendvps %xmm4, %xmm5, %xmm3, %xmm3
leaq 0x2f0(%rsp), %r11
vrcpps %xmm3, %xmm4
vmulps %xmm4, %xmm3, %xmm3
vbroadcastss 0xa4377e(%rip), %xmm5 # 0x1eec714
vsubps %xmm3, %xmm5, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vbroadcastss 0x8(%rsi), %ymm8
vaddps %xmm3, %xmm4, %xmm3
xorl %edi, %edi
vucomiss %xmm2, %xmm3
setb %dil
vshufps $0x0, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm9
vmovshdup %xmm3, %xmm4 # xmm4 = xmm3[1,1,3,3]
vshufps $0x55, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,1,1,1]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm10
vshufpd $0x1, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,0]
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
shll $0x5, %edi
xorl %r9d, %r9d
vucomiss %xmm2, %xmm4
vinsertf128 $0x1, %xmm3, %ymm3, %ymm11
setb %r9b
shll $0x5, %r9d
orq $0x40, %r9
xorl %r10d, %r10d
vucomiss %xmm2, %xmm5
setb %r10b
shll $0x5, %r10d
orq $0x80, %r10
movq %rdi, %rdx
xorq $0x20, %rdx
movq %r9, %rbx
xorq $0x20, %rbx
movq %r10, %r15
xorq $0x20, %r15
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm12
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
leaq 0xca6f3b(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm1
vmovaps %xmm1, 0x130(%rsp)
movq %rbx, 0x10(%rsp)
movq %r15, 0x8(%rsp)
vmovaps %ymm6, 0x2c0(%rsp)
vmovaps %ymm7, 0x2a0(%rsp)
vmovaps %ymm8, 0x280(%rsp)
vmovaps %ymm9, 0x260(%rsp)
vmovaps %ymm10, 0x240(%rsp)
vmovaps %ymm11, 0x220(%rsp)
movq %rdx, (%rsp)
vmovaps %ymm12, 0x200(%rsp)
vmovss 0x20(%r14), %xmm1
leaq 0x2e0(%rsp), %rax
cmpq %rax, %r11
je 0x14a8f34
vmovss -0x8(%r11), %xmm2
addq $-0x10, %r11
vucomiss %xmm1, %xmm2
ja 0x14a90a9
movq (%r11), %r13
testb $0x8, %r13b
jne 0x14a915e
vmovaps 0x40(%r13,%rdi), %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmovaps 0x40(%r13,%r9), %ymm2
vsubps %ymm7, %ymm2, %ymm2
vmulps %ymm2, %ymm10, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%r13,%r10), %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmulps %ymm2, %ymm11, %ymm2
vmaxps %ymm12, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vmovaps 0x40(%r13,%rdx), %ymm2
vsubps %ymm6, %ymm2, %ymm2
vmulps %ymm2, %ymm9, %ymm2
vmovaps 0x40(%r13,%rbx), %ymm3
vsubps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm10, %ymm3
vminps %ymm3, %ymm2, %ymm2
vmovaps 0x40(%r13,%r15), %ymm3
vsubps %ymm8, %ymm3, %ymm3
vmulps %ymm3, %ymm11, %ymm3
vminps %ymm0, %ymm3, %ymm3
vminps %ymm3, %ymm2, %ymm2
vcmpleps %ymm2, %ymm1, %ymm2
vmovmskps %ymm2, %r12d
vmovaps %ymm1, 0x140(%rsp)
testb $0x8, %r13b
jne 0x14a91a7
testq %r12, %r12
je 0x14a91ae
andq $-0x10, %r13
bsfq %r12, %rsi
leaq -0x1(%r12), %r8
xorl %eax, %eax
movq (%r13,%rsi,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r12, %r8
jne 0x14a91b5
movq %rcx, %r13
testl %eax, %eax
je 0x14a90cd
jmp 0x14a94cb
movl $0x6, %eax
jmp 0x14a919a
movl $0x4, %eax
jmp 0x14a919a
movq %r10, %r15
movq %r9, %rdx
movq %rdi, %rbx
movl 0x140(%rsp,%rsi,4), %esi
bsfq %r8, %r9
leaq -0x1(%r8), %r10
movq (%r13,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x140(%rsp,%r9,4), %r9d
andq %r8, %r10
jne 0x14a9235
leaq 0x10(%r11), %r8
cmpl %r9d, %esi
movq %r15, %r10
jae 0x14a920f
movq %rdi, (%r11)
movl %r9d, 0x8(%r11)
movq %r8, %r11
movq %rcx, %r13
jmp 0x14a921c
movq %rcx, (%r11)
movl %esi, 0x8(%r11)
movq %r8, %r11
movq %rdi, %r13
movq %rbx, %rdi
movq %rdx, %r9
movq (%rsp), %rdx
movq 0x10(%rsp), %rbx
movq 0x8(%rsp), %r15
jmp 0x14a919a
vmovq %rcx, %xmm1
vmovd %esi, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
vmovq %rdi, %xmm2
vmovd %r9d, %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
bsfq %r10, %rcx
leaq -0x1(%r10), %rsi
movq (%r13,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
vmovq %rdi, %xmm3
vmovd 0x140(%rsp,%rcx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r10, %rsi
jne 0x14a92ea
vpcmpgtd %xmm1, %xmm2, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm2, %xmm5
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm5, %xmm3, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm5, %xmm3, %xmm4
vblendvps %xmm2, %xmm3, %xmm5, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm5
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, (%r11)
vmovaps %xmm5, 0x10(%r11)
vmovq %xmm4, %r13
addq $0x20, %r11
movq %rbx, %rdi
movq %rdx, %r9
movq %r15, %r10
jmp 0x14a9222
vmovaps %ymm12, %ymm13
vmovaps %ymm11, %ymm12
vmovaps %ymm10, %ymm11
vmovaps %ymm9, %ymm10
vmovaps %ymm8, %ymm9
vmovaps %ymm7, %ymm8
vmovaps %ymm6, %ymm7
bsfq %rsi, %rdi
movq (%r13,%rdi,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
vmovq %rcx, %xmm4
leaq -0x1(%rsi), %rcx
vmovd 0x140(%rsp,%rdi,4), %xmm5
vpunpcklqdq %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm5[0]
andq %rsi, %rcx
jne 0x14a9408
vpcmpgtd %xmm1, %xmm2, %xmm5
vpshufd $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vblendvps %xmm5, %xmm1, %xmm2, %xmm6
vblendvps %xmm5, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm3, %xmm4, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm3, %xmm4, %xmm5
vblendvps %xmm2, %xmm4, %xmm3, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm4
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm6, %xmm5, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm6, %xmm5, %xmm3
vblendvps %xmm2, %xmm5, %xmm6, %xmm2
vpcmpgtd %xmm2, %xmm4, %xmm5
vpshufd $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vblendvps %xmm5, %xmm2, %xmm4, %xmm6
vblendvps %xmm5, %xmm4, %xmm2, %xmm2
vmovaps %xmm1, (%r11)
vmovaps %xmm2, 0x10(%r11)
vmovaps %xmm6, 0x20(%r11)
vmovq %xmm3, %r13
addq $0x30, %r11
vmovaps %ymm7, %ymm6
vmovaps %ymm8, %ymm7
vmovaps %ymm9, %ymm8
movq %rbx, %rdi
vmovaps %ymm10, %ymm9
vmovaps %ymm11, %ymm10
movq %rdx, %r9
vmovaps %ymm12, %ymm11
movq %r15, %r10
movq (%rsp), %rdx
movq 0x10(%rsp), %rbx
movq 0x8(%rsp), %r15
vmovaps %ymm13, %ymm12
jmp 0x14a919a
vmovdqa %xmm1, (%r11)
vmovdqa %xmm2, 0x10(%r11)
vmovdqa %xmm3, 0x20(%r11)
vmovdqa %xmm4, 0x30(%r11)
movl $0x30, %esi
movq %rsi, %r10
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdi
movq (%r13,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm1
vmovd 0x140(%rsp,%rsi,4), %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
vmovdqa %xmm1, 0x10(%r11,%r10)
leaq 0x10(%r10), %rsi
andq %rdi, %rcx
jne 0x14a9424
leaq (%r11,%rsi), %rcx
testq %rsi, %rsi
je 0x14a94c0
movl $0x10, %r13d
movq %r11, %rsi
vmovdqa 0x10(%rsi), %xmm1
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %r13, %rdi
cmpl %r8d, -0x8(%r11,%rdi)
jae 0x14a94b0
vmovdqa -0x10(%r11,%rdi), %xmm2
vmovdqa %xmm2, (%r11,%rdi)
addq $-0x10, %rdi
jne 0x14a9491
movq %r11, %rdi
jmp 0x14a94b3
addq %r11, %rdi
vmovdqa %xmm1, (%rdi)
addq $0x10, %r13
cmpq %rsi, %rcx
jne 0x14a9481
movq (%rcx), %r13
movq %rcx, %r11
jmp 0x14a93cb
cmpl $0x6, %eax
jne 0x14a90a3
movl %r13d, %esi
andl $0xf, %esi
addq $-0x8, %rsi
je 0x14a9bc8
andq $-0x10, %r13
xorl %ebx, %ebx
imulq $0xb0, %rbx, %r15
vmovaps 0x80(%r13,%r15), %xmm9
vmovaps 0x40(%r13,%r15), %xmm6
vmulps %xmm6, %xmm9, %xmm0
vmovaps 0x70(%r13,%r15), %xmm10
vmovaps 0x50(%r13,%r15), %xmm7
vmulps %xmm7, %xmm10, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x80(%rsp)
vmovaps 0x60(%r13,%r15), %xmm11
vmulps %xmm7, %xmm11, %xmm1
vmovaps (%r13,%r15), %xmm3
vmovaps 0x10(%r13,%r15), %xmm13
vmovaps 0x20(%r13,%r15), %xmm0
vmovaps 0x30(%r13,%r15), %xmm8
vmulps %xmm8, %xmm9, %xmm2
vsubps %xmm1, %xmm2, %xmm5
vmulps %xmm8, %xmm10, %xmm2
vmulps %xmm6, %xmm11, %xmm12
vsubps %xmm2, %xmm12, %xmm4
vbroadcastss (%r14), %xmm12
vsubps %xmm12, %xmm3, %xmm2
vbroadcastss 0x4(%r14), %xmm12
vsubps %xmm12, %xmm13, %xmm3
vbroadcastss 0x8(%r14), %xmm12
vsubps %xmm12, %xmm0, %xmm1
vbroadcastss 0x14(%r14), %xmm12
vbroadcastss 0x18(%r14), %xmm13
vmulps %xmm1, %xmm12, %xmm14
vmulps %xmm3, %xmm13, %xmm15
vsubps %xmm14, %xmm15, %xmm14
vbroadcastss 0x10(%r14), %xmm15
vmulps %xmm2, %xmm13, %xmm0
vmovaps %xmm1, 0x60(%rsp)
vmulps %xmm1, %xmm15, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm3, 0x70(%rsp)
vmulps %xmm3, %xmm15, %xmm1
vmovaps %xmm2, 0xf0(%rsp)
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmovaps %xmm4, 0x50(%rsp)
vmulps %xmm4, %xmm13, %xmm2
vmovaps 0x80(%rsp), %xmm13
vmovaps %xmm5, 0x20(%rsp)
vmulps %xmm5, %xmm12, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm15, %xmm13, %xmm12
vaddps %xmm2, %xmm12, %xmm2
vmulps %xmm1, %xmm9, %xmm9
vmulps %xmm0, %xmm10, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vmulps %xmm14, %xmm11, %xmm10
vaddps %xmm9, %xmm10, %xmm10
vmulps %xmm1, %xmm7, %xmm1
vmulps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm1, %xmm0
vmovddup 0xa77945(%rip), %xmm1 # xmm1 = mem[0,0]
vandps %xmm1, %xmm2, %xmm9
vxorps %xmm10, %xmm9, %xmm7
vmulps %xmm14, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm8
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm7, %xmm0
vcmpnltps %xmm10, %xmm8, %xmm1
vandps %xmm1, %xmm0, %xmm0
vbroadcastss 0xa7786d(%rip), %xmm1 # 0x1f20ec4
vandps %xmm1, %xmm2, %xmm6
vcmpneqps %xmm2, %xmm10, %xmm1
vandps %xmm1, %xmm0, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x130(%rsp), %xmm10
jne 0x14a968e
incq %rbx
cmpq %rsi, %rbx
jne 0x14a94ea
jmp 0x14a9bc8
vandps 0x130(%rsp), %xmm10, %xmm10
vmovaps 0x60(%rsp), %xmm0
vmulps 0x50(%rsp), %xmm0, %xmm0
vmovaps 0x70(%rsp), %xmm1
vmulps 0x20(%rsp), %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0xf0(%rsp), %xmm13, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm4
vbroadcastss 0xc(%r14), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm4, %xmm0, %xmm0
vbroadcastss 0x20(%r14), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm4, %xmm1
vandps %xmm0, %xmm1, %xmm3
vtestps %xmm10, %xmm3
je 0x14a967d
vandps %xmm3, %xmm10, %xmm3
vmovaps %xmm7, 0x140(%rsp)
vmovaps %xmm8, 0x150(%rsp)
vmovaps %xmm4, 0x160(%rsp)
vmovaps %xmm6, 0x170(%rsp)
vmovaps %xmm3, 0x190(%rsp)
vmovaps %xmm13, 0x1d0(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps 0x50(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq 0x18(%rsp), %rax
movq (%rax), %rcx
vrcpps %xmm6, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vbroadcastss 0xa42fb6(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x160(%rsp), %xmm0, %xmm4
vmovaps %xmm3, 0x90(%rsp)
vmovaps %xmm4, 0x1c0(%rsp)
vmulps 0x140(%rsp), %xmm0, %xmm1
vmovaps %xmm1, 0x1a0(%rsp)
vmulps 0x150(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
vbroadcastss 0xa4226e(%rip), %xmm0 # 0x1eeba20
vblendvps %xmm3, %xmm4, %xmm0, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2]
vminps %xmm0, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vminps %xmm1, %xmm2, %xmm1
vcmpeqps %xmm1, %xmm0, %xmm0
vtestps %xmm3, %xmm0
je 0x14a97da
vandps %xmm3, %xmm0, %xmm3
addq %r13, %r15
vmovmskps %xmm3, %eax
bsfq %rax, %rax
movq %rcx, 0xf0(%rsp)
movl 0x90(%r15,%rax,4), %r8d
movq 0x1e8(%rcx), %rcx
movq %r8, 0x20(%rsp)
movq (%rcx,%r8,8), %r8
movl 0x24(%r14), %ecx
movq %r8, 0x80(%rsp)
testl %ecx, 0x34(%r8)
je 0x14a9a41
movq %rax, 0x50(%rsp)
movq 0x18(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x14a9843
movq 0x80(%rsp), %rax
cmpq $0x0, 0x40(%rax)
je 0x14a9b39
movq %rcx, 0xa8(%rsp)
vmovaps %xmm4, 0xe0(%rsp)
movq %rsi, 0x30(%rsp)
movq %r10, 0x38(%rsp)
movq %r9, 0x40(%rsp)
movq %rdi, 0x60(%rsp)
movq %r11, 0x70(%rsp)
movq 0x50(%rsp), %rsi
vmovss 0x1a0(%rsp,%rsi,4), %xmm0
vmovss 0x1b0(%rsp,%rsi,4), %xmm1
movq 0x18(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0xa0(%r15,%rsi,4), %edx
vmovss 0x1d0(%rsp,%rsi,4), %xmm2
vmovss 0x1e0(%rsp,%rsi,4), %xmm3
vmovss 0x1f0(%rsp,%rsi,4), %xmm4
vmovss %xmm2, 0x100(%rsp)
vmovss %xmm3, 0x104(%rsp)
vmovss %xmm4, 0x108(%rsp)
vmovss %xmm0, 0x10c(%rsp)
vmovss %xmm1, 0x110(%rsp)
movl %edx, 0x114(%rsp)
movq 0x20(%rsp), %rax
movl %eax, 0x118(%rsp)
movl (%rcx), %eax
movl %eax, 0x11c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x120(%rsp)
vmovss 0x20(%r14), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x1c0(%rsp,%rsi,4), %xmm0
vmovss %xmm0, 0x20(%r14)
movl $0xffffffff, 0x4c(%rsp) # imm = 0xFFFFFFFF
leaq 0x4c(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x80(%rsp), %rdx
movq 0x18(%rdx), %rax
movq %rax, 0xb8(%rsp)
movq %rcx, 0xc0(%rsp)
movq %r14, 0xc8(%rsp)
leaq 0x100(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x40(%rdx), %rax
testq %rax, %rax
je 0x14a9999
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x14a9ad5
movq 0xa8(%rsp), %rax
movq 0x10(%rax), %rax
testq %rax, %rax
je 0x14a99e3
movq 0xa8(%rsp), %rcx
testb $0x2, (%rcx)
jne 0x14a99c5
movq 0x80(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x14a99d2
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x14a9ad5
movq 0xc8(%rsp), %rax
movq 0xd0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x14a9ae1
movl $0x0, 0x90(%rsp,%rax,4)
vmovaps 0x90(%rsp), %xmm1
vtestps %xmm1, %xmm1
je 0x14a967d
vmovaps %xmm4, %xmm0
vmovaps %xmm1, %xmm2
movq %rdi, 0x60(%rsp)
leaq 0xb0(%rsp), %rdi
movq %r11, 0x70(%rsp)
movq %r9, 0x40(%rsp)
movq %r10, 0x38(%rsp)
movq %rsi, 0x30(%rsp)
vmovaps %xmm4, 0xe0(%rsp)
vzeroupper
callq 0x216fff
vmovaps 0xe0(%rsp), %xmm4
movq 0x30(%rsp), %rsi
movq (%rsp), %rdx
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r9
movq 0x60(%rsp), %rdi
movq 0x70(%rsp), %r11
movq 0xb0(%rsp), %rax
movq 0xf0(%rsp), %rcx
jmp 0x14a97ed
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%r14)
movq 0x50(%rsp), %rax
movl $0x0, 0x90(%rsp,%rax,4)
vbroadcastss 0x20(%r14), %xmm0
vmovaps 0xe0(%rsp), %xmm4
vcmpleps %xmm0, %xmm4, %xmm0
vandps 0x90(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x90(%rsp)
movq 0x70(%rsp), %r11
movq 0x60(%rsp), %rdi
movq 0x40(%rsp), %r9
movq 0x38(%rsp), %r10
movq (%rsp), %rdx
movq 0x30(%rsp), %rsi
jmp 0x14a9a4c
movq 0x50(%rsp), %rax
vmovss 0x1a0(%rsp,%rax,4), %xmm0
vmovss 0x1b0(%rsp,%rax,4), %xmm1
vmovss 0x1c0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x20(%r14)
vmovss 0x1d0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x30(%r14)
vmovss 0x1e0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x34(%r14)
vmovss 0x1f0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x38(%r14)
vmovss %xmm0, 0x3c(%r14)
vmovss %xmm1, 0x40(%r14)
movl 0xa0(%r15,%rax,4), %ecx
movl %ecx, 0x44(%r14)
movq 0x20(%rsp), %rax
movl %eax, 0x48(%r14)
movq 0x18(%rsp), %rax
movq 0x8(%rax), %rax
movl (%rax), %ecx
movl %ecx, 0x4c(%r14)
movl 0x4(%rax), %eax
movl %eax, 0x50(%r14)
jmp 0x14a967d
vbroadcastss 0x20(%r14), %ymm0
vmovaps 0x2c0(%rsp), %ymm6
vmovaps 0x2a0(%rsp), %ymm7
vmovaps 0x280(%rsp), %ymm8
vmovaps 0x260(%rsp), %ymm9
vmovaps 0x240(%rsp), %ymm10
vmovaps 0x220(%rsp), %ymm11
movq 0x10(%rsp), %rbx
movq 0x8(%rsp), %r15
vmovaps 0x200(%rsp), %ymm12
jmp 0x14a90a3
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMiIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
|
/// Stack-based BVH point query: visits every node whose bounds lie within the
/// current culling radius and forwards leaves to the primitive point-query.
/// @param This     accel interface; This->ptr holds the BVH being traversed
/// @param query    query point (+ radius/time); may be shrunk by primitive callbacks
/// @param context  query context; query_type selects sphere vs. AABB culling
/// @return true if any primitive callback reported a change
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return false;
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf; // -inf distance: the root entry always survives the cull test
/* verify correct input */
assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));
/* load the point query into SIMD registers */
TravPointQuery<N> tquery(query->p, context->query_radius);
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N,types> nodeTraverser;
bool changed = false;
// squared culling radius: squared scalar radius for sphere queries,
// squared length of the per-axis radius vector for AABB queries
float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
// dist holds the raw float bits of the node's squared distance at push time;
// the radius may have shrunk since, so re-test against the current cull_radius
if (unlikely(*(float*)&stackPtr->dist > cull_radius))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(point_query.trav_nodes,1,1,1);
bool nodeIntersected;
if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
} else {
nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
}
// pointQuery() returns false when `cur` is a leaf; fall through to leaf handling
if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(point_query.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
{
changed = true;
// the callback may have shrunk the query radius: refresh the SIMD copy
// and recompute the squared culling radius used by the pop-loop test
tquery.rad = context->query_radius;
cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf; // never culled
stackPtr++;
}
}
return changed;
}
|
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x14adda7
xorl %eax, %eax
jmp 0x14ae46e
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x2520, %rsp # imm = 0x2520
movq %rdx, %rbx
movq %rsi, %r12
movq 0x70(%rax), %rax
movq %rax, 0x1c0(%rsp)
movl $0x0, 0x1c8(%rsp)
cmpl $0x1, 0x18(%rdx)
jne 0x14addef
vmovss 0x10(%r12), %xmm0
vmulss %xmm0, %xmm0, %xmm9
jmp 0x14addfa
vmovaps 0x50(%rbx), %xmm0
vdpps $0x7f, %xmm0, %xmm0, %xmm9
leaq 0x1d0(%rsp), %r11
vbroadcastss (%r12), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vbroadcastss 0x4(%r12), %ymm0
vmovaps %ymm0, 0x80(%rsp)
vbroadcastss 0x8(%r12), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
movl $0x0, 0x14(%rsp)
leaq 0x1c0(%rsp), %r14
vmovaps 0xa0(%rsp), %ymm3
vsubps %ymm0, %ymm3, %ymm4
vmovaps %ymm4, 0x180(%rsp)
vaddps %ymm0, %ymm3, %ymm3
vmovaps %ymm3, 0x160(%rsp)
vmovaps 0x80(%rsp), %ymm3
vsubps %ymm1, %ymm3, %ymm4
vmovaps %ymm4, 0x140(%rsp)
vaddps %ymm1, %ymm3, %ymm1
vmovaps %ymm1, 0x120(%rsp)
vmovaps 0x60(%rsp), %ymm1
vsubps %ymm2, %ymm1, %ymm3
vmovaps %ymm3, 0x100(%rsp)
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0xe0(%rsp)
vmulps %ymm0, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps %xmm9, 0xd0(%rsp)
cmpq %r14, %r11
je 0x14ae45c
vmovss -0x8(%r11), %xmm0
addq $-0x10, %r11
vucomiss %xmm9, %xmm0
ja 0x14adecc
movq (%r11), %r13
cmpl $0x1, 0x18(%rbx)
jne 0x14adfde
testb $0x8, %r13b
jne 0x14adf8a
vmovaps 0x40(%r13), %ymm0
vmovaps 0x60(%r13), %ymm1
vmovaps 0xa0(%rsp), %ymm3
vmaxps %ymm0, %ymm3, %ymm2
vminps %ymm1, %ymm2, %ymm2
vsubps %ymm3, %ymm2, %ymm2
vmovaps 0x80(%rsp), %ymm5
vmaxps 0x80(%r13), %ymm5, %ymm3
vminps 0xa0(%r13), %ymm3, %ymm3
vmovaps 0x60(%rsp), %ymm6
vmaxps 0xc0(%r13), %ymm6, %ymm4
vminps 0xe0(%r13), %ymm4, %ymm4
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm6, %ymm4, %ymm4
vmulps %ymm2, %ymm2, %ymm2
vmulps %ymm3, %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps %ymm4, %ymm4, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmovaps %ymm2, 0x40(%rsp)
vcmpleps 0x1a0(%rsp), %ymm2, %ymm2
vcmpleps %ymm1, %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vmovmskps %ymm0, %r15d
testb $0x8, %r13b
jne 0x14ae0ca
testq %r15, %r15
je 0x14ae0d4
andq $-0x10, %r13
bsfq %r15, %rdx
leaq -0x1(%r15), %r8
xorl %eax, %eax
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r15, %r8
jne 0x14ae0de
movq %rcx, %r13
testl %eax, %eax
je 0x14adee9
jmp 0x14ae35e
testb $0x8, %r13b
jne 0x14adf8a
vmovaps 0xc0(%r13), %ymm0
vmovaps 0x40(%r13), %ymm1
vmovaps 0x60(%r13), %ymm2
vmovaps 0x80(%r13), %ymm3
vmovaps 0xa0(%r13), %ymm4
vmovaps 0xe0(%r13), %ymm5
vmovaps 0xa0(%rsp), %ymm7
vmaxps %ymm1, %ymm7, %ymm6
vminps %ymm2, %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmovaps 0x80(%rsp), %ymm8
vmaxps %ymm3, %ymm8, %ymm7
vminps %ymm4, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmulps %ymm6, %ymm6, %ymm6
vmulps %ymm7, %ymm7, %ymm7
vaddps %ymm7, %ymm6, %ymm6
vmovaps 0x60(%rsp), %ymm8
vmaxps %ymm0, %ymm8, %ymm7
vminps %ymm5, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmulps %ymm7, %ymm7, %ymm7
vaddps %ymm7, %ymm6, %ymm6
vmovaps %ymm6, 0x40(%rsp)
vcmpleps 0x160(%rsp), %ymm1, %ymm6
vcmpleps 0x120(%rsp), %ymm3, %ymm3
vandps %ymm6, %ymm3, %ymm3
vcmpleps 0xe0(%rsp), %ymm0, %ymm0
vcmpnltps 0x180(%rsp), %ymm2, %ymm6
vandps %ymm6, %ymm0, %ymm0
vandps %ymm0, %ymm3, %ymm0
vcmpleps %ymm2, %ymm1, %ymm1
vcmpnltps 0x140(%rsp), %ymm4, %ymm2
vandps %ymm2, %ymm1, %ymm1
vcmpnltps 0x100(%rsp), %ymm5, %ymm2
vandps %ymm2, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm0
jmp 0x14adf86
movl $0x6, %eax
jmp 0x14adfd1
movl $0x4, %eax
jmp 0x14adfd1
movl 0x40(%rsp,%rdx,4), %r10d
bsfq %r8, %r9
leaq -0x1(%r8), %rdx
movq (%r13,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x40(%rsp,%r9,4), %r9d
andq %r8, %rdx
jne 0x14ae136
leaq 0x10(%r11), %rdx
cmpl %r9d, %r10d
jae 0x14ae124
movq %rdi, (%r11)
movl %r9d, 0x8(%r11)
movq %rcx, %r13
jmp 0x14ae12e
movq %rcx, (%r11)
movl %r10d, 0x8(%r11)
movq %rdi, %r13
movq %rdx, %r11
jmp 0x14adfd1
vmovq %rcx, %xmm0
vmovd %r10d, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovq %rdi, %xmm1
vmovd %r9d, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
bsfq %rdx, %rcx
leaq -0x1(%rdx), %r8
movq (%r13,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
vmovq %rdi, %xmm2
vmovd 0x40(%rsp,%rcx,4), %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
andq %rdx, %r8
jne 0x14ae1e0
vpcmpgtd %xmm0, %xmm1, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm4
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm4, %xmm2, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm4, %xmm2, %xmm3
vblendvps %xmm1, %xmm2, %xmm4, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r11)
vmovaps %xmm4, 0x10(%r11)
vmovq %xmm3, %r13
addq $0x20, %r11
jmp 0x14adfd1
bsfq %r8, %rdx
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
vmovq %rcx, %xmm3
leaq -0x1(%r8), %rcx
vmovd 0x40(%rsp,%rdx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r8, %rcx
jne 0x14ae2a2
vpcmpgtd %xmm0, %xmm1, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm0, %xmm1, %xmm5
vblendvps %xmm4, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm2, %xmm3, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm2, %xmm3, %xmm4
vblendvps %xmm1, %xmm3, %xmm2, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm3
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm5, %xmm4, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm5, %xmm4, %xmm2
vblendvps %xmm1, %xmm4, %xmm5, %xmm1
vpcmpgtd %xmm1, %xmm3, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm3, %xmm5
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmovaps %xmm0, (%r11)
vmovaps %xmm1, 0x10(%r11)
vmovaps %xmm5, 0x20(%r11)
vmovq %xmm2, %r13
addq $0x30, %r11
jmp 0x14adfd1
vmovdqa %xmm0, (%r11)
vmovdqa %xmm1, 0x10(%r11)
vmovdqa %xmm2, 0x20(%r11)
vmovdqa %xmm3, 0x30(%r11)
movl $0x30, %edx
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdi
movq (%r13,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm0
vmovd 0x40(%rsp,%rsi,4), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x10(%r11,%rdx)
addq $0x10, %rdx
andq %rdi, %rcx
jne 0x14ae2be
leaq (%r11,%rdx), %rcx
testq %rdx, %rdx
je 0x14ae353
movl $0x10, %edx
movq %r11, %rsi
vmovdqa 0x10(%rsi), %xmm0
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rdx, %rdi
cmpl %r8d, -0x8(%r11,%rdi)
jae 0x14ae343
vmovdqa -0x10(%r11,%rdi), %xmm1
vmovdqa %xmm1, (%r11,%rdi)
addq $-0x10, %rdi
jne 0x14ae324
movq %r11, %rdi
jmp 0x14ae346
addq %r11, %rdi
vmovdqa %xmm0, (%rdi)
addq $0x10, %rdx
cmpq %rsi, %rcx
jne 0x14ae314
movq (%rcx), %r13
movq %rcx, %r11
jmp 0x14adfd1
cmpl $0x6, %eax
jne 0x14adecc
movl %r13d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x28(%rsp)
je 0x14adecc
movq %r15, 0x18(%rsp)
movq %r11, 0x20(%rsp)
andq $-0x10, %r13
addq $0x40, %r13
xorl %eax, %eax
xorl %ecx, %ecx
movq %rcx, 0x30(%rsp)
movq %rax, 0x38(%rsp)
xorl %r14d, %r14d
xorl %r15d, %r15d
cmpl $-0x1, (%r13,%r15,4)
je 0x14ae3e2
movq (%rbx), %rax
movl -0x10(%r13,%r15,4), %ecx
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rdi
movl %ecx, 0x44(%rbx)
movl (%r13,%r15,4), %eax
movl %eax, 0x40(%rbx)
movq %r12, %rsi
movq %rbx, %rdx
vzeroupper
callq 0x91bd12
orb %al, %r14b
incq %r15
cmpq $0x4, %r15
jne 0x14ae3a2
movq 0x30(%rsp), %rcx
orb %r14b, %cl
movq 0x38(%rsp), %rax
incq %rax
addq $0x50, %r13
cmpq 0x28(%rsp), %rax
jne 0x14ae392
testb $0x1, %cl
vmovaps 0xd0(%rsp), %xmm9
movq 0x20(%rsp), %r11
leaq 0x1c0(%rsp), %r14
movq 0x18(%rsp), %r15
je 0x14adecc
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
cmpl $0x1, 0x18(%rbx)
jne 0x14ae446
vmovss 0x10(%r12), %xmm3
vmulss %xmm3, %xmm3, %xmm9
jmp 0x14ae451
vmovaps 0x50(%rbx), %xmm3
vdpps $0x7f, %xmm3, %xmm3, %xmm9
movb $0x1, %al
movl %eax, 0x14(%rsp)
jmp 0x14ade50
movl 0x14(%rsp), %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
andb $0x1, %al
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMvIntersector1Woop<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
|
/// Single-ray point query over an N-wide BVH: visits all primitives within
/// the query region around query->p. The region is either a sphere
/// (query->radius) or an AABB (per-axis context->query_radius), selected by
/// context->query_type. Traversal uses an explicit, distance-sorted stack.
/// @param This    intersector collection; This->ptr holds the BVH
/// @param query   point query state (position p, radius, time)
/// @param context query context (query type, per-axis query_radius)
/// @return true if any primitive intersector reported a hit (which may also
///         have shrunk the query region)
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return false;
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf; // root entry is never culled by the distance test
/* verify correct input */
assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));
/* load the point query into SIMD registers */
TravPointQuery<N> tquery(query->p, context->query_radius);
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N,types> nodeTraverser;
bool changed = false; // set when a primitive updates the query result
/* squared culling distance: sphere queries use the scalar radius,
AABB queries use the squared length of the per-axis radius vector */
float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one
(dist is stored as raw bits; reinterpret as float for the compare) */
if (unlikely(*(float*)&stackPtr->dist > cull_radius))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node: squared point-to-box distances per child plus a hit mask */
size_t mask; vfloat<N> tNear;
STAT3(point_query.trav_nodes,1,1,1);
bool nodeIntersected;
if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
} else {
nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
}
/* nodeIntersected == false means 'cur' is a leaf: leave the down loop */
if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children (sorted by distance) */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(point_query.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
{
changed = true;
/* a hit may have shrunk the query region: refresh the SIMD radius and
the squared culling distance so further traversal prunes tighter */
tquery.rad = context->query_radius;
cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
}
/* push lazy node onto stack (e.g. deferred subtree from the intersector) */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
return changed;
}
|
# Compiler-generated AVX2 code for the pointQuery above.
# Register roles (established in the prologue): r13 = query, rbx = context,
# xmm9 = squared cull radius, r14 = stackPtr, r11 = stack base, r15 = cur node.
movq (%rdi), %rax                      # rax = This->ptr (the BVH)
cmpq $0x8, 0x70(%rax)                  # bvh->root == BVH::emptyNode (0x8)?
jne 0x14af287                          # non-empty -> full traversal below
xorl %eax, %eax                        # empty BVH -> return false
jmp 0x14af94d                          # shared epilogue
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp                      # 32-byte align for aligned YMM spills
subq $0x2520, %rsp # imm = 0x2520
movq %rdx, %rbx                        # rbx = context
movq %rsi, %r13                        # r13 = query
movq 0x70(%rax), %rax                  # root node reference
movq %rax, 0x1c0(%rsp)                 # stack[0].ptr = bvh->root
movl $0x0, 0x1c8(%rsp)                 # stack[0].dist = 0 (never exceeds the non-negative cull radius)
# cull radius: sphere query (type == 1) -> radius^2, else dot(query_radius, query_radius)
cmpl $0x1, 0x18(%rdx)
jne 0x14af2ce
vmovss 0x10(%r13), %xmm0               # query->radius
vmulss %xmm0, %xmm0, %xmm9             # xmm9 = radius^2
jmp 0x14af2d9
vmovaps 0x50(%rbx), %xmm0              # context->query_radius
vdpps $0x7f, %xmm0, %xmm0, %xmm9       # xmm9 = dot(query_radius, query_radius)
# broadcast query position p.x/p.y/p.z into spilled YMM constants
vbroadcastss (%r13), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vbroadcastss 0x4(%r13), %ymm0
vmovaps %ymm0, 0x80(%rsp)
vbroadcastss 0x8(%r13), %ymm0
vmovaps %ymm0, 0x60(%rsp)
# broadcast per-axis query radius
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
leaq 0x1d0(%rsp), %r14                 # r14 = stackPtr = stack+1
movl $0x0, 0x14(%rsp)                  # changed = false
leaq 0x1c0(%rsp), %r11                 # r11 = stack base
# precompute p +/- r planes per axis (used by the AABB query path)
vmovaps 0xa0(%rsp), %ymm3
vsubps %ymm0, %ymm3, %ymm4
vmovaps %ymm4, 0x180(%rsp)             # p.x - r.x
vaddps %ymm0, %ymm3, %ymm3
vmovaps %ymm3, 0x160(%rsp)             # p.x + r.x
vmovaps 0x80(%rsp), %ymm3
vsubps %ymm1, %ymm3, %ymm4
vmovaps %ymm4, 0x140(%rsp)             # p.y - r.y
vaddps %ymm1, %ymm3, %ymm1
vmovaps %ymm1, 0x120(%rsp)             # p.y + r.y
vmovaps 0x60(%rsp), %ymm1
vsubps %ymm2, %ymm1, %ymm3
vmovaps %ymm3, 0x100(%rsp)             # p.z - r.z
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0xe0(%rsp)              # p.z + r.z
vmulps %ymm0, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)             # broadcast radius.x squared -- presumably r^2 for the sphere test; verify
vmovaps %xmm9, 0xd0(%rsp)              # spill cull radius across calls
# --- pop loop ---
cmpq %r11, %r14                        # stackPtr == stack base -> traversal done
je 0x14af93b
vmovss -0x8(%r14), %xmm0               # stackPtr[-1].dist
addq $-0x10, %r14                      # stackPtr--
vucomiss %xmm9, %xmm0                  # dist > cull_radius -> pop next
ja 0x14af3a9
movq (%r14), %r15                      # cur = stackPtr->ptr
# --- downtraversal: sphere or AABB node test ---
cmpl $0x1, 0x18(%rbx)
jne 0x14af4bb
testb $0x8, %r15b                      # leaf bit set?
jne 0x14af467
# sphere path: squared distance from p to each of the 8 child AABBs
vmovaps 0x40(%r15), %ymm0              # children lower_x
vmovaps 0x60(%r15), %ymm1              # children upper_x
vmovaps 0xa0(%rsp), %ymm3
vmaxps %ymm0, %ymm3, %ymm2             # clamp p.x into [lower_x, upper_x]
vminps %ymm1, %ymm2, %ymm2
vsubps %ymm3, %ymm2, %ymm2
vmovaps 0x80(%rsp), %ymm5
vmaxps 0x80(%r15), %ymm5, %ymm3        # same for y
vminps 0xa0(%r15), %ymm3, %ymm3
vmovaps 0x60(%rsp), %ymm6
vmaxps 0xc0(%r15), %ymm6, %ymm4        # same for z
vminps 0xe0(%r15), %ymm4, %ymm4
vsubps %ymm5, %ymm3, %ymm3
vsubps %ymm6, %ymm4, %ymm4
vmulps %ymm2, %ymm2, %ymm2
vmulps %ymm3, %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmulps %ymm4, %ymm4, %ymm3
vaddps %ymm3, %ymm2, %ymm2             # ymm2 = squared distance per child
vmovaps %ymm2, 0x40(%rsp)              # saved as per-child sort keys (tNear)
vcmpleps 0x1a0(%rsp), %ymm2, %ymm2     # dist^2 <= r^2
vcmpleps %ymm1, %ymm0, %ymm0           # lower <= upper (valid child slot)
vandps %ymm0, %ymm2, %ymm0
vmovmskps %ymm0, %r12d                 # r12 = child hit mask
# --- traverseClosestHit: descend nearest hit child, push the rest ---
testb $0x8, %r15b
jne 0x14af5a7
testq %r12, %r12
je 0x14af5b1                           # no child hit -> pop loop
andq $-0x10, %r15                      # strip tag bits -> child pointer array
bsfq %r12, %rdx                        # index of first hit child
leaq -0x1(%r12), %r8
xorl %eax, %eax
movq (%r15,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r12, %r8                         # more than one child hit?
jne 0x14af5bb
movq %rcx, %r15                        # single hit: descend directly
testl %eax, %eax
je 0x14af3c6
jmp 0x14af837
# --- AABB query path ---
testb $0x8, %r15b
jne 0x14af467
vmovaps 0xc0(%r15), %ymm0              # lower_z
vmovaps 0x40(%r15), %ymm1              # lower_x
vmovaps 0x60(%r15), %ymm2              # upper_x
vmovaps 0x80(%r15), %ymm3              # lower_y
vmovaps 0xa0(%r15), %ymm4              # upper_y
vmovaps 0xe0(%r15), %ymm5              # upper_z
vmovaps 0xa0(%rsp), %ymm7
vmaxps %ymm1, %ymm7, %ymm6             # clamp p to each child box, as above
vminps %ymm2, %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmovaps 0x80(%rsp), %ymm8
vmaxps %ymm3, %ymm8, %ymm7
vminps %ymm4, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmulps %ymm6, %ymm6, %ymm6
vmulps %ymm7, %ymm7, %ymm7
vaddps %ymm7, %ymm6, %ymm6
vmovaps 0x60(%rsp), %ymm8
vmaxps %ymm0, %ymm8, %ymm7
vminps %ymm5, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmulps %ymm7, %ymm7, %ymm7
vaddps %ymm7, %ymm6, %ymm6             # squared distance (sort key only)
vmovaps %ymm6, 0x40(%rsp)
# box-overlap test against the precomputed p +/- r planes
vcmpleps 0x160(%rsp), %ymm1, %ymm6     # lower_x <= p.x + r.x
vcmpleps 0x120(%rsp), %ymm3, %ymm3     # lower_y <= p.y + r.y
vandps %ymm6, %ymm3, %ymm3
vcmpleps 0xe0(%rsp), %ymm0, %ymm0      # lower_z <= p.z + r.z
vcmpnltps 0x180(%rsp), %ymm2, %ymm6    # upper_x >= p.x - r.x
vandps %ymm6, %ymm0, %ymm0
vandps %ymm0, %ymm3, %ymm0
vcmpleps %ymm2, %ymm1, %ymm1           # lower <= upper (valid child slot)
vcmpnltps 0x140(%rsp), %ymm4, %ymm2    # upper_y >= p.y - r.y
vandps %ymm2, %ymm1, %ymm1
vcmpnltps 0x100(%rsp), %ymm5, %ymm2    # upper_z >= p.z - r.z
vandps %ymm2, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm0
jmp 0x14af463                          # -> movmsk + traverseClosestHit
movl $0x6, %eax                        # leaf type tag 6 (processed by the leaf loop below)
jmp 0x14af4ae
movl $0x4, %eax                        # other leaf type tag -- presumably not handled here; verify
jmp 0x14af4ae
# --- two children hit: push the farther, descend into the nearer ---
movl 0x40(%rsp,%rdx,4), %r10d          # dist of first hit child
bsfq %r8, %r9                          # second hit child
leaq -0x1(%r8), %rdx
movq (%r15,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x40(%rsp,%r9,4), %r9d
andq %r8, %rdx                         # three or more hits?
jne 0x14af612
leaq 0x10(%r14), %rdx
cmpl %r9d, %r10d                       # distances compared as integer bits
jae 0x14af600
movq %rdi, (%r14)                      # push second child, descend first
movl %r9d, 0x8(%r14)
movq %rcx, %r15
jmp 0x14af60a
movq %rcx, (%r14)                      # push first child, descend second
movl %r10d, 0x8(%r14)
movq %rdi, %r15
movq %rdx, %r14                        # stackPtr++
jmp 0x14af4ae
# --- three children hit: pack (ptr,dist) pairs, 3-element sort network ---
vmovq %rcx, %xmm0
vmovd %r10d, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovq %rdi, %xmm1
vmovd %r9d, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
bsfq %rdx, %rcx
leaq -0x1(%rdx), %r8
movq (%r15,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
vmovq %rdi, %xmm2
vmovd 0x40(%rsp,%rcx,4), %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
andq %rdx, %r8                         # four or more hits?
jne 0x14af6bb
vpcmpgtd %xmm0, %xmm1, %xmm3           # compare dist lanes
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm4   # conditional swap
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm4, %xmm2, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm4, %xmm2, %xmm3
vblendvps %xmm1, %xmm2, %xmm4, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r14)                  # push the two farther children, sorted
vmovaps %xmm4, 0x10(%r14)
vmovq %xmm3, %r15                      # descend into the nearest
addq $0x20, %r14
jmp 0x14af4ae
# --- four children hit: 4-element sort network ---
bsfq %r8, %rdx
movq (%r15,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
vmovq %rcx, %xmm3
leaq -0x1(%r8), %rcx
vmovd 0x40(%rsp,%rdx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r8, %rcx                         # five or more hits?
jne 0x14af77c
vpcmpgtd %xmm0, %xmm1, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm0, %xmm1, %xmm5
vblendvps %xmm4, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm2, %xmm3, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm2, %xmm3, %xmm4
vblendvps %xmm1, %xmm3, %xmm2, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm3
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm5, %xmm4, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm5, %xmm4, %xmm2
vblendvps %xmm1, %xmm4, %xmm5, %xmm1
vpcmpgtd %xmm1, %xmm3, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm3, %xmm5
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmovaps %xmm0, (%r14)                  # push the three farther children, sorted
vmovaps %xmm1, 0x10(%r14)
vmovaps %xmm5, 0x20(%r14)
vmovq %xmm2, %r15                      # descend into the nearest
addq $0x30, %r14
jmp 0x14af4ae
# --- five or more children hit: dump first four, append the rest,
#     then insertion-sort the pushed stack items by distance ---
vmovdqa %xmm0, (%r14)
vmovdqa %xmm1, 0x10(%r14)
vmovdqa %xmm2, 0x20(%r14)
vmovdqa %xmm3, 0x30(%r14)
movl $0x30, %edx
bsfq %rcx, %rsi                        # next remaining hit child
leaq -0x1(%rcx), %rdi
movq (%r15,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm0
vmovd 0x40(%rsp,%rsi,4), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x10(%r14,%rdx)         # append (ptr,dist) item
addq $0x10, %rdx
andq %rdi, %rcx
jne 0x14af798
leaq (%r14,%rdx), %rcx                 # rcx = one past the last pushed item
testq %rdx, %rdx
je 0x14af82c
movl $0x10, %edx
movq %r14, %rsi
# insertion sort of the pushed (ptr,dist) items
vmovdqa 0x10(%rsi), %xmm0
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rdx, %rdi
cmpl %r8d, -0x8(%r14,%rdi)
jae 0x14af81c
vmovdqa -0x10(%r14,%rdi), %xmm1        # shift larger entries up
vmovdqa %xmm1, (%r14,%rdi)
addq $-0x10, %rdi
jne 0x14af7fd
movq %r14, %rdi
jmp 0x14af81f
addq %r14, %rdi
vmovdqa %xmm0, (%rdi)                  # insert saved item
addq $0x10, %rdx
cmpq %rsi, %rcx
jne 0x14af7ed
movq (%rcx), %r15                      # descend into the nearest (top of stack)
movq %rcx, %r14
jmp 0x14af4ae
# --- leaf handling (inlined primitive point query) ---
cmpl $0x6, %eax                        # only leaf tag 6 is processed here
jne 0x14af3a9                          # otherwise back to the pop loop
movl %r15d, %eax
andl $0xf, %eax                        # item count from the low tag bits
addq $-0x8, %rax
movq %rax, 0x28(%rsp)                  # number of primitive blocks
je 0x14af3a9
movq %r12, 0x18(%rsp)                  # save registers clobbered by the callback
movq %r14, 0x20(%rsp)
andq $-0x10, %r15                      # strip tag -> primitive data
addq $0xa0, %r15
xorl %eax, %eax
xorl %ecx, %ecx
movq %rcx, 0x30(%rsp)                  # accumulated 'changed' flag
movq %rax, 0x38(%rsp)                  # block counter
movq $-0x4, %r12                       # 4 packed items per block
xorl %r14d, %r14d
movl (%r15,%r12,4), %eax               # geomID of this item
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
cmpq %rcx, %rax                        # invalid slot (-1) -> skip
je 0x14af8bf
movq (%rbx), %rcx
movq 0x1e8(%rcx), %rcx                 # scene geometry table -- presumably; verify
movq (%rcx,%rax,8), %rdi               # geometry object
movl %eax, 0x44(%rbx)                  # publish geomID in the context
movl 0x10(%r15,%r12,4), %eax
movl %eax, 0x40(%rbx)                  # publish primID in the context
movq %r13, %rsi                        # query
movq %rbx, %rdx                        # context
vzeroupper                             # avoid AVX/SSE transition penalty around the call
callq 0x91bd12                         # per-geometry pointQuery callback
orb %al, %r14b                         # changed |= result
incq %r12
jne 0x14af882
movq 0x30(%rsp), %rcx
orb %r14b, %cl
movq 0x38(%rsp), %rax
incq %rax
addq $0xb0, %r15                       # next primitive block
cmpq 0x28(%rsp), %rax
jne 0x14af86e
testb $0x1, %cl                        # any hit in this leaf?
vmovaps 0xd0(%rsp), %xmm9              # restore cull radius
leaq 0x1c0(%rsp), %r11
movq 0x20(%rsp), %r14
movq 0x18(%rsp), %r12
je 0x14af3a9                           # no hit -> pop loop
# a hit may have shrunk the query region: reload radius and recompute cull radius
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
cmpl $0x1, 0x18(%rbx)
jne 0x14af925
vmovss 0x10(%r13), %xmm3
vmulss %xmm3, %xmm3, %xmm9             # new radius^2
jmp 0x14af930
vmovaps 0x50(%rbx), %xmm3
vdpps $0x7f, %xmm3, %xmm3, %xmm9       # new dot(query_radius, query_radius)
movb $0x1, %al
movl %eax, 0x14(%rsp)                  # changed = true
jmp 0x14af32d                          # back to the pop loop
# --- epilogue: return the 'changed' flag ---
movl 0x14(%rsp), %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
andb $0x1, %al
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMiMBIntersector1Moeller<4, true>>>::intersect(embree::Accel::Intersectors const*, embree::RayHitK<1>&, embree::RayQueryContext*)
|
/// Single-ray closest-hit traversal of an N-wide BVH (template arguments per
/// the symbol above). Walks the tree with an explicit, distance-sorted stack
/// and records the nearest hit in 'ray'.
/// @param This    intersector collection; This->ptr holds the BVH
/// @param ray     ray plus hit record; ray.tfar shrinks as hits are found
/// @param context ray query context forwarded to the primitive intersector
void BVHNIntersector1<N, types, robust, PrimitiveIntersector1>::intersect(const Accel::Intersectors* __restrict__ This,
RayHit& __restrict__ ray,
RayQueryContext* __restrict__ context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return;
/* perform per ray precalculations required by the primitive intersector */
Precalculations pre(ray, bvh);
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf;
/* NOTE(review): redundant -- the identical emptyNode check already returned above */
if (bvh->root == BVH::emptyNode)
return;
/* filter out invalid rays */
#if defined(EMBREE_IGNORE_INVALID_RAYS)
if (!ray.valid()) return;
#endif
/* verify correct input */
assert(ray.valid());
assert(ray.tnear() >= 0.0f);
assert(!(types & BVH_MB) || (ray.time() >= 0.0f && ray.time() <= 1.0f));
/* load the ray into SIMD registers (near/far clamped to be non-negative) */
TravRay<N,robust> tray(ray.org, ray.dir, max(ray.tnear(), 0.0f), max(ray.tfar, 0.0f));
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N, types> nodeTraverser;
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one
(dist is stored as raw bits; reinterpret as float for the compare) */
if (unlikely(*(float*)&stackPtr->dist > ray.tfar))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(normal.trav_nodes,1,1,1);
bool nodeIntersected = BVHNNodeIntersector1<N, types, robust>::intersect(cur, tray, ray.time(), tNear, mask);
/* nodeIntersected == false means 'cur' is a leaf: leave the down loop */
if (unlikely(!nodeIntersected)) { STAT3(normal.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children (sorted by distance) */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(normal.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
PrimitiveIntersector1::intersect(This, pre, ray, context, prim, num, tray, lazy_node);
/* a hit shrinks ray.tfar; propagate it so traversal prunes tighter */
tray.tfar = ray.tfar;
/* push lazy node onto stack (deferred subtree from the intersector) */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x26c0, %rsp # imm = 0x26C0
movq %rdx, 0x18(%rsp)
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
je 0x14b0fa4
movq 0x70(%rax), %rax
movq %rax, 0x360(%rsp)
movl $0x0, 0x368(%rsp)
cmpq $0x8, %rax
jne 0x14b0fb6
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movq %rsi, %r15
leaq 0x370(%rsp), %r10
vxorps %xmm5, %xmm5, %xmm5
vmaxss 0xc(%rsi), %xmm5, %xmm1
vmaxss 0x20(%rsi), %xmm5, %xmm0
vmovaps 0x10(%rsi), %xmm2
vbroadcastss 0xa6fee7(%rip), %xmm3 # 0x1f20ec4
vandps %xmm3, %xmm2, %xmm3
vbroadcastss 0xa3fffe(%rip), %xmm4 # 0x1ef0fe8
vcmpltps %xmm4, %xmm3, %xmm3
vblendvps %xmm3, %xmm4, %xmm2, %xmm2
vrcpps %xmm2, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa3b70e(%rip), %xmm4 # 0x1eec714
vsubps %xmm2, %xmm4, %xmm2
vmulps %xmm2, %xmm3, %xmm2
vbroadcastss (%rsi), %ymm6
vbroadcastss 0x4(%rsi), %ymm7
vaddps %xmm2, %xmm3, %xmm2
vbroadcastss 0x8(%rsi), %ymm8
xorl %r11d, %r11d
vucomiss %xmm5, %xmm2
setb %r11b
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm9
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vshufps $0x55, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,1,1,1]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm10
vshufpd $0x1, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,0]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
shll $0x5, %r11d
xorl %r14d, %r14d
vucomiss %xmm5, %xmm3
setb %r14b
shll $0x5, %r14d
orq $0x40, %r14
xorl %r12d, %r12d
vucomiss %xmm5, %xmm4
vinsertf128 $0x1, %xmm2, %ymm2, %ymm11
setb %r12b
shll $0x5, %r12d
orq $0x80, %r12
movq %r11, %r13
xorq $0x20, %r13
movq %r14, %rsi
xorq $0x20, %rsi
movq %r12, %rdi
xorq $0x20, %rdi
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm12
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
leaq 0xc9eec9(%rip), %rax # 0x214ff80
vmovaps 0xf0(%rax), %xmm1
vmovaps %xmm1, 0x140(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
movq %r15, 0x40(%rsp)
vmovaps %ymm6, 0x340(%rsp)
vmovaps %ymm7, 0x320(%rsp)
vmovaps %ymm8, 0x300(%rsp)
movq %r11, 0xf0(%rsp)
vmovaps %ymm9, 0x2e0(%rsp)
vmovaps %ymm10, 0x2c0(%rsp)
movq %r14, 0xe8(%rsp)
movq %r12, 0xe0(%rsp)
vmovaps %ymm11, 0x2a0(%rsp)
movq %r13, 0xd8(%rsp)
vmovaps %ymm12, 0x280(%rsp)
vmovss 0x20(%r15), %xmm1
leaq 0x360(%rsp), %rax
cmpq %rax, %r10
je 0x14b0fa4
vmovss -0x8(%r10), %xmm2
addq $-0x10, %r10
vucomiss %xmm1, %xmm2
ja 0x14b113c
movq (%r10), %rbx
testb $0x8, %bl
jne 0x14b1255
movq %rbx, %rax
andq $-0x10, %rax
vbroadcastss 0x1c(%r15), %ymm2
vmulps 0x100(%rax,%r11), %ymm2, %ymm1
vaddps 0x40(%rax,%r11), %ymm1, %ymm1
vsubps %ymm6, %ymm1, %ymm1
vmulps %ymm1, %ymm9, %ymm1
vmulps 0x100(%rax,%r14), %ymm2, %ymm3
vaddps 0x40(%rax,%r14), %ymm3, %ymm3
vmaxps %ymm1, %ymm12, %ymm1
vsubps %ymm7, %ymm3, %ymm3
vmulps 0x100(%rax,%r12), %ymm2, %ymm4
vmulps %ymm3, %ymm10, %ymm3
vaddps 0x40(%rax,%r12), %ymm4, %ymm4
vsubps %ymm8, %ymm4, %ymm4
vmulps %ymm4, %ymm11, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmulps 0x100(%rax,%r13), %ymm2, %ymm4
vmaxps %ymm3, %ymm1, %ymm1
vaddps 0x40(%rax,%r13), %ymm4, %ymm3
vsubps %ymm6, %ymm3, %ymm3
vmulps 0x100(%rax,%rsi), %ymm2, %ymm4
vaddps 0x40(%rax,%rsi), %ymm4, %ymm4
vmulps %ymm3, %ymm9, %ymm3
vsubps %ymm7, %ymm4, %ymm4
vmulps 0x100(%rax,%rdi), %ymm2, %ymm5
vmulps %ymm4, %ymm10, %ymm4
vaddps 0x40(%rax,%rdi), %ymm5, %ymm5
vsubps %ymm8, %ymm5, %ymm5
vmulps %ymm5, %ymm11, %ymm5
vminps %ymm5, %ymm4, %ymm4
vminps %ymm3, %ymm0, %ymm3
vminps %ymm4, %ymm3, %ymm3
vcmpleps %ymm3, %ymm1, %ymm3
movl %ebx, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x14b12a2
vextractf128 $0x1, %ymm3, %xmm2
vpackssdw %xmm2, %xmm3, %xmm2
vpsllw $0xf, %xmm2, %xmm2
vpacksswb %xmm2, %xmm2, %xmm2
vpmovmskb %xmm2, %eax
movzbl %al, %r9d
vmovaps %ymm1, 0x160(%rsp)
testb $0x8, %bl
jne 0x14b129b
testq %r9, %r9
je 0x14b12cf
andq $-0x10, %rbx
bsfq %r9, %rdx
leaq -0x1(%r9), %r8
xorl %eax, %eax
movq (%rbx,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r9, %r8
jne 0x14b12d6
movq %rcx, %rbx
testl %eax, %eax
je 0x14b1160
jmp 0x14b15c0
movl $0x6, %eax
jmp 0x14b128e
vmovaps 0x1c0(%rax), %ymm4
vcmpleps %ymm2, %ymm4, %ymm4
vcmpltps 0x1e0(%rax), %ymm2, %ymm2
vandps %ymm2, %ymm4, %ymm2
vandps %ymm3, %ymm2, %ymm2
vextractf128 $0x1, %ymm2, %xmm3
vpackssdw %xmm3, %xmm2, %xmm2
jmp 0x14b123b
movl $0x4, %eax
jmp 0x14b128e
movq %r9, (%rsp)
movl 0x160(%rsp,%rdx,4), %esi
bsfq %r8, %r9
leaq -0x1(%r8), %rdx
movq (%rbx,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x160(%rsp,%r9,4), %r9d
andq %r8, %rdx
jne 0x14b1347
leaq 0x10(%r10), %rdx
cmpl %r9d, %esi
jae 0x14b1327
movq %rdi, (%r10)
movl %r9d, 0x8(%r10)
movq %rdx, %r10
movq %rcx, %rbx
jmp 0x14b1334
movq %rcx, (%r10)
movl %esi, 0x8(%r10)
movq %rdx, %r10
movq %rdi, %rbx
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
movq (%rsp), %r9
jmp 0x14b128e
vmovq %rcx, %xmm1
vmovd %esi, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
vmovq %rdi, %xmm2
vmovd %r9d, %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
bsfq %rdx, %rcx
leaq -0x1(%rdx), %rsi
movq (%rbx,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
vmovq %rdi, %xmm3
vmovd 0x160(%rsp,%rcx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %rdx, %rsi
jne 0x14b13f2
vpcmpgtd %xmm1, %xmm2, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm2, %xmm5
vblendvps %xmm4, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm5, %xmm3, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm5, %xmm3, %xmm4
vblendvps %xmm2, %xmm3, %xmm5, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm5
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vmovaps %xmm1, (%r10)
vmovaps %xmm5, 0x10(%r10)
vmovq %xmm4, %rbx
addq $0x20, %r10
jmp 0x14b1334
vmovaps %ymm12, %ymm13
vmovaps %ymm11, %ymm12
vmovaps %ymm10, %ymm11
vmovaps %ymm9, %ymm10
vmovaps %ymm8, %ymm9
vmovaps %ymm7, %ymm8
vmovaps %ymm6, %ymm7
bsfq %rsi, %rdx
movq (%rbx,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
vmovq %rcx, %xmm4
leaq -0x1(%rsi), %rcx
vmovd 0x160(%rsp,%rdx,4), %xmm5
vpunpcklqdq %xmm5, %xmm4, %xmm4 # xmm4 = xmm4[0],xmm5[0]
andq %rsi, %rcx
jne 0x14b1502
vpcmpgtd %xmm1, %xmm2, %xmm5
vpshufd $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vblendvps %xmm5, %xmm1, %xmm2, %xmm6
vblendvps %xmm5, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm3, %xmm4, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm3, %xmm4, %xmm5
vblendvps %xmm2, %xmm4, %xmm3, %xmm2
vpcmpgtd %xmm1, %xmm2, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm1, %xmm2, %xmm4
vblendvps %xmm3, %xmm2, %xmm1, %xmm1
vpcmpgtd %xmm6, %xmm5, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm6, %xmm5, %xmm3
vblendvps %xmm2, %xmm5, %xmm6, %xmm2
vpcmpgtd %xmm2, %xmm4, %xmm5
vpshufd $0xaa, %xmm5, %xmm5 # xmm5 = xmm5[2,2,2,2]
vblendvps %xmm5, %xmm2, %xmm4, %xmm6
vblendvps %xmm5, %xmm4, %xmm2, %xmm2
vmovaps %xmm1, (%r10)
vmovaps %xmm2, 0x10(%r10)
vmovaps %xmm6, 0x20(%r10)
vmovq %xmm3, %rbx
addq $0x30, %r10
vmovaps %ymm7, %ymm6
vmovaps %ymm8, %ymm7
vmovaps %ymm9, %ymm8
vmovaps %ymm10, %ymm9
vmovaps %ymm11, %ymm10
vmovaps %ymm12, %ymm11
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
vmovaps %ymm13, %ymm12
jmp 0x14b133e
vmovdqa %xmm1, (%r10)
vmovdqa %xmm2, 0x10(%r10)
vmovdqa %xmm3, 0x20(%r10)
vmovdqa %xmm4, 0x30(%r10)
movl $0x30, %edx
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdi
movq (%rbx,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm1
vmovd 0x160(%rsp,%rsi,4), %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
vmovdqa %xmm1, 0x10(%r10,%rdx)
addq $0x10, %rdx
andq %rdi, %rcx
jne 0x14b151e
leaq (%r10,%rdx), %rcx
testq %rdx, %rdx
je 0x14b15b5
movl $0x10, %edx
movq %r10, %rsi
vmovdqa 0x10(%rsi), %xmm1
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rdx, %rdi
cmpl %r8d, -0x8(%r10,%rdi)
jae 0x14b15a5
vmovdqa -0x10(%r10,%rdi), %xmm2
vmovdqa %xmm2, (%r10,%rdi)
addq $-0x10, %rdi
jne 0x14b1586
movq %r10, %rdi
jmp 0x14b15a8
addq %r10, %rdi
vmovdqa %xmm1, (%rdi)
addq $0x10, %rdx
cmpq %rsi, %rcx
jne 0x14b1576
movq (%rcx), %rbx
movq %rcx, %r10
jmp 0x14b14d2
cmpl $0x6, %eax
jne 0x14b1136
movq %r9, (%rsp)
movq %r10, 0xf8(%rsp)
movl %ebx, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x100(%rsp)
je 0x14b1f1e
andq $-0x10, %rbx
movq 0x18(%rsp), %rax
movq (%rax), %rax
movq %rax, 0x88(%rsp)
xorl %eax, %eax
movq %rax, 0x108(%rsp)
leaq (%rax,%rax,4), %rax
shlq $0x4, %rax
vmovss 0x1c(%r15), %xmm0
movl 0x30(%rbx,%rax), %ecx
movq 0x88(%rsp), %rdx
movq 0x1e8(%rdx), %rdx
movq (%rdx,%rcx,8), %rcx
vmovss 0x28(%rcx), %xmm1
vmovss 0x2c(%rcx), %xmm2
vmovss 0x30(%rcx), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm6
vroundss $0x9, %xmm6, %xmm6, %xmm0
vaddss 0xa3f370(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmaxss %xmm0, %xmm1, %xmm2
vcvttss2si %xmm2, %edx
movslq %edx, %rdx
movq 0xe0(%rcx), %rcx
imulq $0x38, %rdx, %rdx
movl (%rbx,%rax), %r14d
movl 0x4(%rbx,%rax), %esi
movq (%rcx,%rdx), %r13
movq 0x38(%rcx,%rdx), %rcx
vmovups (%r13,%r14,4), %xmm4
movl 0x10(%rbx,%rax), %edx
vmovups (%r13,%rdx,4), %xmm5
movl 0x20(%rbx,%rax), %edi
movq %rdi, 0x30(%rsp)
vmovups (%r13,%rdi,4), %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovups (%r13,%rsi,4), %xmm8
movl 0x14(%rbx,%rax), %edi
vmovups (%r13,%rdi,4), %xmm7
movl 0x24(%rbx,%rax), %r8d
movq %r8, 0x20(%rsp)
vmovups (%r13,%r8,4), %xmm0
vmovaps %xmm0, 0x50(%rsp)
movl 0x8(%rbx,%rax), %r15d
vmovups (%r13,%r15,4), %xmm10
movl 0x18(%rbx,%rax), %r10d
vmovups (%r13,%r10,4), %xmm9
movl 0x28(%rbx,%rax), %r8d
vmovups (%r13,%r8,4), %xmm11
movl 0xc(%rbx,%rax), %r12d
vmovups (%r13,%r12,4), %xmm1
movl 0x1c(%rbx,%rax), %r11d
vmovups (%r13,%r11,4), %xmm14
movl 0x2c(%rbx,%rax), %r9d
vmovups (%r13,%r9,4), %xmm12
vmovups (%rcx,%r14,4), %xmm15
vmovups (%rcx,%rdx,4), %xmm13
vsubss %xmm2, %xmm6, %xmm6
vunpcklps %xmm10, %xmm4, %xmm2 # xmm2 = xmm4[0],xmm10[0],xmm4[1],xmm10[1]
vunpckhps %xmm10, %xmm4, %xmm4 # xmm4 = xmm4[2],xmm10[2],xmm4[3],xmm10[3]
vunpcklps %xmm1, %xmm8, %xmm0 # xmm0 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
vunpckhps %xmm1, %xmm8, %xmm1 # xmm1 = xmm8[2],xmm1[2],xmm8[3],xmm1[3]
vmovups (%rcx,%rsi,4), %xmm3
vunpcklps %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
vmovaps %xmm1, 0x250(%rsp)
vunpcklps %xmm0, %xmm2, %xmm8 # xmm8 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
vunpckhps %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
vmovaps %xmm0, 0x230(%rsp)
vunpcklps %xmm9, %xmm5, %xmm0 # xmm0 = xmm5[0],xmm9[0],xmm5[1],xmm9[1]
vunpckhps %xmm9, %xmm5, %xmm1 # xmm1 = xmm5[2],xmm9[2],xmm5[3],xmm9[3]
vunpcklps %xmm14, %xmm7, %xmm2 # xmm2 = xmm7[0],xmm14[0],xmm7[1],xmm14[1]
vunpckhps %xmm14, %xmm7, %xmm5 # xmm5 = xmm7[2],xmm14[2],xmm7[3],xmm14[3]
vmovups (%rcx,%r15,4), %xmm14
movq 0x40(%rsp), %r15
vunpcklps %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
vmovaps %xmm1, 0xc0(%rsp)
vunpcklps %xmm2, %xmm0, %xmm9 # xmm9 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vunpckhps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm2[2],xmm0[3],xmm2[3]
vmovaps %xmm0, 0x240(%rsp)
vmovaps 0x60(%rsp), %xmm1
vunpcklps %xmm11, %xmm1, %xmm0 # xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1]
vunpckhps %xmm11, %xmm1, %xmm1 # xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
vmovaps 0x50(%rsp), %xmm2
vunpcklps %xmm12, %xmm2, %xmm5 # xmm5 = xmm2[0],xmm12[0],xmm2[1],xmm12[1]
vunpckhps %xmm12, %xmm2, %xmm2 # xmm2 = xmm2[2],xmm12[2],xmm2[3],xmm12[3]
vmovups (%rcx,%r12,4), %xmm11
vunpcklps %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vmovaps %xmm1, 0x60(%rsp)
vunpcklps %xmm5, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vmovaps %xmm1, 0x260(%rsp)
vunpckhps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vmovaps %xmm0, 0x50(%rsp)
vunpcklps %xmm14, %xmm15, %xmm0 # xmm0 = xmm15[0],xmm14[0],xmm15[1],xmm14[1]
vunpckhps %xmm14, %xmm15, %xmm1 # xmm1 = xmm15[2],xmm14[2],xmm15[3],xmm14[3]
vunpcklps %xmm11, %xmm3, %xmm5 # xmm5 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
vunpckhps %xmm11, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
vmovups (%rcx,%r10,4), %xmm11
vunpcklps %xmm3, %xmm1, %xmm4 # xmm4 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
vunpcklps %xmm5, %xmm0, %xmm3 # xmm3 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpckhps %xmm5, %xmm0, %xmm2 # xmm2 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
vunpcklps %xmm11, %xmm13, %xmm0 # xmm0 = xmm13[0],xmm11[0],xmm13[1],xmm11[1]
vunpckhps %xmm11, %xmm13, %xmm5 # xmm5 = xmm13[2],xmm11[2],xmm13[3],xmm11[3]
vmovups (%rcx,%rdi,4), %xmm11
vmovups (%rcx,%r11,4), %xmm12
vunpcklps %xmm12, %xmm11, %xmm13 # xmm13 = xmm11[0],xmm12[0],xmm11[1],xmm12[1]
vunpckhps %xmm12, %xmm11, %xmm11 # xmm11 = xmm11[2],xmm12[2],xmm11[3],xmm12[3]
vunpcklps %xmm11, %xmm5, %xmm11 # xmm11 = xmm5[0],xmm11[0],xmm5[1],xmm11[1]
vunpcklps %xmm13, %xmm0, %xmm1 # xmm1 = xmm0[0],xmm13[0],xmm0[1],xmm13[1]
vunpckhps %xmm13, %xmm0, %xmm7 # xmm7 = xmm0[2],xmm13[2],xmm0[3],xmm13[3]
movq 0x30(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm0
vmovups (%rcx,%r8,4), %xmm5
vunpcklps %xmm5, %xmm0, %xmm13 # xmm13 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpckhps %xmm5, %xmm0, %xmm0 # xmm0 = xmm0[2],xmm5[2],xmm0[3],xmm5[3]
movq 0x20(%rsp), %rdx
vmovups (%rcx,%rdx,4), %xmm5
vmovups (%rcx,%r9,4), %xmm12
vunpcklps %xmm12, %xmm5, %xmm15 # xmm15 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
vunpckhps %xmm12, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm12[2],xmm5[3],xmm12[3]
vunpcklps %xmm5, %xmm0, %xmm10 # xmm10 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
vunpcklps %xmm15, %xmm13, %xmm14 # xmm14 = xmm13[0],xmm15[0],xmm13[1],xmm15[1]
vunpckhps %xmm15, %xmm13, %xmm15 # xmm15 = xmm13[2],xmm15[2],xmm13[3],xmm15[3]
vshufps $0x0, %xmm6, %xmm6, %xmm0 # xmm0 = xmm6[0,0,0,0]
vmovss 0xa3ae87(%rip), %xmm5 # 0x1eec714
vsubss %xmm6, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm13 # xmm13 = xmm5[0,0,0,0]
vmulps %xmm3, %xmm0, %xmm3
vmulps %xmm8, %xmm13, %xmm5
vaddps %xmm3, %xmm5, %xmm12
vmulps %xmm2, %xmm0, %xmm2
vmulps 0x230(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm5
vmulps %xmm4, %xmm0, %xmm2
vmulps 0x250(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm6
vmulps %xmm1, %xmm0, %xmm1
vmulps %xmm9, %xmm13, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmulps %xmm7, %xmm0, %xmm2
vmulps 0x240(%rsp), %xmm13, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmovaps 0x30(%rbx,%rax), %xmm3
vmovaps %xmm3, 0x270(%rsp)
vmovaps 0x40(%rbx,%rax), %xmm3
movq 0x108(%rsp), %rax
vmulps %xmm0, %xmm11, %xmm7
vmulps 0xc0(%rsp), %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm9
vmulps %xmm0, %xmm14, %xmm7
vmulps %xmm0, %xmm15, %xmm8
vmulps %xmm0, %xmm10, %xmm0
vmulps 0x260(%rsp), %xmm13, %xmm10
vaddps %xmm7, %xmm10, %xmm10
vmulps 0x50(%rsp), %xmm13, %xmm7
vaddps %xmm7, %xmm8, %xmm11
vmulps 0x60(%rsp), %xmm13, %xmm7
vaddps %xmm0, %xmm7, %xmm0
vmovaps %xmm3, 0x150(%rsp)
vsubps %xmm1, %xmm12, %xmm7
vmovaps %xmm5, %xmm1
vmovaps %xmm5, 0x20(%rsp)
vsubps %xmm2, %xmm5, %xmm8
vsubps %xmm9, %xmm6, %xmm9
vsubps %xmm12, %xmm10, %xmm10
vmovaps %xmm12, %xmm5
vsubps %xmm1, %xmm11, %xmm11
vsubps %xmm6, %xmm0, %xmm12
vmulps %xmm12, %xmm8, %xmm0
vmulps %xmm11, %xmm9, %xmm1
vsubps %xmm0, %xmm1, %xmm0
vmovaps %xmm0, 0x30(%rsp)
vmulps %xmm10, %xmm9, %xmm1
vmulps %xmm7, %xmm12, %xmm2
vsubps %xmm1, %xmm2, %xmm4
vmulps %xmm7, %xmm11, %xmm2
vmulps %xmm10, %xmm8, %xmm3
vsubps %xmm2, %xmm3, %xmm3
vbroadcastss (%r15), %xmm0
vbroadcastss 0x4(%r15), %xmm13
vbroadcastss 0x8(%r15), %xmm14
vbroadcastss 0x14(%r15), %xmm15
vsubps %xmm0, %xmm5, %xmm2
vbroadcastss 0x18(%r15), %xmm0
vmovaps 0x20(%rsp), %xmm1
vsubps %xmm13, %xmm1, %xmm5
vsubps %xmm14, %xmm6, %xmm1
vmulps %xmm1, %xmm15, %xmm6
vmulps %xmm0, %xmm5, %xmm13
vsubps %xmm6, %xmm13, %xmm6
vbroadcastss 0x10(%r15), %xmm13
vmulps %xmm0, %xmm2, %xmm14
vmovaps %xmm1, 0x50(%rsp)
vmulps %xmm1, %xmm13, %xmm1
vsubps %xmm14, %xmm1, %xmm1
vmovaps %xmm5, 0x60(%rsp)
vmulps %xmm5, %xmm13, %xmm14
vmovaps %xmm2, 0xc0(%rsp)
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm14, %xmm2, %xmm2
vmovaps %xmm3, 0x20(%rsp)
vmulps %xmm0, %xmm3, %xmm0
vmulps %xmm4, %xmm15, %xmm14
vaddps %xmm0, %xmm14, %xmm0
vmovaps 0x30(%rsp), %xmm14
vmulps %xmm13, %xmm14, %xmm13
vaddps %xmm0, %xmm13, %xmm0
vmulps %xmm2, %xmm12, %xmm12
vmulps %xmm1, %xmm11, %xmm11
vaddps %xmm12, %xmm11, %xmm11
vmulps %xmm6, %xmm10, %xmm10
vaddps %xmm11, %xmm10, %xmm10
vmulps %xmm2, %xmm9, %xmm2
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmovddup 0xa6f50e(%rip), %xmm2 # xmm2 = mem[0,0]
vandps %xmm2, %xmm0, %xmm9
vxorps %xmm10, %xmm9, %xmm8
vmulps %xmm6, %xmm7, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vxorps %xmm1, %xmm9, %xmm7
vxorps %xmm10, %xmm10, %xmm10
vcmpnltps %xmm10, %xmm8, %xmm1
vcmpnltps %xmm10, %xmm7, %xmm2
vandps %xmm2, %xmm1, %xmm1
vbroadcastss 0xa6f437(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm6
vcmpneqps %xmm0, %xmm10, %xmm0
vandps %xmm0, %xmm1, %xmm0
vaddps %xmm7, %xmm8, %xmm1
vcmpleps %xmm6, %xmm1, %xmm1
vandps %xmm1, %xmm0, %xmm10
vtestps 0x140(%rsp), %xmm10
jne 0x14b1ac9
incq %rax
cmpq 0x100(%rsp), %rax
jne 0x14b1602
jmp 0x14b1f1e
vmovaps %xmm4, %xmm15
vandps 0x140(%rsp), %xmm10, %xmm10
vmovaps 0x50(%rsp), %xmm0
vmulps 0x20(%rsp), %xmm0, %xmm0
vmulps 0x60(%rsp), %xmm4, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vmulps 0xc0(%rsp), %xmm14, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vxorps %xmm0, %xmm9, %xmm4
vbroadcastss 0xc(%r15), %xmm0
vmulps %xmm0, %xmm6, %xmm0
vcmpltps %xmm4, %xmm0, %xmm0
vbroadcastss 0x20(%r15), %xmm1
vmulps %xmm1, %xmm6, %xmm1
vcmpleps %xmm1, %xmm4, %xmm1
vandps %xmm0, %xmm1, %xmm3
vtestps %xmm10, %xmm3
je 0x14b1ab3
movq %rax, %r12
vandps %xmm3, %xmm10, %xmm3
vmovaps %xmm8, 0x160(%rsp)
vmovaps %xmm7, 0x170(%rsp)
vmovaps %xmm4, 0x180(%rsp)
vmovaps %xmm6, 0x190(%rsp)
vmovaps %xmm3, 0x1b0(%rsp)
vmovaps %xmm14, 0x1f0(%rsp)
vmovaps %xmm15, 0x200(%rsp)
vmovaps 0x20(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vrcpps %xmm6, %xmm0
vmulps %xmm0, %xmm6, %xmm1
vbroadcastss 0xa3ab88(%rip), %xmm2 # 0x1eec714
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm1, %xmm0, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmulps 0x180(%rsp), %xmm0, %xmm4
vmovaps %xmm3, 0x70(%rsp)
vmovaps %xmm4, 0x1e0(%rsp)
vmulps 0x160(%rsp), %xmm0, %xmm1
vmovaps %xmm1, 0x1c0(%rsp)
vmulps 0x170(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
vbroadcastss 0xa39e43(%rip), %xmm0 # 0x1eeba20
vblendvps %xmm3, %xmm4, %xmm0, %xmm0
vshufps $0xb1, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,0,3,2]
vminps %xmm0, %xmm1, %xmm1
vshufpd $0x1, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,0]
vminps %xmm1, %xmm2, %xmm1
vcmpeqps %xmm1, %xmm0, %xmm0
vtestps %xmm3, %xmm0
je 0x14b1c05
vandps %xmm3, %xmm0, %xmm3
vmovmskps %xmm3, %eax
bsfq %rax, %r13
movl 0x270(%rsp,%r13,4), %eax
movq 0x88(%rsp), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rsi
movl 0x24(%r15), %ecx
testl %ecx, 0x34(%rsi)
je 0x14b1e08
movq 0x18(%rsp), %rcx
movq 0x10(%rcx), %r14
cmpq $0x0, 0x10(%r14)
jne 0x14b1c50
cmpq $0x0, 0x40(%rsi)
je 0x14b1e90
vmovaps %xmm4, 0x30(%rsp)
vmovss 0x1c0(%rsp,%r13,4), %xmm0
vmovss 0x1d0(%rsp,%r13,4), %xmm1
movq 0x18(%rsp), %rcx
movq 0x8(%rcx), %rcx
movl 0x150(%rsp,%r13,4), %edx
vmovss 0x1f0(%rsp,%r13,4), %xmm2
vmovss 0x200(%rsp,%r13,4), %xmm3
vmovss 0x210(%rsp,%r13,4), %xmm4
vmovss %xmm2, 0x110(%rsp)
vmovss %xmm3, 0x114(%rsp)
vmovss %xmm4, 0x118(%rsp)
vmovss %xmm0, 0x11c(%rsp)
vmovss %xmm1, 0x120(%rsp)
movl %edx, 0x124(%rsp)
movl %eax, 0x128(%rsp)
movl (%rcx), %eax
movl %eax, 0x12c(%rsp)
movl 0x4(%rcx), %eax
movl %eax, 0x130(%rsp)
vmovss 0x20(%r15), %xmm0
vmovss %xmm0, 0x20(%rsp)
vmovss 0x1e0(%rsp,%r13,4), %xmm0
vmovss %xmm0, 0x20(%r15)
movl $0xffffffff, 0x4c(%rsp) # imm = 0xFFFFFFFF
leaq 0x4c(%rsp), %rax
movq %rax, 0x90(%rsp)
movq 0x18(%rsi), %rax
movq %rax, 0x98(%rsp)
movq %rcx, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x110(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, 0xb8(%rsp)
movq %rsi, %r15
movq 0x40(%rsi), %rax
testq %rax, %rax
je 0x14b1d79
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x14b1e53
movq 0x10(%r14), %rax
testq %rax, %rax
je 0x14b1dad
testb $0x2, (%r14)
jne 0x14b1d8f
testb $0x40, 0x3e(%r15)
je 0x14b1d9c
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x14b1e53
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x14b1e63
movl $0x0, 0x70(%rsp,%r13,4)
vmovaps 0x70(%rsp), %xmm1
vtestps %xmm1, %xmm1
je 0x14b1f16
vmovaps %xmm4, %xmm0
vmovaps %xmm1, %xmm2
leaq 0x90(%rsp), %rdi
vmovaps %xmm4, 0x30(%rsp)
vzeroupper
callq 0x21716d
vmovaps 0x30(%rsp), %xmm4
movq 0x90(%rsp), %r13
jmp 0x14b1c0d
movq 0x40(%rsp), %rax
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%rax)
movl $0x0, 0x70(%rsp,%r13,4)
movq 0x40(%rsp), %r15
vbroadcastss 0x20(%r15), %xmm0
vmovaps 0x30(%rsp), %xmm4
vcmpleps %xmm0, %xmm4, %xmm0
vandps 0x70(%rsp), %xmm0, %xmm0
vmovaps %xmm0, 0x70(%rsp)
jmp 0x14b1e11
vmovss 0x1c0(%rsp,%r13,4), %xmm0
vmovss 0x1d0(%rsp,%r13,4), %xmm1
vmovss 0x1e0(%rsp,%r13,4), %xmm2
vmovss %xmm2, 0x20(%r15)
vmovss 0x1f0(%rsp,%r13,4), %xmm2
vmovss %xmm2, 0x30(%r15)
vmovss 0x200(%rsp,%r13,4), %xmm2
vmovss %xmm2, 0x34(%r15)
vmovss 0x210(%rsp,%r13,4), %xmm2
vmovss %xmm2, 0x38(%r15)
vmovss %xmm0, 0x3c(%r15)
vmovss %xmm1, 0x40(%r15)
movl 0x150(%rsp,%r13,4), %ecx
movl %ecx, 0x44(%r15)
movl %eax, 0x48(%r15)
movq 0x18(%rsp), %rax
movq 0x8(%rax), %rax
movl (%rax), %ecx
movl %ecx, 0x4c(%r15)
movl 0x4(%rax), %eax
movl %eax, 0x50(%r15)
movq %r12, %rax
jmp 0x14b1ab3
vbroadcastss 0x20(%r15), %ymm0
movq 0xf8(%rsp), %r10
vmovaps 0x340(%rsp), %ymm6
vmovaps 0x320(%rsp), %ymm7
vmovaps 0x300(%rsp), %ymm8
movq 0xf0(%rsp), %r11
vmovaps 0x2e0(%rsp), %ymm9
vmovaps 0x2c0(%rsp), %ymm10
movq 0xe8(%rsp), %r14
movq 0xe0(%rsp), %r12
vmovaps 0x2a0(%rsp), %ymm11
movq 0xd8(%rsp), %r13
movq 0x10(%rsp), %rsi
movq 0x8(%rsp), %rdi
vmovaps 0x280(%rsp), %ymm12
movq (%rsp), %r9
jmp 0x14b1136
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, true, embree::avx::ArrayIntersector1<embree::avx::TriangleMiMBIntersector1Pluecker<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
|
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return false;
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf;
/* verify correct input */
assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));
/* load the point query into SIMD registers */
TravPointQuery<N> tquery(query->p, context->query_radius);
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N,types> nodeTraverser;
bool changed = false;
float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > cull_radius))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(point_query.trav_nodes,1,1,1);
bool nodeIntersected;
if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
} else {
nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
}
if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(point_query.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
{
changed = true;
tquery.rad = context->query_radius;
cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
return changed;
}
|
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x14b5243
xorl %eax, %eax
jmp 0x14b5a0a
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x2520, %rsp # imm = 0x2520
movq %rdx, %rbx
movq %rsi, %r12
movq 0x70(%rax), %rax
movq %rax, 0x1c0(%rsp)
movl $0x0, 0x1c8(%rsp)
cmpl $0x1, 0x18(%rdx)
jne 0x14b528b
vmovss 0x10(%r12), %xmm0
vmulss %xmm0, %xmm0, %xmm10
jmp 0x14b5296
vmovaps 0x50(%rbx), %xmm0
vdpps $0x7f, %xmm0, %xmm0, %xmm10
leaq 0x1d0(%rsp), %r11
vbroadcastss (%r12), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vbroadcastss 0x4(%r12), %ymm0
vmovaps %ymm0, 0x80(%rsp)
vbroadcastss 0x8(%r12), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
movl $0x0, 0x14(%rsp)
leaq 0x1c0(%rsp), %r14
vmovaps 0xa0(%rsp), %ymm3
vsubps %ymm0, %ymm3, %ymm4
vmovaps %ymm4, 0x180(%rsp)
vaddps %ymm0, %ymm3, %ymm3
vmovaps %ymm3, 0x160(%rsp)
vmovaps 0x80(%rsp), %ymm3
vsubps %ymm1, %ymm3, %ymm4
vmovaps %ymm4, 0x140(%rsp)
vaddps %ymm1, %ymm3, %ymm1
vmovaps %ymm1, 0x120(%rsp)
vmovaps 0x60(%rsp), %ymm1
vsubps %ymm2, %ymm1, %ymm3
vmovaps %ymm3, 0x100(%rsp)
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0xe0(%rsp)
vmulps %ymm0, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps %xmm10, 0xd0(%rsp)
cmpq %r14, %r11
je 0x14b59f8
vmovss -0x8(%r11), %xmm0
addq $-0x10, %r11
vucomiss %xmm10, %xmm0
ja 0x14b5368
movq (%r11), %r13
cmpl $0x1, 0x18(%rbx)
jne 0x14b54d3
testb $0x8, %r13b
jne 0x14b547f
movq %r13, %rax
andq $-0x10, %rax
vbroadcastss 0xc(%r12), %ymm0
vmulps 0x100(%rax), %ymm0, %ymm1
vaddps 0x40(%rax), %ymm1, %ymm1
vmulps 0x140(%rax), %ymm0, %ymm2
vaddps 0x80(%rax), %ymm2, %ymm2
vmulps 0x180(%rax), %ymm0, %ymm3
vaddps 0xc0(%rax), %ymm3, %ymm3
vmulps 0x120(%rax), %ymm0, %ymm4
vaddps 0x60(%rax), %ymm4, %ymm4
vmulps 0x160(%rax), %ymm0, %ymm5
vaddps 0xa0(%rax), %ymm5, %ymm5
vmulps 0x1a0(%rax), %ymm0, %ymm6
vaddps 0xe0(%rax), %ymm6, %ymm6
vmovaps 0xa0(%rsp), %ymm8
vmaxps %ymm1, %ymm8, %ymm7
vminps %ymm4, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmovaps 0x80(%rsp), %ymm8
vmaxps %ymm2, %ymm8, %ymm2
vminps %ymm5, %ymm2, %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmovaps 0x60(%rsp), %ymm5
vmaxps %ymm3, %ymm5, %ymm3
vminps %ymm6, %ymm3, %ymm3
vsubps %ymm5, %ymm3, %ymm3
vmulps %ymm7, %ymm7, %ymm5
vmulps %ymm2, %ymm2, %ymm2
vaddps %ymm2, %ymm5, %ymm2
vmulps %ymm3, %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmovaps %ymm2, 0x40(%rsp)
vcmpleps 0x1a0(%rsp), %ymm2, %ymm2
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm2, %ymm1
movl %r13d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x14b5632
vmovmskps %ymm1, %r15d
testb $0x8, %r13b
jne 0x14b561e
testq %r15, %r15
je 0x14b5628
andq $-0x10, %r13
bsfq %r15, %rdx
leaq -0x1(%r15), %r8
xorl %eax, %eax
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r15, %r8
jne 0x14b567a
movq %rcx, %r13
testl %eax, %eax
je 0x14b5385
jmp 0x14b58fa
testb $0x8, %r13b
jne 0x14b547f
movq %r13, %rax
andq $-0x10, %rax
vbroadcastss 0xc(%r12), %ymm0
vmulps 0x100(%rax), %ymm0, %ymm1
vaddps 0x40(%rax), %ymm1, %ymm1
vmulps 0x140(%rax), %ymm0, %ymm2
vaddps 0x80(%rax), %ymm2, %ymm2
vmulps 0x180(%rax), %ymm0, %ymm3
vaddps 0xc0(%rax), %ymm3, %ymm3
vmulps 0x120(%rax), %ymm0, %ymm4
vaddps 0x60(%rax), %ymm4, %ymm4
vmulps 0x160(%rax), %ymm0, %ymm5
vaddps 0xa0(%rax), %ymm5, %ymm5
vmulps 0x1a0(%rax), %ymm0, %ymm6
vaddps 0xe0(%rax), %ymm6, %ymm6
vmovaps 0xa0(%rsp), %ymm8
vmaxps %ymm1, %ymm8, %ymm7
vminps %ymm4, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmovaps 0x80(%rsp), %ymm9
vmaxps %ymm2, %ymm9, %ymm8
vminps %ymm5, %ymm8, %ymm8
vsubps %ymm9, %ymm8, %ymm8
vmulps %ymm7, %ymm7, %ymm7
vmulps %ymm8, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vmovaps 0x60(%rsp), %ymm9
vmaxps %ymm3, %ymm9, %ymm8
vminps %ymm6, %ymm8, %ymm8
vsubps %ymm9, %ymm8, %ymm8
vmulps %ymm8, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vmovaps %ymm7, 0x40(%rsp)
vcmpleps 0x160(%rsp), %ymm1, %ymm7
vcmpleps 0x120(%rsp), %ymm2, %ymm2
vandps %ymm7, %ymm2, %ymm2
vcmpleps %ymm4, %ymm1, %ymm1
vcmpleps 0xe0(%rsp), %ymm3, %ymm3
vcmpnltps 0x180(%rsp), %ymm4, %ymm4
vandps %ymm4, %ymm3, %ymm3
vandps %ymm3, %ymm2, %ymm2
vcmpnltps 0x140(%rsp), %ymm5, %ymm3
vandps %ymm3, %ymm1, %ymm1
vcmpnltps 0x100(%rsp), %ymm6, %ymm3
vandps %ymm3, %ymm1, %ymm1
vandps %ymm1, %ymm2, %ymm1
movl %r13d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
jne 0x14b547b
vbroadcastss 0xa6b957(%rip), %ymm2 # 0x1f20f64
vandps %ymm2, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vpackusdw %xmm2, %xmm1, %xmm1
jmp 0x14b563c
movl $0x6, %eax
jmp 0x14b54c6
movl $0x4, %eax
jmp 0x14b54c6
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vextractf128 $0x1, %ymm0, %xmm2
vpackssdw %xmm2, %xmm0, %xmm0
vpand %xmm1, %xmm0, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
jmp 0x14b547f
movl 0x40(%rsp,%rdx,4), %r10d
bsfq %r8, %r9
leaq -0x1(%r8), %rdx
movq (%r13,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x40(%rsp,%r9,4), %r9d
andq %r8, %rdx
jne 0x14b56d2
leaq 0x10(%r11), %rdx
cmpl %r9d, %r10d
jae 0x14b56c0
movq %rdi, (%r11)
movl %r9d, 0x8(%r11)
movq %rcx, %r13
jmp 0x14b56ca
movq %rcx, (%r11)
movl %r10d, 0x8(%r11)
movq %rdi, %r13
movq %rdx, %r11
jmp 0x14b54c6
vmovq %rcx, %xmm0
vmovd %r10d, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovq %rdi, %xmm1
vmovd %r9d, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
bsfq %rdx, %rcx
leaq -0x1(%rdx), %r8
movq (%r13,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
vmovq %rdi, %xmm2
vmovd 0x40(%rsp,%rcx,4), %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
andq %rdx, %r8
jne 0x14b577c
vpcmpgtd %xmm0, %xmm1, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm4
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm4, %xmm2, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm4, %xmm2, %xmm3
vblendvps %xmm1, %xmm2, %xmm4, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r11)
vmovaps %xmm4, 0x10(%r11)
vmovq %xmm3, %r13
addq $0x20, %r11
jmp 0x14b54c6
bsfq %r8, %rdx
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
vmovq %rcx, %xmm3
leaq -0x1(%r8), %rcx
vmovd 0x40(%rsp,%rdx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r8, %rcx
jne 0x14b583e
vpcmpgtd %xmm0, %xmm1, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm0, %xmm1, %xmm5
vblendvps %xmm4, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm2, %xmm3, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm2, %xmm3, %xmm4
vblendvps %xmm1, %xmm3, %xmm2, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm3
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm5, %xmm4, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm5, %xmm4, %xmm2
vblendvps %xmm1, %xmm4, %xmm5, %xmm1
vpcmpgtd %xmm1, %xmm3, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm3, %xmm5
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmovaps %xmm0, (%r11)
vmovaps %xmm1, 0x10(%r11)
vmovaps %xmm5, 0x20(%r11)
vmovq %xmm2, %r13
addq $0x30, %r11
jmp 0x14b54c6
vmovdqa %xmm0, (%r11)
vmovdqa %xmm1, 0x10(%r11)
vmovdqa %xmm2, 0x20(%r11)
vmovdqa %xmm3, 0x30(%r11)
movl $0x30, %edx
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdi
movq (%r13,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm0
vmovd 0x40(%rsp,%rsi,4), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x10(%r11,%rdx)
addq $0x10, %rdx
andq %rdi, %rcx
jne 0x14b585a
leaq (%r11,%rdx), %rcx
testq %rdx, %rdx
je 0x14b58ef
movl $0x10, %edx
movq %r11, %rsi
vmovdqa 0x10(%rsi), %xmm0
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rdx, %rdi
cmpl %r8d, -0x8(%r11,%rdi)
jae 0x14b58df
vmovdqa -0x10(%r11,%rdi), %xmm1
vmovdqa %xmm1, (%r11,%rdi)
addq $-0x10, %rdi
jne 0x14b58c0
movq %r11, %rdi
jmp 0x14b58e2
addq %r11, %rdi
vmovdqa %xmm0, (%rdi)
addq $0x10, %rdx
cmpq %rsi, %rcx
jne 0x14b58b0
movq (%rcx), %r13
movq %rcx, %r11
jmp 0x14b54c6
cmpl $0x6, %eax
jne 0x14b5368
movl %r13d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x28(%rsp)
je 0x14b5368
movq %r15, 0x18(%rsp)
movq %r11, 0x20(%rsp)
andq $-0x10, %r13
addq $0x40, %r13
xorl %eax, %eax
xorl %ecx, %ecx
movq %rcx, 0x30(%rsp)
movq %rax, 0x38(%rsp)
xorl %r14d, %r14d
xorl %r15d, %r15d
cmpl $-0x1, (%r13,%r15,4)
je 0x14b597e
movq (%rbx), %rax
movl -0x10(%r13,%r15,4), %ecx
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rdi
movl %ecx, 0x44(%rbx)
movl (%r13,%r15,4), %eax
movl %eax, 0x40(%rbx)
movq %r12, %rsi
movq %rbx, %rdx
vzeroupper
callq 0x91bd12
orb %al, %r14b
incq %r15
cmpq $0x4, %r15
jne 0x14b593e
movq 0x30(%rsp), %rcx
orb %r14b, %cl
movq 0x38(%rsp), %rax
incq %rax
addq $0x50, %r13
cmpq 0x28(%rsp), %rax
jne 0x14b592e
testb $0x1, %cl
vmovaps 0xd0(%rsp), %xmm10
movq 0x20(%rsp), %r11
leaq 0x1c0(%rsp), %r14
movq 0x18(%rsp), %r15
je 0x14b5368
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
cmpl $0x1, 0x18(%rbx)
jne 0x14b59e2
vmovss 0x10(%r12), %xmm3
vmulss %xmm3, %xmm3, %xmm10
jmp 0x14b59ed
vmovaps 0x50(%rbx), %xmm3
vdpps $0x7f, %xmm3, %xmm3, %xmm10
movb $0x1, %al
movl %eax, 0x14(%rsp)
jmp 0x14b52ec
movl 0x14(%rsp), %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
andb $0x1, %al
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 16777232, false, embree::avx::ArrayIntersector1<embree::avx::QuadMiMBIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
|
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return false;
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf;
/* verify correct input */
assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));
/* load the point query into SIMD registers */
TravPointQuery<N> tquery(query->p, context->query_radius);
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N,types> nodeTraverser;
bool changed = false;
float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > cull_radius))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(point_query.trav_nodes,1,1,1);
bool nodeIntersected;
if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
} else {
nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
}
if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(point_query.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
{
changed = true;
tquery.rad = context->query_radius;
cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
return changed;
}
|
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x14bc6e5
xorl %eax, %eax
jmp 0x14bceac
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x2520, %rsp # imm = 0x2520
movq %rdx, %rbx
movq %rsi, %r12
movq 0x70(%rax), %rax
movq %rax, 0x1c0(%rsp)
movl $0x0, 0x1c8(%rsp)
cmpl $0x1, 0x18(%rdx)
jne 0x14bc72d
vmovss 0x10(%r12), %xmm0
vmulss %xmm0, %xmm0, %xmm10
jmp 0x14bc738
vmovaps 0x50(%rbx), %xmm0
vdpps $0x7f, %xmm0, %xmm0, %xmm10
leaq 0x1d0(%rsp), %r11
vbroadcastss (%r12), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vbroadcastss 0x4(%r12), %ymm0
vmovaps %ymm0, 0x80(%rsp)
vbroadcastss 0x8(%r12), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
movl $0x0, 0x14(%rsp)
leaq 0x1c0(%rsp), %r14
vmovaps 0xa0(%rsp), %ymm3
vsubps %ymm0, %ymm3, %ymm4
vmovaps %ymm4, 0x180(%rsp)
vaddps %ymm0, %ymm3, %ymm3
vmovaps %ymm3, 0x160(%rsp)
vmovaps 0x80(%rsp), %ymm3
vsubps %ymm1, %ymm3, %ymm4
vmovaps %ymm4, 0x140(%rsp)
vaddps %ymm1, %ymm3, %ymm1
vmovaps %ymm1, 0x120(%rsp)
vmovaps 0x60(%rsp), %ymm1
vsubps %ymm2, %ymm1, %ymm3
vmovaps %ymm3, 0x100(%rsp)
vaddps %ymm2, %ymm1, %ymm1
vmovaps %ymm1, 0xe0(%rsp)
vmulps %ymm0, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps %xmm10, 0xd0(%rsp)
cmpq %r14, %r11
je 0x14bce9a
vmovss -0x8(%r11), %xmm0
addq $-0x10, %r11
vucomiss %xmm10, %xmm0
ja 0x14bc80a
movq (%r11), %r13
cmpl $0x1, 0x18(%rbx)
jne 0x14bc975
testb $0x8, %r13b
jne 0x14bc921
movq %r13, %rax
andq $-0x10, %rax
vbroadcastss 0xc(%r12), %ymm0
vmulps 0x100(%rax), %ymm0, %ymm1
vaddps 0x40(%rax), %ymm1, %ymm1
vmulps 0x140(%rax), %ymm0, %ymm2
vaddps 0x80(%rax), %ymm2, %ymm2
vmulps 0x180(%rax), %ymm0, %ymm3
vaddps 0xc0(%rax), %ymm3, %ymm3
vmulps 0x120(%rax), %ymm0, %ymm4
vaddps 0x60(%rax), %ymm4, %ymm4
vmulps 0x160(%rax), %ymm0, %ymm5
vaddps 0xa0(%rax), %ymm5, %ymm5
vmulps 0x1a0(%rax), %ymm0, %ymm6
vaddps 0xe0(%rax), %ymm6, %ymm6
vmovaps 0xa0(%rsp), %ymm8
vmaxps %ymm1, %ymm8, %ymm7
vminps %ymm4, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmovaps 0x80(%rsp), %ymm8
vmaxps %ymm2, %ymm8, %ymm2
vminps %ymm5, %ymm2, %ymm2
vsubps %ymm8, %ymm2, %ymm2
vmovaps 0x60(%rsp), %ymm5
vmaxps %ymm3, %ymm5, %ymm3
vminps %ymm6, %ymm3, %ymm3
vsubps %ymm5, %ymm3, %ymm3
vmulps %ymm7, %ymm7, %ymm5
vmulps %ymm2, %ymm2, %ymm2
vaddps %ymm2, %ymm5, %ymm2
vmulps %ymm3, %ymm3, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vmovaps %ymm2, 0x40(%rsp)
vcmpleps 0x1a0(%rsp), %ymm2, %ymm2
vcmpleps %ymm4, %ymm1, %ymm1
vandps %ymm1, %ymm2, %ymm1
movl %r13d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
je 0x14bcad4
vmovmskps %ymm1, %r15d
testb $0x8, %r13b
jne 0x14bcac0
testq %r15, %r15
je 0x14bcaca
andq $-0x10, %r13
bsfq %r15, %rdx
leaq -0x1(%r15), %r8
xorl %eax, %eax
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
andq %r15, %r8
jne 0x14bcb1c
movq %rcx, %r13
testl %eax, %eax
je 0x14bc827
jmp 0x14bcd9c
testb $0x8, %r13b
jne 0x14bc921
movq %r13, %rax
andq $-0x10, %rax
vbroadcastss 0xc(%r12), %ymm0
vmulps 0x100(%rax), %ymm0, %ymm1
vaddps 0x40(%rax), %ymm1, %ymm1
vmulps 0x140(%rax), %ymm0, %ymm2
vaddps 0x80(%rax), %ymm2, %ymm2
vmulps 0x180(%rax), %ymm0, %ymm3
vaddps 0xc0(%rax), %ymm3, %ymm3
vmulps 0x120(%rax), %ymm0, %ymm4
vaddps 0x60(%rax), %ymm4, %ymm4
vmulps 0x160(%rax), %ymm0, %ymm5
vaddps 0xa0(%rax), %ymm5, %ymm5
vmulps 0x1a0(%rax), %ymm0, %ymm6
vaddps 0xe0(%rax), %ymm6, %ymm6
vmovaps 0xa0(%rsp), %ymm8
vmaxps %ymm1, %ymm8, %ymm7
vminps %ymm4, %ymm7, %ymm7
vsubps %ymm8, %ymm7, %ymm7
vmovaps 0x80(%rsp), %ymm9
vmaxps %ymm2, %ymm9, %ymm8
vminps %ymm5, %ymm8, %ymm8
vsubps %ymm9, %ymm8, %ymm8
vmulps %ymm7, %ymm7, %ymm7
vmulps %ymm8, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vmovaps 0x60(%rsp), %ymm9
vmaxps %ymm3, %ymm9, %ymm8
vminps %ymm6, %ymm8, %ymm8
vsubps %ymm9, %ymm8, %ymm8
vmulps %ymm8, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vmovaps %ymm7, 0x40(%rsp)
vcmpleps 0x160(%rsp), %ymm1, %ymm7
vcmpleps 0x120(%rsp), %ymm2, %ymm2
vandps %ymm7, %ymm2, %ymm2
vcmpleps %ymm4, %ymm1, %ymm1
vcmpleps 0xe0(%rsp), %ymm3, %ymm3
vcmpnltps 0x180(%rsp), %ymm4, %ymm4
vandps %ymm4, %ymm3, %ymm3
vandps %ymm3, %ymm2, %ymm2
vcmpnltps 0x140(%rsp), %ymm5, %ymm3
vandps %ymm3, %ymm1, %ymm1
vcmpnltps 0x100(%rsp), %ymm6, %ymm3
vandps %ymm3, %ymm1, %ymm1
vandps %ymm1, %ymm2, %ymm1
movl %r13d, %ecx
andl $0x7, %ecx
cmpl $0x6, %ecx
jne 0x14bc91d
vbroadcastss 0xa644b5(%rip), %ymm2 # 0x1f20f64
vandps %ymm2, %ymm1, %ymm1
vextractf128 $0x1, %ymm1, %xmm2
vpackusdw %xmm2, %xmm1, %xmm1
jmp 0x14bcade
movl $0x6, %eax
jmp 0x14bc968
movl $0x4, %eax
jmp 0x14bc968
vextractf128 $0x1, %ymm1, %xmm2
vpackssdw %xmm2, %xmm1, %xmm1
vmovaps 0x1c0(%rax), %ymm2
vcmpleps %ymm0, %ymm2, %ymm2
vcmpltps 0x1e0(%rax), %ymm0, %ymm0
vandps %ymm0, %ymm2, %ymm0
vextractf128 $0x1, %ymm0, %xmm2
vpackssdw %xmm2, %xmm0, %xmm0
vpand %xmm1, %xmm0, %xmm0
vpsllw $0xf, %xmm0, %xmm0
vpacksswb %xmm0, %xmm0, %xmm0
vpmovmskb %xmm0, %eax
movzbl %al, %r15d
jmp 0x14bc921
movl 0x40(%rsp,%rdx,4), %r10d
bsfq %r8, %r9
leaq -0x1(%r8), %rdx
movq (%r13,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
movl 0x40(%rsp,%r9,4), %r9d
andq %r8, %rdx
jne 0x14bcb74
leaq 0x10(%r11), %rdx
cmpl %r9d, %r10d
jae 0x14bcb62
movq %rdi, (%r11)
movl %r9d, 0x8(%r11)
movq %rcx, %r13
jmp 0x14bcb6c
movq %rcx, (%r11)
movl %r10d, 0x8(%r11)
movq %rdi, %r13
movq %rdx, %r11
jmp 0x14bc968
vmovq %rcx, %xmm0
vmovd %r10d, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovq %rdi, %xmm1
vmovd %r9d, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
bsfq %rdx, %rcx
leaq -0x1(%rdx), %r8
movq (%r13,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
prefetcht0 0xc0(%rdi)
vmovq %rdi, %xmm2
vmovd 0x40(%rsp,%rcx,4), %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
andq %rdx, %r8
jne 0x14bcc1e
vpcmpgtd %xmm0, %xmm1, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm4
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm4, %xmm2, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm4, %xmm2, %xmm3
vblendvps %xmm1, %xmm2, %xmm4, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r11)
vmovaps %xmm4, 0x10(%r11)
vmovq %xmm3, %r13
addq $0x20, %r11
jmp 0x14bc968
bsfq %r8, %rdx
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
prefetcht0 0xc0(%rcx)
vmovq %rcx, %xmm3
leaq -0x1(%r8), %rcx
vmovd 0x40(%rsp,%rdx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r8, %rcx
jne 0x14bcce0
vpcmpgtd %xmm0, %xmm1, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm0, %xmm1, %xmm5
vblendvps %xmm4, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm2, %xmm3, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm2, %xmm3, %xmm4
vblendvps %xmm1, %xmm3, %xmm2, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm3
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm5, %xmm4, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm5, %xmm4, %xmm2
vblendvps %xmm1, %xmm4, %xmm5, %xmm1
vpcmpgtd %xmm1, %xmm3, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm3, %xmm5
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmovaps %xmm0, (%r11)
vmovaps %xmm1, 0x10(%r11)
vmovaps %xmm5, 0x20(%r11)
vmovq %xmm2, %r13
addq $0x30, %r11
jmp 0x14bc968
vmovdqa %xmm0, (%r11)
vmovdqa %xmm1, 0x10(%r11)
vmovdqa %xmm2, 0x20(%r11)
vmovdqa %xmm3, 0x30(%r11)
movl $0x30, %edx
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdi
movq (%r13,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
prefetcht0 0xc0(%r8)
vmovq %r8, %xmm0
vmovd 0x40(%rsp,%rsi,4), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x10(%r11,%rdx)
addq $0x10, %rdx
andq %rdi, %rcx
jne 0x14bccfc
leaq (%r11,%rdx), %rcx
testq %rdx, %rdx
je 0x14bcd91
movl $0x10, %edx
movq %r11, %rsi
vmovdqa 0x10(%rsi), %xmm0
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rdx, %rdi
cmpl %r8d, -0x8(%r11,%rdi)
jae 0x14bcd81
vmovdqa -0x10(%r11,%rdi), %xmm1
vmovdqa %xmm1, (%r11,%rdi)
addq $-0x10, %rdi
jne 0x14bcd62
movq %r11, %rdi
jmp 0x14bcd84
addq %r11, %rdi
vmovdqa %xmm0, (%rdi)
addq $0x10, %rdx
cmpq %rsi, %rcx
jne 0x14bcd52
movq (%rcx), %r13
movq %rcx, %r11
jmp 0x14bc968
cmpl $0x6, %eax
jne 0x14bc80a
movl %r13d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x28(%rsp)
je 0x14bc80a
movq %r15, 0x18(%rsp)
movq %r11, 0x20(%rsp)
andq $-0x10, %r13
addq $0x50, %r13
xorl %eax, %eax
xorl %ecx, %ecx
movq %rcx, 0x30(%rsp)
movq %rax, 0x38(%rsp)
xorl %r14d, %r14d
xorl %r15d, %r15d
cmpl $-0x1, (%r13,%r15,4)
je 0x14bce20
movq (%rbx), %rax
movl -0x10(%r13,%r15,4), %ecx
movq 0x1e8(%rax), %rax
movq (%rax,%rcx,8), %rdi
movl %ecx, 0x44(%rbx)
movl (%r13,%r15,4), %eax
movl %eax, 0x40(%rbx)
movq %r12, %rsi
movq %rbx, %rdx
vzeroupper
callq 0x91bd12
orb %al, %r14b
incq %r15
cmpq $0x4, %r15
jne 0x14bcde0
movq 0x30(%rsp), %rcx
orb %r14b, %cl
movq 0x38(%rsp), %rax
incq %rax
addq $0x60, %r13
cmpq 0x28(%rsp), %rax
jne 0x14bcdd0
testb $0x1, %cl
vmovaps 0xd0(%rsp), %xmm10
movq 0x20(%rsp), %r11
leaq 0x1c0(%rsp), %r14
movq 0x18(%rsp), %r15
je 0x14bc80a
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm1
vbroadcastss 0x58(%rbx), %ymm2
cmpl $0x1, 0x18(%rbx)
jne 0x14bce84
vmovss 0x10(%r12), %xmm3
vmulss %xmm3, %xmm3, %xmm10
jmp 0x14bce8f
vmovaps 0x50(%rbx), %xmm3
vdpps $0x7f, %xmm3, %xmm3, %xmm10
movb $0x1, %al
movl %eax, 0x14(%rsp)
jmp 0x14bc78e
movl 0x14(%rsp), %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
andb $0x1, %al
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::avx::BVHNIntersector1<8, 1048576, false, embree::avx::ArrayIntersector1<embree::avx::TriangleMIntersector1Moeller<4, true>>>::pointQuery(embree::Accel::Intersectors const*, embree::PointQueryK<1>*, embree::PointQueryContext*)
|
static __forceinline bool pointQuery(const Accel::Intersectors* This, PointQuery* query, PointQueryContext* context)
{
const BVH* __restrict__ bvh = (const BVH*)This->ptr;
/* we may traverse an empty BVH in case all geometry was invalid */
if (bvh->root == BVH::emptyNode)
return false;
/* stack state */
StackItemT<NodeRef> stack[stackSize]; // stack of nodes
StackItemT<NodeRef>* stackPtr = stack+1; // current stack pointer
StackItemT<NodeRef>* stackEnd = stack+stackSize;
stack[0].ptr = bvh->root;
stack[0].dist = neg_inf;
/* verify correct input */
assert(!(types & BVH_MB) || (query->time >= 0.0f && query->time <= 1.0f));
/* load the point query into SIMD registers */
TravPointQuery<N> tquery(query->p, context->query_radius);
/* initialize the node traverser */
BVHNNodeTraverser1Hit<N,types> nodeTraverser;
bool changed = false;
float cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
/* pop loop */
while (true) pop:
{
/* pop next node */
if (unlikely(stackPtr == stack)) break;
stackPtr--;
NodeRef cur = NodeRef(stackPtr->ptr);
/* if popped node is too far, pop next one */
if (unlikely(*(float*)&stackPtr->dist > cull_radius))
continue;
/* downtraversal loop */
while (true)
{
/* intersect node */
size_t mask; vfloat<N> tNear;
STAT3(point_query.trav_nodes,1,1,1);
bool nodeIntersected;
if (likely(context->query_type == POINT_QUERY_TYPE_SPHERE)) {
nodeIntersected = BVHNNodePointQuerySphere1<N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
} else {
nodeIntersected = BVHNNodePointQueryAABB1 <N, types>::pointQuery(cur, tquery, query->time, tNear, mask);
}
if (unlikely(!nodeIntersected)) { STAT3(point_query.trav_nodes,-1,-1,-1); break; }
/* if no child is hit, pop next node */
if (unlikely(mask == 0))
goto pop;
/* select next child and push other children */
nodeTraverser.traverseClosestHit(cur, mask, tNear, stackPtr, stackEnd);
}
/* this is a leaf node */
assert(cur != BVH::emptyNode);
STAT3(point_query.trav_leaves,1,1,1);
size_t num; Primitive* prim = (Primitive*)cur.leaf(num);
size_t lazy_node = 0;
if (PrimitiveIntersector1::pointQuery(This, query, context, prim, num, tquery, lazy_node))
{
changed = true;
tquery.rad = context->query_radius;
cull_radius = context->query_type == POINT_QUERY_TYPE_SPHERE
? query->radius * query->radius
: dot(context->query_radius, context->query_radius);
}
/* push lazy node onto stack */
if (unlikely(lazy_node)) {
stackPtr->ptr = lazy_node;
stackPtr->dist = neg_inf;
stackPtr++;
}
}
return changed;
}
|
movq (%rdi), %rax
cmpq $0x8, 0x70(%rax)
jne 0x14c13b7
xorl %eax, %eax
jmp 0x14c1ce3
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x2520, %rsp # imm = 0x2520
movq %rdx, %rbx
movq 0x70(%rax), %rax
movq %rax, 0x1c0(%rsp)
movl $0x0, 0x1c8(%rsp)
cmpl $0x1, 0x18(%rdx)
jne 0x14c13fa
vmovss 0x10(%rsi), %xmm0
vmulss %xmm0, %xmm0, %xmm12
jmp 0x14c1405
vmovaps 0x50(%rbx), %xmm0
vdpps $0x7f, %xmm0, %xmm0, %xmm12
leaq 0x1d0(%rsp), %r11
vbroadcastss (%rsi), %ymm0
vmovaps %ymm0, 0xa0(%rsp)
vbroadcastss 0x4(%rsi), %ymm0
vmovaps %ymm0, 0x80(%rsp)
vbroadcastss 0x8(%rsi), %ymm0
vmovaps %ymm0, 0x60(%rsp)
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm2
vbroadcastss 0x58(%rbx), %ymm1
movl $0x0, 0x14(%rsp)
leaq 0x1c0(%rsp), %r14
vpcmpeqd %xmm13, %xmm13, %xmm13
vbroadcastss 0xa2f566(%rip), %ymm14 # 0x1ef09cc
vmovaps 0xa0(%rsp), %ymm3
vsubps %ymm0, %ymm3, %ymm4
vmovaps %ymm4, 0x180(%rsp)
vaddps %ymm0, %ymm3, %ymm3
vmovaps %ymm3, 0x160(%rsp)
vmovaps 0x80(%rsp), %ymm3
vsubps %ymm2, %ymm3, %ymm4
vmovaps %ymm4, 0x140(%rsp)
vaddps %ymm2, %ymm3, %ymm2
vmovaps %ymm2, 0x120(%rsp)
vmovaps 0x60(%rsp), %ymm2
vsubps %ymm1, %ymm2, %ymm3
vmovaps %ymm3, 0x100(%rsp)
vaddps %ymm1, %ymm2, %ymm1
vmovaps %ymm1, 0xe0(%rsp)
vmulps %ymm0, %ymm0, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vmovaps %xmm12, 0xd0(%rsp)
cmpq %r14, %r11
je 0x14c1cd1
vmovss -0x8(%r11), %xmm0
addq $-0x10, %r11
vucomiss %xmm12, %xmm0
ja 0x14c14e2
movq (%r11), %r13
cmpl $0x1, 0x18(%rbx)
jne 0x14c1729
testb $0x8, %r13b
jne 0x14c16dc
movq %r13, %rcx
andq $-0x10, %rcx
leaq 0x40(%rcx), %rax
testq %rcx, %rcx
cmoveq %rcx, %rax
vbroadcastss 0x30(%rax), %ymm5
vbroadcastss 0x3c(%rax), %ymm6
vmovq (%rax), %xmm1
vpmovzxbd %xmm1, %xmm2 # xmm2 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
vmovq 0x4(%rax), %xmm0
vpmovzxbd %xmm0, %xmm3 # xmm3 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vinsertf128 $0x1, %xmm3, %ymm2, %ymm2
vcvtdq2ps %ymm2, %ymm2
vmulps %ymm2, %ymm6, %ymm2
vaddps %ymm2, %ymm5, %ymm2
vmovq 0x8(%rax), %xmm4
vpmovzxbd %xmm4, %xmm7 # xmm7 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vmovq 0xc(%rax), %xmm3
vpmovzxbd %xmm3, %xmm8 # xmm8 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm6, %ymm6
vbroadcastss 0x34(%rax), %ymm7
vbroadcastss 0x40(%rax), %ymm8
vaddps %ymm6, %ymm5, %ymm5
vmovq 0x10(%rax), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vmovq 0x14(%rax), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vinsertf128 $0x1, %xmm9, %ymm6, %ymm6
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vmovq 0x18(%rax), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vmovq 0x1c(%rax), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vcvtdq2ps %ymm9, %ymm9
vmulps %ymm9, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vbroadcastss 0x44(%rax), %ymm8
vmovq 0x20(%rax), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vmovq 0x24(%rax), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vmovq 0x28(%rax), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vmovq 0x2c(%rax), %xmm11
vpmovzxbd %xmm11, %xmm11 # xmm11 = xmm11[0],zero,zero,zero,xmm11[1],zero,zero,zero,xmm11[2],zero,zero,zero,xmm11[3],zero,zero,zero
vinsertf128 $0x1, %xmm11, %ymm10, %ymm10
vcvtdq2ps %ymm9, %ymm9
vmulps %ymm9, %ymm8, %ymm9
vcvtdq2ps %ymm10, %ymm10
vmulps %ymm10, %ymm8, %ymm8
vbroadcastss 0x38(%rax), %ymm10
vaddps %ymm9, %ymm10, %ymm9
vaddps %ymm8, %ymm10, %ymm8
vmovaps 0x80(%rsp), %ymm10
vmaxps %ymm6, %ymm10, %ymm6
vminps %ymm7, %ymm6, %ymm6
vmovaps 0x60(%rsp), %ymm11
vmaxps %ymm9, %ymm11, %ymm7
vminps %ymm8, %ymm7, %ymm7
vmovaps 0xa0(%rsp), %ymm9
vmaxps %ymm2, %ymm9, %ymm8
vminps %ymm5, %ymm8, %ymm8
vsubps %ymm9, %ymm8, %ymm8
vsubps %ymm10, %ymm6, %ymm6
vmulps %ymm8, %ymm8, %ymm8
vmulps %ymm6, %ymm6, %ymm6
vaddps %ymm6, %ymm8, %ymm6
vsubps %ymm11, %ymm7, %ymm7
vmulps %ymm7, %ymm7, %ymm7
vaddps %ymm7, %ymm6, %ymm6
vpminub %xmm4, %xmm1, %xmm4
vpcmpeqb %xmm4, %xmm1, %xmm1
vpminub %xmm3, %xmm0, %xmm3
vpcmpeqb %xmm3, %xmm0, %xmm0
vcmpleps %ymm5, %ymm2, %ymm2
vpxor %xmm1, %xmm13, %xmm1
vpmovsxbd %xmm1, %xmm1
vpxor %xmm0, %xmm13, %xmm0
vpmovsxbd %xmm0, %xmm0
vinsertf128 $0x1, %xmm0, %ymm1, %ymm0
vcvtdq2ps %ymm0, %ymm0
vcmpltps %ymm0, %ymm14, %ymm0
vandps %ymm0, %ymm2, %ymm0
vmovaps %ymm6, 0x40(%rsp)
vcmpleps 0x1a0(%rsp), %ymm6, %ymm1
vandps %ymm1, %ymm0, %ymm0
vmovmskps %ymm0, %r15d
testb $0x8, %r13b
jne 0x14c1937
testq %r15, %r15
je 0x14c1941
andq $-0x10, %r13
bsfq %r15, %rdx
leaq -0x1(%r15), %r8
xorl %eax, %eax
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
andq %r15, %r8
jne 0x14c194b
movq %rcx, %r13
testl %eax, %eax
je 0x14c14ff
jmp 0x14c1bbc
testb $0x8, %r13b
jne 0x14c16dc
movq %r13, %rcx
andq $-0x10, %rcx
leaq 0x40(%rcx), %rax
testq %rcx, %rcx
cmoveq %rcx, %rax
vmovq (%rax), %xmm0
vpmovzxbd %xmm0, %xmm1 # xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
vmovq 0x4(%rax), %xmm2
vpmovzxbd %xmm2, %xmm3 # xmm3 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
vinsertf128 $0x1, %xmm3, %ymm1, %ymm1
vmovq 0x8(%rax), %xmm3
vpmovzxbd %xmm3, %xmm4 # xmm4 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
vmovq 0xc(%rax), %xmm5
vpmovzxbd %xmm5, %xmm6 # xmm6 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
vinsertf128 $0x1, %xmm6, %ymm4, %ymm4
vpminub %xmm3, %xmm0, %xmm3
vpcmpeqb %xmm3, %xmm0, %xmm0
vpminub %xmm5, %xmm2, %xmm3
vbroadcastss 0x30(%rax), %ymm5
vbroadcastss 0x3c(%rax), %ymm6
vpcmpeqb %xmm3, %xmm2, %xmm3
vcvtdq2ps %ymm1, %ymm1
vmulps %ymm1, %ymm6, %ymm1
vaddps %ymm1, %ymm5, %ymm1
vcvtdq2ps %ymm4, %ymm2
vmulps %ymm2, %ymm6, %ymm2
vbroadcastss 0x34(%rax), %ymm6
vaddps %ymm2, %ymm5, %ymm2
vbroadcastss 0x40(%rax), %ymm5
vmovq 0x10(%rax), %xmm4
vpmovzxbd %xmm4, %xmm4 # xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
vmovq 0x14(%rax), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vinsertf128 $0x1, %xmm7, %ymm4, %ymm4
vcvtdq2ps %ymm4, %ymm4
vmulps %ymm4, %ymm5, %ymm4
vaddps %ymm4, %ymm6, %ymm4
vmovq 0x18(%rax), %xmm7
vpmovzxbd %xmm7, %xmm7 # xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
vmovq 0x1c(%rax), %xmm8
vpmovzxbd %xmm8, %xmm8 # xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
vinsertf128 $0x1, %xmm8, %ymm7, %ymm7
vcvtdq2ps %ymm7, %ymm7
vmulps %ymm7, %ymm5, %ymm5
vbroadcastss 0x38(%rax), %ymm7
vaddps %ymm5, %ymm6, %ymm5
vbroadcastss 0x44(%rax), %ymm8
vmovq 0x20(%rax), %xmm6
vpmovzxbd %xmm6, %xmm6 # xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
vmovq 0x24(%rax), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vinsertf128 $0x1, %xmm9, %ymm6, %ymm6
vmovq 0x28(%rax), %xmm9
vpmovzxbd %xmm9, %xmm9 # xmm9 = xmm9[0],zero,zero,zero,xmm9[1],zero,zero,zero,xmm9[2],zero,zero,zero,xmm9[3],zero,zero,zero
vmovq 0x2c(%rax), %xmm10
vpmovzxbd %xmm10, %xmm10 # xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
vinsertf128 $0x1, %xmm10, %ymm9, %ymm9
vcvtdq2ps %ymm6, %ymm6
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm7, %ymm6
vcvtdq2ps %ymm9, %ymm9
vmulps %ymm9, %ymm8, %ymm8
vaddps %ymm7, %ymm8, %ymm7
vmovaps 0xa0(%rsp), %ymm9
vmaxps %ymm1, %ymm9, %ymm8
vminps %ymm2, %ymm8, %ymm8
vsubps %ymm9, %ymm8, %ymm8
vmovaps 0x80(%rsp), %ymm10
vmaxps %ymm4, %ymm10, %ymm9
vminps %ymm5, %ymm9, %ymm9
vsubps %ymm10, %ymm9, %ymm9
vmulps %ymm8, %ymm8, %ymm8
vmulps %ymm9, %ymm9, %ymm9
vaddps %ymm9, %ymm8, %ymm8
vmovaps 0x60(%rsp), %ymm10
vmaxps %ymm6, %ymm10, %ymm9
vminps %ymm7, %ymm9, %ymm9
vsubps %ymm10, %ymm9, %ymm9
vmulps %ymm9, %ymm9, %ymm9
vaddps %ymm9, %ymm8, %ymm8
vmovaps %ymm8, 0x40(%rsp)
vpxor %xmm0, %xmm13, %xmm0
vpmovsxbd %xmm0, %xmm0
vpxor %xmm3, %xmm13, %xmm3
vpmovsxbd %xmm3, %xmm3
vinsertf128 $0x1, %xmm3, %ymm0, %ymm0
vcmpleps %ymm2, %ymm1, %ymm3
vcmpleps 0x160(%rsp), %ymm1, %ymm1
vandps %ymm3, %ymm1, %ymm1
vcvtdq2ps %ymm0, %ymm0
vcmpltps %ymm0, %ymm14, %ymm0
vcmpnltps 0x180(%rsp), %ymm2, %ymm2
vandps %ymm0, %ymm2, %ymm0
vandps %ymm0, %ymm1, %ymm0
vcmpleps 0x120(%rsp), %ymm4, %ymm1
vcmpnltps 0x140(%rsp), %ymm5, %ymm2
vandps %ymm2, %ymm1, %ymm1
vcmpleps 0xe0(%rsp), %ymm6, %ymm2
vandps %ymm2, %ymm1, %ymm1
vandps %ymm1, %ymm0, %ymm0
vcmpnltps 0x100(%rsp), %ymm7, %ymm1
jmp 0x14c16d4
movl $0x6, %eax
jmp 0x14c171c
movl $0x4, %eax
jmp 0x14c171c
movl 0x40(%rsp,%rdx,4), %r10d
bsfq %r8, %r9
leaq -0x1(%r8), %rdx
movq (%r13,%r9,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
movl 0x40(%rsp,%r9,4), %r9d
andq %r8, %rdx
jne 0x14c199f
leaq 0x10(%r11), %rdx
cmpl %r9d, %r10d
jae 0x14c198d
movq %rdi, (%r11)
movl %r9d, 0x8(%r11)
movq %rdx, %r11
jmp 0x14c1719
movq %rcx, (%r11)
movl %r10d, 0x8(%r11)
movq %rdx, %r11
movq %rdi, %r13
jmp 0x14c171c
vmovq %rcx, %xmm0
vmovd %r10d, %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovq %rdi, %xmm1
vmovd %r9d, %xmm2
vpunpcklqdq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0]
bsfq %rdx, %rcx
leaq -0x1(%rdx), %r8
movq (%r13,%rcx,8), %rdi
prefetcht0 (%rdi)
prefetcht0 0x40(%rdi)
prefetcht0 0x80(%rdi)
vmovq %rdi, %xmm2
vmovd 0x40(%rsp,%rcx,4), %xmm3
vpunpcklqdq %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm3[0]
andq %rdx, %r8
jne 0x14c1a42
vpcmpgtd %xmm0, %xmm1, %xmm3
vpshufd $0xaa, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vblendvps %xmm3, %xmm0, %xmm1, %xmm4
vblendvps %xmm3, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm4, %xmm2, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm4, %xmm2, %xmm3
vblendvps %xmm1, %xmm2, %xmm4, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm4
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vmovaps %xmm0, (%r11)
vmovaps %xmm4, 0x10(%r11)
vmovq %xmm3, %r13
addq $0x20, %r11
jmp 0x14c171c
movq %rsi, %r9
bsfq %r8, %rdx
movq (%r13,%rdx,8), %rcx
prefetcht0 (%rcx)
prefetcht0 0x40(%rcx)
prefetcht0 0x80(%rcx)
vmovq %rcx, %xmm3
leaq -0x1(%r8), %rcx
vmovd 0x40(%rsp,%rdx,4), %xmm4
vpunpcklqdq %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0],xmm4[0]
andq %r8, %rcx
jne 0x14c1b08
vpcmpgtd %xmm0, %xmm1, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm0, %xmm1, %xmm5
vblendvps %xmm4, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm2, %xmm3, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm2, %xmm3, %xmm4
vblendvps %xmm1, %xmm3, %xmm2, %xmm1
vpcmpgtd %xmm0, %xmm1, %xmm2
vpshufd $0xaa, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vblendvps %xmm2, %xmm0, %xmm1, %xmm3
vblendvps %xmm2, %xmm1, %xmm0, %xmm0
vpcmpgtd %xmm5, %xmm4, %xmm1
vpshufd $0xaa, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vblendvps %xmm1, %xmm5, %xmm4, %xmm2
vblendvps %xmm1, %xmm4, %xmm5, %xmm1
vpcmpgtd %xmm1, %xmm3, %xmm4
vpshufd $0xaa, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vblendvps %xmm4, %xmm1, %xmm3, %xmm5
vblendvps %xmm4, %xmm3, %xmm1, %xmm1
vmovaps %xmm0, (%r11)
vmovaps %xmm1, 0x10(%r11)
vmovaps %xmm5, 0x20(%r11)
vmovq %xmm2, %r13
addq $0x30, %r11
movq %r9, %rsi
vpcmpeqd %xmm13, %xmm13, %xmm13
jmp 0x14c171c
vmovdqa %xmm0, (%r11)
vmovdqa %xmm1, 0x10(%r11)
vmovdqa %xmm2, 0x20(%r11)
vmovdqa %xmm3, 0x30(%r11)
movl $0x30, %edx
bsfq %rcx, %rsi
leaq -0x1(%rcx), %rdi
movq (%r13,%rsi,8), %r8
prefetcht0 (%r8)
prefetcht0 0x40(%r8)
prefetcht0 0x80(%r8)
vmovq %r8, %xmm0
vmovd 0x40(%rsp,%rsi,4), %xmm1
vpunpcklqdq %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0]
vmovdqa %xmm0, 0x10(%r11,%rdx)
addq $0x10, %rdx
andq %rdi, %rcx
jne 0x14c1b24
leaq (%r11,%rdx), %rcx
testq %rdx, %rdx
je 0x14c1bb1
movl $0x10, %edx
movq %r11, %rsi
vmovdqa 0x10(%rsi), %xmm0
movl 0x18(%rsi), %r8d
addq $0x10, %rsi
movq %rdx, %rdi
cmpl %r8d, -0x8(%r11,%rdi)
jae 0x14c1ba1
vmovdqa -0x10(%r11,%rdi), %xmm1
vmovdqa %xmm1, (%r11,%rdi)
addq $-0x10, %rdi
jne 0x14c1b82
movq %r11, %rdi
jmp 0x14c1ba4
addq %r11, %rdi
vmovdqa %xmm0, (%rdi)
addq $0x10, %rdx
cmpq %rsi, %rcx
jne 0x14c1b72
movq (%rcx), %r13
movq %rcx, %r11
jmp 0x14c1afb
cmpl $0x6, %eax
jne 0x14c14e2
movl %r13d, %eax
andl $0xf, %eax
addq $-0x8, %rax
movq %rax, 0x28(%rsp)
je 0x14c14e2
movq %r15, 0x18(%rsp)
movq %r11, 0x20(%rsp)
andq $-0x10, %r13
addq $0xa0, %r13
xorl %eax, %eax
xorl %ecx, %ecx
movq %rcx, 0x30(%rsp)
movq %rax, 0x38(%rsp)
movq $-0x4, %r12
xorl %r14d, %r14d
movl (%r13,%r12,4), %eax
movl $0xffffffff, %ecx # imm = 0xFFFFFFFF
cmpq %rcx, %rax
je 0x14c1c48
movq (%rbx), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rax,8), %rdi
movl %eax, 0x44(%rbx)
movl 0x10(%r13,%r12,4), %eax
movl %eax, 0x40(%rbx)
movq %rbx, %rdx
movq %rsi, %r15
vzeroupper
callq 0x91bd12
movq %r15, %rsi
orb %al, %r14b
incq %r12
jne 0x14c1c07
movq 0x30(%rsp), %rcx
orb %r14b, %cl
movq 0x38(%rsp), %rax
incq %rax
addq $0xb0, %r13
cmpq 0x28(%rsp), %rax
jne 0x14c1bf3
testb $0x1, %cl
vmovaps 0xd0(%rsp), %xmm12
movq 0x20(%rsp), %r11
leaq 0x1c0(%rsp), %r14
vpcmpeqd %xmm13, %xmm13, %xmm13
vbroadcastss 0xa2ed3f(%rip), %ymm14 # 0x1ef09cc
movq 0x18(%rsp), %r15
je 0x14c14e2
vbroadcastss 0x50(%rbx), %ymm0
vbroadcastss 0x54(%rbx), %ymm2
vbroadcastss 0x58(%rbx), %ymm1
cmpl $0x1, 0x18(%rbx)
jne 0x14c1cbb
vmovss 0x10(%rsi), %xmm3
vmulss %xmm3, %xmm3, %xmm12
jmp 0x14c1cc6
vmovaps 0x50(%rbx), %xmm3
vdpps $0x7f, %xmm3, %xmm3, %xmm12
movb $0x1, %al
movl %eax, 0x14(%rsp)
jmp 0x14c1466
movl 0x14(%rsp), %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
andb $0x1, %al
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_intersector1.cpp
|
embree::BVHNStatistics<8>::BVHNStatistics(embree::BVHN<8>*)
|
BVHNStatistics<N>::BVHNStatistics (BVH* bvh) : bvh(bvh)
{
double A = max(0.0f,bvh->getLinearBounds().expectedHalfArea());
stat = statistics(bvh->root,A,BBox1f(0.0f,1.0f));
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x330, %rsp # imm = 0x330
movq %rsi, %r15
movq %rdi, %r14
movq %rsi, (%rdi)
leaq 0x8(%rdi), %rbx
xorl %r13d, %r13d
leaq 0x1b8(%rsp), %r12
vxorps %xmm0, %xmm0, %xmm0
movq %r12, %rdi
xorl %esi, %esi
xorl %edx, %edx
xorl %ecx, %ecx
xorl %r8d, %r8d
xorl %r9d, %r9d
callq 0x14d0248
movq %r13, 0x1b0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vmovaps %xmm0, 0x180(%rsp)
movq %r13, 0x190(%rsp)
vmovaps %xmm0, 0x160(%rsp)
movq %r13, 0x170(%rsp)
vmovaps %xmm0, 0x140(%rsp)
movq %r13, 0x150(%rsp)
vmovaps %xmm0, 0x120(%rsp)
movq %r13, 0x130(%rsp)
vmovaps %xmm0, 0x100(%rsp)
movq %r13, 0x110(%rsp)
movq 0x110(%rsp), %rax
movq %rax, 0xf8(%rsp)
vmovaps 0x100(%rsp), %xmm0
vmovups %xmm0, 0xe8(%rsp)
movq 0x130(%rsp), %rax
movq %rax, 0xe0(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovups %xmm0, 0xd0(%rsp)
movq 0x150(%rsp), %rax
movq %rax, 0xc8(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovups %xmm0, 0xb8(%rsp)
movq 0x170(%rsp), %rax
movq %rax, 0xb0(%rsp)
vmovaps 0x160(%rsp), %xmm0
vmovups %xmm0, 0xa0(%rsp)
movq 0x190(%rsp), %rax
movq %rax, 0x98(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmovups %xmm0, 0x88(%rsp)
movq 0x1b0(%rsp), %rax
movq %rax, 0x80(%rsp)
vmovaps 0x1a0(%rsp), %xmm0
vmovups %xmm0, 0x70(%rsp)
vmovups (%r12), %ymm0
vmovups 0x20(%r12), %ymm1
vmovups 0x40(%r12), %ymm2
vmovups 0x50(%r12), %ymm3
vmovups %ymm3, 0x50(%rsp)
vmovups %ymm2, 0x40(%rsp)
vmovups %ymm1, 0x20(%rsp)
vmovups %ymm0, (%rsp)
movq %rbx, %rdi
xorl %esi, %esi
vzeroupper
callq 0x14d0972
vmovaps 0x20(%r15), %xmm0
vmovaps 0x40(%r15), %xmm1
vsubps 0x10(%r15), %xmm0, %xmm0
vsubps 0x30(%r15), %xmm1, %xmm1
vxorps %xmm4, %xmm4, %xmm4
vblendps $0x8, %xmm4, %xmm0, %xmm2 # xmm2 = xmm0[0,1,2],xmm4[3]
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vblendps $0x8, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm4[3]
vshufps $0xc9, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,2,0,3]
vsubps %xmm2, %xmm1, %xmm1
vsubps %xmm3, %xmm4, %xmm2
vmulps %xmm3, %xmm0, %xmm4
vmulps %xmm2, %xmm0, %xmm0
vmulps %xmm1, %xmm3, %xmm3
vaddps %xmm3, %xmm0, %xmm0
vbroadcastss 0xa1b9a7(%rip), %xmm3 # 0x1eecb80
vmulps %xmm3, %xmm0, %xmm0
vaddps %xmm0, %xmm4, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vbroadcastss 0xa20cce(%rip), %xmm2 # 0x1ef1ebc
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vhaddps %xmm0, %xmm0, %xmm1
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vaddss %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmaxss %xmm1, %xmm0, %xmm0
vcvtss2sd %xmm0, %xmm0, %xmm0
movq 0x70(%r15), %rdx
vmovsd 0xa1b4d5(%rip), %xmm1 # 0x1eec6f0
leaq 0x228(%rsp), %r15
movq %r15, %rdi
movq %r14, %rsi
callq 0x14d1250
movl $0x108, %edx # imm = 0x108
movq %rbx, %rdi
movq %r15, %rsi
callq 0x6a0f0
addq $0x330, %rsp # imm = 0x330
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_statistics.cpp
|
embree::BVHNStatistics<8>::Statistics::NodeStat<embree::AABBNodeMB_t<embree::NodeRefPtr<8>, 8>>::toString[abi:cxx11](embree::BVHN<8>*, double, unsigned long) const
|
std::string toString(BVH* bvh, double sahTotal, size_t bytesTotal) const
{
std::ostringstream stream;
stream.setf(std::ios::fixed, std::ios::floatfield);
stream << "sah = " << std::setw(7) << std::setprecision(3) << sah(bvh);
stream << " (" << std::setw(6) << std::setprecision(2) << 100.0*sah(bvh)/sahTotal << "%), ";
stream << "#bytes = " << std::setw(7) << std::setprecision(2) << bytes()/1E6 << " MB ";
stream << "(" << std::setw(6) << std::setprecision(2) << 100.0*double(bytes())/double(bytesTotal) << "%), ";
stream << "#nodes = " << std::setw(7) << numNodes << " (" << std::setw(6) << std::setprecision(2) << 100.0*fillRate() << "% filled), ";
stream << "#bytes/prim = " << std::setw(6) << std::setprecision(2) << double(bytes())/double(bvh->numPrimitives);
return stream.str();
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x180, %rsp # imm = 0x180
movq %rcx, %r12
vmovsd %xmm0, (%rsp)
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
leaq 0x8(%rsp), %r13
movq %r13, %rdi
callq 0x6a980
movq (%r13), %rax
movq -0x18(%rax), %rax
movl $0xfffffefb, %ecx # imm = 0xFFFFFEFB
andl 0x20(%rsp,%rax), %ecx
orl $0x4, %ecx
movl %ecx, 0x20(%rsp,%rax)
leaq 0xa236dd(%rip), %rsi # 0x1ef7312
movl $0x6, %edx
movq %r13, %rdi
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x7, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x3, 0x10(%rsp,%rax)
vmovsd (%r15), %xmm0
vmovaps 0x20(%r14), %xmm1
vsubps 0x10(%r14), %xmm1, %xmm1
vmovaps 0x40(%r14), %xmm2
vsubps 0x30(%r14), %xmm2, %xmm2
vxorps %xmm3, %xmm3, %xmm3
vblendps $0x8, %xmm3, %xmm1, %xmm4 # xmm4 = xmm1[0,1,2],xmm3[3]
vshufps $0xc9, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,2,0,3]
vblendps $0x8, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm3[3]
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vsubps %xmm4, %xmm2, %xmm2
vsubps %xmm5, %xmm3, %xmm3
vmulps %xmm5, %xmm1, %xmm4
vmulps %xmm3, %xmm1, %xmm1
vmulps %xmm2, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vbroadcastss 0xa18ec1(%rip), %xmm5 # 0x1eecb80
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa1e1e8(%rip), %xmm3 # 0x1ef1ebc
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vhaddps %xmm1, %xmm1, %xmm2
vshufpd $0x1, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,0]
vaddss %xmm2, %xmm1, %xmm1
vcvtss2sd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
callq 0x6a760
leaq 0xa177d1(%rip), %rsi # 0x1eeb4ce
leaq 0x8(%rsp), %rdi
movl $0x2, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
vmovaps 0x20(%r14), %xmm0
vmovaps 0x40(%r14), %xmm1
vsubps 0x10(%r14), %xmm0, %xmm0
vsubps 0x30(%r14), %xmm1, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vblendps $0x8, %xmm2, %xmm0, %xmm3 # xmm3 = xmm0[0,1,2],xmm2[3]
vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3]
vblendps $0x8, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[3]
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vsubps %xmm3, %xmm1, %xmm1
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm4, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vmulps %xmm1, %xmm4, %xmm4
vaddps %xmm4, %xmm0, %xmm0
vbroadcastss 0xa18dfc(%rip), %xmm4 # 0x1eecb80
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vbroadcastss 0xa1e123(%rip), %xmm2 # 0x1ef1ebc
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vhaddps %xmm0, %xmm0, %xmm1
vmovsd (%r15), %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vaddss %xmm1, %xmm0, %xmm0
vcvtss2sd %xmm0, %xmm0, %xmm0
vdivsd %xmm0, %xmm2, %xmm0
vmulsd 0xa234bd(%rip), %xmm0, %xmm0 # 0x1ef7280
vdivsd (%rsp), %xmm0, %xmm0
callq 0x6a760
leaq 0xa23550(%rip), %rsi # 0x1ef7324
movl $0x4, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa234c4(%rip), %rsi # 0x1ef72ac
leaq 0x8(%rsp), %rdi
movl $0x9, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x7, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
imulq $0x1c0, 0x8(%r15), %rax # imm = 0x1C0
vmovq %rax, %xmm0
vpunpckldq 0xa18c8f(%rip), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
vsubpd 0xa18c97(%rip), %xmm0, %xmm0 # 0x1eecad0
vhaddpd %xmm0, %xmm0, %xmm0
vdivsd 0xa23443(%rip), %xmm0, %xmm0 # 0x1ef7288
callq 0x6a760
leaq 0xa23465(%rip), %rsi # 0x1ef72b6
movl $0x4, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa1766a(%rip), %rsi # 0x1eeb4cf
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
imulq $0x1c0, 0x8(%r15), %rax # imm = 0x1C0
vmovq %rax, %xmm0
vmovq 0xa18c12(%rip), %xmm2 # 0x1eecac0
vpunpckldq %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovapd 0xa18c16(%rip), %xmm3 # 0x1eecad0
vsubpd %xmm3, %xmm0, %xmm0
vhaddpd %xmm0, %xmm0, %xmm0
vmulsd 0xa233b6(%rip), %xmm0, %xmm0 # 0x1ef7280
vmovq %r12, %xmm1
vpunpckldq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vsubpd %xmm3, %xmm1, %xmm1
vhaddpd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
callq 0x6a760
leaq 0xa23439(%rip), %rsi # 0x1ef7324
movl $0x4, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa233bc(%rip), %rsi # 0x1ef72bb
leaq 0x8(%rsp), %rdi
movl $0x9, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x7, 0x18(%rsp,%rax)
movq 0x8(%r15), %rsi
callq 0x6a4c0
movq %rax, %r12
leaq 0xa17598(%rip), %rsi # 0x1eeb4ce
movl $0x2, %edx
movq %rax, %rdi
callq 0x6a9f0
movq (%r12), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x10(%r12,%rax)
movq (%r12), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x8(%r12,%rax)
vmovsd 0x10(%r15), %xmm0
vmovsd 0xa18b4d(%rip), %xmm2 # 0x1eecac0
vunpcklps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovapd 0xa18b51(%rip), %xmm3 # 0x1eecad0
vsubpd %xmm3, %xmm0, %xmm0
vhaddpd %xmm0, %xmm0, %xmm0
movq 0x8(%r15), %rax
shlq $0x3, %rax
vmovq %rax, %xmm1
vpunpckldq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vsubpd %xmm3, %xmm1, %xmm1
vhaddpd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
vmulsd 0xa232d4(%rip), %xmm0, %xmm0 # 0x1ef7280
movq %r12, %rdi
callq 0x6a760
leaq 0xa2330a(%rip), %rsi # 0x1ef72c5
movl $0xb, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa2312f(%rip), %rsi # 0x1ef70fe
leaq 0x8(%rsp), %rdi
movl $0xe, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
imulq $0x1c0, 0x8(%r15), %rax # imm = 0x1C0
vmovq %rax, %xmm0
vmovq 0xa18aa8(%rip), %xmm2 # 0x1eecac0
vpunpckldq %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovapd 0xa18aac(%rip), %xmm3 # 0x1eecad0
vsubpd %xmm3, %xmm0, %xmm0
vhaddpd %xmm0, %xmm0, %xmm0
vmovq 0x1f0(%r14), %xmm1
vpunpckldq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vsubpd %xmm3, %xmm1, %xmm1
vhaddpd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
callq 0x6a760
leaq 0x10(%rsp), %rsi
movq %rbx, %rdi
callq 0x6a3c0
movq 0xc50682(%rip), %rsi # 0x21246e0
leaq 0x8(%rsp), %rdi
callq 0x6a700
leaq 0x78(%rsp), %rdi
callq 0x6a6f0
movq %rbx, %rax
addq $0x180, %rsp # imm = 0x180
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0xc50650(%rip), %rsi # 0x21246e0
leaq 0x8(%rsp), %rdi
callq 0x6a700
leaq 0x78(%rsp), %rdi
callq 0x6a6f0
movq %rbx, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/bvh_statistics.h
|
embree::BVHNStatistics<8>::Statistics::NodeStat<embree::OBBNodeMB_t<embree::NodeRefPtr<8>, 8>>::toString[abi:cxx11](embree::BVHN<8>*, double, unsigned long) const
|
std::string toString(BVH* bvh, double sahTotal, size_t bytesTotal) const
{
std::ostringstream stream;
stream.setf(std::ios::fixed, std::ios::floatfield);
stream << "sah = " << std::setw(7) << std::setprecision(3) << sah(bvh);
stream << " (" << std::setw(6) << std::setprecision(2) << 100.0*sah(bvh)/sahTotal << "%), ";
stream << "#bytes = " << std::setw(7) << std::setprecision(2) << bytes()/1E6 << " MB ";
stream << "(" << std::setw(6) << std::setprecision(2) << 100.0*double(bytes())/double(bytesTotal) << "%), ";
stream << "#nodes = " << std::setw(7) << numNodes << " (" << std::setw(6) << std::setprecision(2) << 100.0*fillRate() << "% filled), ";
stream << "#bytes/prim = " << std::setw(6) << std::setprecision(2) << double(bytes())/double(bvh->numPrimitives);
return stream.str();
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x180, %rsp # imm = 0x180
movq %rcx, %r12
vmovsd %xmm0, (%rsp)
movq %rdx, %r14
movq %rsi, %r15
movq %rdi, %rbx
leaq 0x8(%rsp), %r13
movq %r13, %rdi
callq 0x6a980
movq (%r13), %rax
movq -0x18(%rax), %rax
movl $0xfffffefb, %ecx # imm = 0xFFFFFEFB
andl 0x20(%rsp,%rax), %ecx
orl $0x4, %ecx
movl %ecx, 0x20(%rsp,%rax)
leaq 0xa22d55(%rip), %rsi # 0x1ef7312
movl $0x6, %edx
movq %r13, %rdi
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x7, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x3, 0x10(%rsp,%rax)
vmovsd (%r15), %xmm0
vmovaps 0x20(%r14), %xmm1
vsubps 0x10(%r14), %xmm1, %xmm1
vmovaps 0x40(%r14), %xmm2
vsubps 0x30(%r14), %xmm2, %xmm2
vxorps %xmm3, %xmm3, %xmm3
vblendps $0x8, %xmm3, %xmm1, %xmm4 # xmm4 = xmm1[0,1,2],xmm3[3]
vshufps $0xc9, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,2,0,3]
vblendps $0x8, %xmm3, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm3[3]
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vsubps %xmm4, %xmm2, %xmm2
vsubps %xmm5, %xmm3, %xmm3
vmulps %xmm5, %xmm1, %xmm4
vmulps %xmm3, %xmm1, %xmm1
vmulps %xmm2, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vbroadcastss 0xa18539(%rip), %xmm5 # 0x1eecb80
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm1, %xmm4, %xmm1
vmulps %xmm3, %xmm2, %xmm2
vbroadcastss 0xa1d860(%rip), %xmm3 # 0x1ef1ebc
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vhaddps %xmm1, %xmm1, %xmm2
vshufpd $0x1, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,0]
vaddss %xmm2, %xmm1, %xmm1
vcvtss2sd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
callq 0x6a760
leaq 0xa16e49(%rip), %rsi # 0x1eeb4ce
leaq 0x8(%rsp), %rdi
movl $0x2, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
vmovaps 0x20(%r14), %xmm0
vmovaps 0x40(%r14), %xmm1
vsubps 0x10(%r14), %xmm0, %xmm0
vsubps 0x30(%r14), %xmm1, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vblendps $0x8, %xmm2, %xmm0, %xmm3 # xmm3 = xmm0[0,1,2],xmm2[3]
vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3]
vblendps $0x8, %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm2[3]
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vsubps %xmm3, %xmm1, %xmm1
vsubps %xmm4, %xmm2, %xmm2
vmulps %xmm4, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vmulps %xmm1, %xmm4, %xmm4
vaddps %xmm4, %xmm0, %xmm0
vbroadcastss 0xa18474(%rip), %xmm4 # 0x1eecb80
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm0, %xmm3, %xmm0
vmulps %xmm2, %xmm1, %xmm1
vbroadcastss 0xa1d79b(%rip), %xmm2 # 0x1ef1ebc
vmulps %xmm2, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vhaddps %xmm0, %xmm0, %xmm1
vmovsd (%r15), %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vaddss %xmm1, %xmm0, %xmm0
vcvtss2sd %xmm0, %xmm0, %xmm0
vdivsd %xmm0, %xmm2, %xmm0
vmulsd 0xa22b35(%rip), %xmm0, %xmm0 # 0x1ef7280
vdivsd (%rsp), %xmm0, %xmm0
callq 0x6a760
leaq 0xa22bc8(%rip), %rsi # 0x1ef7324
movl $0x4, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa22b3c(%rip), %rsi # 0x1ef72ac
leaq 0x8(%rsp), %rdi
movl $0x9, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x7, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
movq 0x8(%r15), %rax
shlq $0x7, %rax
leaq (%rax,%rax,4), %rax
vmovq %rax, %xmm0
vpunpckldq 0xa18303(%rip), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
vsubpd 0xa1830b(%rip), %xmm0, %xmm0 # 0x1eecad0
vhaddpd %xmm0, %xmm0, %xmm0
vdivsd 0xa22ab7(%rip), %xmm0, %xmm0 # 0x1ef7288
callq 0x6a760
leaq 0xa22ad9(%rip), %rsi # 0x1ef72b6
movl $0x4, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa16cde(%rip), %rsi # 0x1eeb4cf
leaq 0x8(%rsp), %rdi
movl $0x1, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
movq 0x8(%r15), %rax
shlq $0x7, %rax
leaq (%rax,%rax,4), %rax
vmovq %rax, %xmm0
vmovq 0xa18282(%rip), %xmm2 # 0x1eecac0
vpunpckldq %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovapd 0xa18286(%rip), %xmm3 # 0x1eecad0
vsubpd %xmm3, %xmm0, %xmm0
vhaddpd %xmm0, %xmm0, %xmm0
vmulsd 0xa22a26(%rip), %xmm0, %xmm0 # 0x1ef7280
vmovq %r12, %xmm1
vpunpckldq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vsubpd %xmm3, %xmm1, %xmm1
vhaddpd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
callq 0x6a760
leaq 0xa22aa9(%rip), %rsi # 0x1ef7324
movl $0x4, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa22a2c(%rip), %rsi # 0x1ef72bb
leaq 0x8(%rsp), %rdi
movl $0x9, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x7, 0x18(%rsp,%rax)
movq 0x8(%r15), %rsi
callq 0x6a4c0
movq %rax, %r12
leaq 0xa16c08(%rip), %rsi # 0x1eeb4ce
movl $0x2, %edx
movq %rax, %rdi
callq 0x6a9f0
movq (%r12), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x10(%r12,%rax)
movq (%r12), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x8(%r12,%rax)
vmovsd 0x10(%r15), %xmm0
vmovsd 0xa181bd(%rip), %xmm2 # 0x1eecac0
vunpcklps %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovapd 0xa181c1(%rip), %xmm3 # 0x1eecad0
vsubpd %xmm3, %xmm0, %xmm0
vhaddpd %xmm0, %xmm0, %xmm0
movq 0x8(%r15), %rax
shlq $0x3, %rax
vmovq %rax, %xmm1
vpunpckldq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vsubpd %xmm3, %xmm1, %xmm1
vhaddpd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
vmulsd 0xa22944(%rip), %xmm0, %xmm0 # 0x1ef7280
movq %r12, %rdi
callq 0x6a760
leaq 0xa2297a(%rip), %rsi # 0x1ef72c5
movl $0xb, %edx
movq %rax, %rdi
callq 0x6a9f0
leaq 0xa2279f(%rip), %rsi # 0x1ef70fe
leaq 0x8(%rsp), %rdi
movl $0xe, %edx
callq 0x6a9f0
leaq 0x8(%rsp), %rdi
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x6, 0x18(%rsp,%rax)
movq (%rdi), %rax
movq -0x18(%rax), %rax
movq $0x2, 0x10(%rsp,%rax)
movq 0x8(%r15), %rax
shlq $0x7, %rax
leaq (%rax,%rax,4), %rax
vmovq %rax, %xmm0
vmovq 0xa18114(%rip), %xmm2 # 0x1eecac0
vpunpckldq %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
vmovapd 0xa18118(%rip), %xmm3 # 0x1eecad0
vsubpd %xmm3, %xmm0, %xmm0
vhaddpd %xmm0, %xmm0, %xmm0
vmovq 0x1f0(%r14), %xmm1
vpunpckldq %xmm2, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
vsubpd %xmm3, %xmm1, %xmm1
vhaddpd %xmm1, %xmm1, %xmm1
vdivsd %xmm1, %xmm0, %xmm0
callq 0x6a760
leaq 0x10(%rsp), %rsi
movq %rbx, %rdi
callq 0x6a3c0
movq 0xc4fcee(%rip), %rsi # 0x21246e0
leaq 0x8(%rsp), %rdi
callq 0x6a700
leaq 0x78(%rsp), %rdi
callq 0x6a6f0
movq %rbx, %rax
addq $0x180, %rsp # imm = 0x180
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
retq
movq %rax, %rbx
movq 0xc4fcbc(%rip), %rsi # 0x21246e0
leaq 0x8(%rsp), %rdi
callq 0x6a700
leaq 0x78(%rsp), %rdi
callq 0x6a6f0
movq %rbx, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/bvh_statistics.h
|
embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::Ref embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::create<embree::avx::PatchEval<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::PatchEval(embree::SharedLazyTessellationCache::CacheEntry&, unsigned long, embree::HalfEdge const*, char const*, unsigned long, float, float, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*)::'lambda'()::operator()() const::'lambda'(unsigned long)>(embree::avx::PatchEval<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::PatchEval(embree::SharedLazyTessellationCache::CacheEntry&, unsigned long, embree::HalfEdge const*, char const*, unsigned long, float, float, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*, embree::vfloat_impl<4>*)::'lambda'()::operator()() const::'lambda'(unsigned long) const&, embree::CatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>&, embree::HalfEdge const*, char const*, unsigned long, unsigned long, embree::CubicBezierCurve<embree::vfloat_impl<4>> const*, embree::CubicBezierCurve<embree::vfloat_impl<4>> const*, embree::CubicBezierCurve<embree::vfloat_impl<4>> const*, embree::CubicBezierCurve<embree::vfloat_impl<4>> const*)
|
__noinline static Ref create(const Allocator& alloc, CatmullClarkPatch& patch, const HalfEdge* edge, const char* vertices, size_t stride, size_t depth,
const BezierCurve* border0 = nullptr, const BezierCurve* border1 = nullptr, const BezierCurve* border2 = nullptr, const BezierCurve* border3 = nullptr)
{
const typename CatmullClarkPatch::Type ty = patch.type();
if (unlikely(final(patch,ty,depth))) {
if (ty & CatmullClarkRing::TYPE_REGULAR) return RegularPatch::create(alloc,patch,border0,border1,border2,border3);
else return IrregularFillPatch::create(alloc,patch,border0,border1,border2,border3);
}
else if (ty & CatmullClarkRing::TYPE_REGULAR_CREASES) {
assert(depth > 0); return RegularPatch::create(alloc,patch,border0,border1,border2,border3);
}
#if PATCH_USE_GREGORY == 2
else if (ty & CatmullClarkRing::TYPE_GREGORY_CREASES) {
assert(depth > 0); return GregoryPatch::create(alloc,patch,border0,border1,border2,border3);
}
#endif
else if (depth >= PATCH_MAX_CACHE_DEPTH) {
return EvalPatch::create(alloc,patch);
}
else
{
Ref child[4];
array_t<CatmullClarkPatch,4> patches;
patch.subdivide(patches);
for (size_t i=0; i<4; i++)
child[i] = PatchT::create(alloc,patches[i],edge,vertices,stride,depth+1);
return SubdividedQuadPatch::create(alloc,child);
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x34c0, %rsp # imm = 0x34C0
movq %r9, %r14
movq %r8, 0x50(%rsp)
movq %rcx, 0x58(%rsp)
movq %rdx, %r13
movq %rdi, %rbx
movabsq $0x1fffffffc, %rcx # imm = 0x1FFFFFFFC
movl 0x4(%rsi), %edx
testq %rdx, %rdx
je 0x14dc896
movq 0x80(%rsi), %rax
leaq 0x3(%rdx), %rdi
andq %rcx, %rdi
leaq -0x1(%rdx), %r8
vmovq %r8, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm2, %xmm2, %xmm2
xorl %r8d, %r8d
vmovaps 0xa4567a(%rip), %ymm3 # 0x1f21e40
vextractf128 $0x1, %ymm0, %xmm4
vmovddup 0xa456b4(%rip), %xmm1 # xmm1 = mem[0,0]
vxorps %xmm1, %xmm4, %xmm4
vxorps %xmm1, %xmm0, %xmm5
vpcmpeqd %xmm6, %xmm6, %xmm6
vxorps %xmm7, %xmm7, %xmm7
vmovddup 0xa7e0b4(%rip), %xmm8 # xmm8 = mem[0,0]
vmovq %r8, %xmm9
vpshufd $0x44, %xmm9, %xmm9 # xmm9 = xmm9[0,1,0,1]
vinsertf128 $0x1, %xmm9, %ymm9, %ymm9
vorps %ymm3, %ymm9, %ymm9
vextractf128 $0x1, %ymm9, %xmm10
vxorps %xmm1, %xmm10, %xmm10
vpcmpgtq %xmm4, %xmm10, %xmm10
vxorps %xmm1, %xmm9, %xmm11
vpcmpgtq %xmm5, %xmm11, %xmm11
vpackssdw %xmm10, %xmm11, %xmm11
vpxor %xmm6, %xmm11, %xmm11
vmaskmovps (%rax,%r8,4), %xmm11, %xmm12
vmovdqa %ymm2, %ymm11
vcmpltps %xmm12, %xmm7, %xmm2
vshufps $0xd4, %xmm2, %xmm2, %xmm13 # xmm13 = xmm2[0,1,1,3]
vshufps $0xfa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,3,3]
vandps %xmm2, %xmm8, %xmm2
vextractf128 $0x1, %ymm11, %xmm12
vpaddq %xmm2, %xmm12, %xmm2
vandps %xmm8, %xmm13, %xmm13
vpaddq %xmm13, %xmm11, %xmm13
vinsertf128 $0x1, %xmm2, %ymm13, %ymm2
addq $0x4, %r8
cmpq %r8, %rdi
jne 0x14dc7ec
vxorps %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm9, %xmm1
vpcmpgtq %xmm0, %xmm1, %xmm0
vextractf128 $0x1, %ymm2, %xmm1
vblendvpd %xmm10, %xmm12, %xmm1, %xmm1
vblendvpd %xmm0, %xmm11, %xmm2, %xmm0
vpaddq %xmm1, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rax
jmp 0x14dc898
xorl %eax, %eax
movl (%rsi), %r8d
xorl %edi, %edi
cmpl $-0x1, %r8d
setne %dil
sete %r9b
addl %edi, %edi
cmpq %rdi, %rax
movl $0xf, %eax
movl $0x5, %edi
cmovel %eax, %edi
cmpl $0x2, %edx
setne %al
orb %r9b, %al
vmovss 0xc(%rsi), %xmm0
je 0x14dc903
movl $0x10, %eax
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dc91f
jp 0x14dc91f
cmpl $-0x1, %r8d
sete %al
cmpl $0x3, %edx
setne %r9b
orb %al, %r9b
cmpb $0x1, %r9b
jne 0x14dc91d
notl %r8d
xorl $0x4, %edx
xorl %eax, %eax
orl %r8d, %edx
cmovel %edi, %eax
jmp 0x14dc91f
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dc90f
jnp 0x14dc91d
vucomiss 0xa0f109(%rip), %xmm0 # 0x1eeba20
jb 0x14dcfb4
movl %edi, %eax
movl 0x344(%rsi), %edx
testq %rdx, %rdx
je 0x14dca2f
movq 0x3c0(%rsi), %rdi
leaq 0x3(%rdx), %r8
andq %rcx, %r8
leaq -0x1(%rdx), %r9
vmovq %r9, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm2, %xmm2, %xmm2
xorl %r9d, %r9d
vmovaps 0xa454e1(%rip), %ymm3 # 0x1f21e40
vextractf128 $0x1, %ymm0, %xmm4
vmovddup 0xa4551b(%rip), %xmm1 # xmm1 = mem[0,0]
vxorps %xmm1, %xmm4, %xmm4
vxorps %xmm1, %xmm0, %xmm5
vpcmpeqd %xmm6, %xmm6, %xmm6
vxorps %xmm7, %xmm7, %xmm7
vmovddup 0xa7df1b(%rip), %xmm8 # xmm8 = mem[0,0]
vmovq %r9, %xmm9
vpshufd $0x44, %xmm9, %xmm9 # xmm9 = xmm9[0,1,0,1]
vinsertf128 $0x1, %xmm9, %ymm9, %ymm9
vorps %ymm3, %ymm9, %ymm9
vextractf128 $0x1, %ymm9, %xmm10
vxorps %xmm1, %xmm10, %xmm10
vpcmpgtq %xmm4, %xmm10, %xmm10
vxorps %xmm1, %xmm9, %xmm11
vpcmpgtq %xmm5, %xmm11, %xmm11
vpackssdw %xmm10, %xmm11, %xmm11
vpxor %xmm6, %xmm11, %xmm11
vmaskmovps (%rdi,%r9,4), %xmm11, %xmm12
vmovdqa %ymm2, %ymm11
vcmpltps %xmm12, %xmm7, %xmm2
vshufps $0xd4, %xmm2, %xmm2, %xmm13 # xmm13 = xmm2[0,1,1,3]
vshufps $0xfa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,3,3]
vandps %xmm2, %xmm8, %xmm2
vextractf128 $0x1, %ymm11, %xmm12
vpaddq %xmm2, %xmm12, %xmm2
vandps %xmm8, %xmm13, %xmm13
vpaddq %xmm13, %xmm11, %xmm13
vinsertf128 $0x1, %xmm2, %ymm13, %ymm2
addq $0x4, %r9
cmpq %r9, %r8
jne 0x14dc985
vxorps %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm9, %xmm1
vpcmpgtq %xmm0, %xmm1, %xmm0
vextractf128 $0x1, %ymm2, %xmm1
vblendvpd %xmm10, %xmm12, %xmm1, %xmm1
vblendvpd %xmm0, %xmm11, %xmm2, %xmm0
vpaddq %xmm1, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rdi
jmp 0x14dca31
xorl %edi, %edi
movl 0x340(%rsi), %r9d
xorl %r8d, %r8d
cmpl $-0x1, %r9d
setne %r8b
sete %r10b
addl %r8d, %r8d
cmpq %r8, %rdi
movl $0xf, %edi
movl $0x5, %r8d
cmovel %edi, %r8d
cmpl $0x2, %edx
setne %dil
orb %r10b, %dil
vmovss 0x34c(%rsi), %xmm0
je 0x14dcaaa
movl $0x10, %edi
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dcac7
jp 0x14dcac7
cmpl $-0x1, %r9d
sete %dil
cmpl $0x3, %edx
setne %r10b
orb %dil, %r10b
cmpb $0x1, %r10b
jne 0x14dcac4
notl %r9d
xorl $0x4, %edx
xorl %edi, %edi
orl %r9d, %edx
cmovel %r8d, %edi
jmp 0x14dcac7
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dcab6
jnp 0x14dcac4
vucomiss 0xa0ef62(%rip), %xmm0 # 0x1eeba20
jb 0x14dcfbe
movl %r8d, %edi
movl 0x684(%rsi), %edx
testq %rdx, %rdx
je 0x14dcbd7
movq 0x700(%rsi), %r8
leaq 0x3(%rdx), %r9
andq %rcx, %r9
leaq -0x1(%rdx), %r10
vmovq %r10, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm2, %xmm2, %xmm2
xorl %r10d, %r10d
vmovaps 0xa45339(%rip), %ymm3 # 0x1f21e40
vextractf128 $0x1, %ymm0, %xmm4
vmovddup 0xa45373(%rip), %xmm1 # xmm1 = mem[0,0]
vxorps %xmm1, %xmm4, %xmm4
vxorps %xmm1, %xmm0, %xmm5
vpcmpeqd %xmm6, %xmm6, %xmm6
vxorps %xmm7, %xmm7, %xmm7
vmovddup 0xa7dd73(%rip), %xmm8 # xmm8 = mem[0,0]
vmovq %r10, %xmm9
vpshufd $0x44, %xmm9, %xmm9 # xmm9 = xmm9[0,1,0,1]
vinsertf128 $0x1, %xmm9, %ymm9, %ymm9
vorps %ymm3, %ymm9, %ymm9
vextractf128 $0x1, %ymm9, %xmm10
vxorps %xmm1, %xmm10, %xmm10
vpcmpgtq %xmm4, %xmm10, %xmm10
vxorps %xmm1, %xmm9, %xmm11
vpcmpgtq %xmm5, %xmm11, %xmm11
vpackssdw %xmm10, %xmm11, %xmm11
vpxor %xmm6, %xmm11, %xmm11
vmaskmovps (%r8,%r10,4), %xmm11, %xmm12
vmovdqa %ymm2, %ymm11
vcmpltps %xmm12, %xmm7, %xmm2
vshufps $0xd4, %xmm2, %xmm2, %xmm13 # xmm13 = xmm2[0,1,1,3]
vshufps $0xfa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,3,3]
vandps %xmm2, %xmm8, %xmm2
vextractf128 $0x1, %ymm11, %xmm12
vpaddq %xmm2, %xmm12, %xmm2
vandps %xmm8, %xmm13, %xmm13
vpaddq %xmm13, %xmm11, %xmm13
vinsertf128 $0x1, %xmm2, %ymm13, %ymm2
addq $0x4, %r10
cmpq %r10, %r9
jne 0x14dcb2d
vxorps %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm9, %xmm1
vpcmpgtq %xmm0, %xmm1, %xmm0
vextractf128 $0x1, %ymm2, %xmm1
vblendvpd %xmm10, %xmm12, %xmm1, %xmm1
vblendvpd %xmm0, %xmm11, %xmm2, %xmm0
vpaddq %xmm1, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %r8
jmp 0x14dcbda
xorl %r8d, %r8d
movl 0x680(%rsi), %r9d
xorl %r10d, %r10d
cmpl $-0x1, %r9d
setne %r10b
sete %r11b
addl %r10d, %r10d
cmpq %r10, %r8
movl $0xf, %r10d
movl $0x5, %r8d
cmovel %r10d, %r8d
cmpl $0x2, %edx
setne %r10b
orb %r11b, %r10b
vmovss 0x68c(%rsi), %xmm0
je 0x14dcc56
movl $0x10, %r10d
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dcc73
jp 0x14dcc73
cmpl $-0x1, %r9d
sete %r10b
cmpl $0x3, %edx
setne %r11b
orb %r10b, %r11b
cmpb $0x1, %r11b
jne 0x14dcc70
notl %r9d
xorl $0x4, %edx
xorl %r10d, %r10d
orl %r9d, %edx
cmovel %r8d, %r10d
jmp 0x14dcc73
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dcc62
jnp 0x14dcc70
vucomiss 0xa0edb6(%rip), %xmm0 # 0x1eeba20
jb 0x14dcfc8
movl %r8d, %r10d
movl 0x9c4(%rsi), %edx
testq %rdx, %rdx
je 0x14dcd82
movq 0xa40(%rsi), %r8
leaq 0x3(%rdx), %r9
andq %rcx, %r9
leaq -0x1(%rdx), %rcx
vmovq %rcx, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm2, %xmm2, %xmm2
xorl %ecx, %ecx
vmovaps 0xa4518e(%rip), %ymm3 # 0x1f21e40
vextractf128 $0x1, %ymm0, %xmm4
vmovddup 0xa451c8(%rip), %xmm1 # xmm1 = mem[0,0]
vxorps %xmm1, %xmm4, %xmm4
vxorps %xmm1, %xmm0, %xmm5
vpcmpeqd %xmm6, %xmm6, %xmm6
vxorps %xmm7, %xmm7, %xmm7
vmovddup 0xa7dbc8(%rip), %xmm8 # xmm8 = mem[0,0]
vmovq %rcx, %xmm9
vpshufd $0x44, %xmm9, %xmm9 # xmm9 = xmm9[0,1,0,1]
vinsertf128 $0x1, %xmm9, %ymm9, %ymm9
vorps %ymm3, %ymm9, %ymm9
vextractf128 $0x1, %ymm9, %xmm10
vxorps %xmm1, %xmm10, %xmm10
vpcmpgtq %xmm4, %xmm10, %xmm10
vxorps %xmm1, %xmm9, %xmm11
vpcmpgtq %xmm5, %xmm11, %xmm11
vpackssdw %xmm10, %xmm11, %xmm11
vpxor %xmm6, %xmm11, %xmm11
vmaskmovps (%r8,%rcx,4), %xmm11, %xmm12
vmovdqa %ymm2, %ymm11
vcmpltps %xmm12, %xmm7, %xmm2
vshufps $0xd4, %xmm2, %xmm2, %xmm13 # xmm13 = xmm2[0,1,1,3]
vshufps $0xfa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,3,3]
vandps %xmm2, %xmm8, %xmm2
vextractf128 $0x1, %ymm11, %xmm12
vpaddq %xmm2, %xmm12, %xmm2
vandps %xmm8, %xmm13, %xmm13
vpaddq %xmm13, %xmm11, %xmm13
vinsertf128 $0x1, %xmm2, %ymm13, %ymm2
addq $0x4, %rcx
cmpq %rcx, %r9
jne 0x14dccd8
vxorps %xmm1, %xmm0, %xmm0
vxorps %xmm1, %xmm9, %xmm1
vpcmpgtq %xmm0, %xmm1, %xmm0
vextractf128 $0x1, %ymm2, %xmm1
vblendvpd %xmm10, %xmm12, %xmm1, %xmm1
vblendvpd %xmm0, %xmm11, %xmm2, %xmm0
vpaddq %xmm1, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rcx
jmp 0x14dcd84
xorl %ecx, %ecx
movl 0x9c0(%rsi), %r8d
xorl %r9d, %r9d
cmpl $-0x1, %r8d
setne %r9b
sete %r11b
addl %r9d, %r9d
cmpq %r9, %rcx
movl $0xf, %r9d
movl $0x5, %ecx
cmovel %r9d, %ecx
cmpl $0x2, %edx
setne %r9b
orb %r11b, %r9b
vmovss 0x9cc(%rsi), %xmm0
je 0x14dcdff
movl $0x10, %r11d
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dce1c
jp 0x14dce1c
cmpl $-0x1, %r8d
sete %r9b
cmpl $0x3, %edx
setne %r11b
orb %r9b, %r11b
cmpb $0x1, %r11b
jne 0x14dce19
notl %r8d
xorl $0x4, %edx
xorl %r11d, %r11d
orl %r8d, %edx
cmovel %ecx, %r11d
jmp 0x14dce1c
vpxor %xmm1, %xmm1, %xmm1
vucomiss %xmm1, %xmm0
jne 0x14dce0b
jnp 0x14dce19
vucomiss 0xa0ec0d(%rip), %xmm0 # 0x1eeba20
jb 0x14dcfd3
movl %ecx, %r11d
movq 0x28(%rbp), %r9
movq 0x20(%rbp), %r8
movq 0x18(%rbp), %rcx
movq 0x10(%rbp), %rdx
andl %eax, %edi
andl %r10d, %edi
andl %r11d, %edi
cmpq $0xa, %r14
jae 0x14dcfde
testb $0x2, %dil
jne 0x14dcff1
cmpq $0x2, %r14
jb 0x14dce5e
movq %rbx, %rdi
vzeroupper
callq 0x14dd138
jmp 0x14dcffc
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x60(%rsp)
leaq 0x380(%rsp), %rax
xorl %ecx, %ecx
movl $0xd00, %edx # imm = 0xD00
movq %rax, %rdi
movl $0xd00, %r8d # imm = 0xD00
leaq -0x2c0(%rdi), %r9
movq %r9, -0x280(%rdi)
movq $0x0, -0x238(%rdi)
leaq -0x200(%rdi), %r9
movq %r9, (%rdi)
addq $0x340, %rdi # imm = 0x340
addq $-0x340, %r8 # imm = 0xFCC0
jne 0x14dce80
addq %rdx, %rcx
addq %rdx, %rax
cmpq $0x3400, %rcx # imm = 0x3400
jne 0x14dce77
leaq 0x80(%rsp), %r15
movq %rsi, %rdi
movq %r15, %rsi
vzeroupper
callq 0xd2a2f0
incq %r14
xorl %r12d, %r12d
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, (%rsp)
movq %rbx, %rdi
movq %r15, %rsi
movq %r13, %rdx
movq 0x58(%rsp), %rcx
movq 0x50(%rsp), %r8
movq %r14, %r9
vzeroupper
callq 0x14dc754
movq %rax, 0x60(%rsp,%r12,8)
incq %r12
addq $0xd00, %r15 # imm = 0xD00
cmpq $0x4, %r12
jne 0x14dcede
leaq 0x60(%rsp), %rsi
movq %rbx, %rdi
callq 0x14dd02e
movq %rax, 0x50(%rsp)
leaq 0x3480(%rsp), %r14
leaq 0x3180(%rsp), %rbx
leaq 0x80(%rsp), %r12
addq $-0xd00, %r14 # imm = 0xF300
movq $-0xd00, %r13 # imm = 0xF300
movq %rbx, %r15
leaq 0xc0(%r15), %rax
movq 0x2c0(%r15), %rdi
cmpq %rdi, %rax
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14dcf78
callq 0x1ee612d
movq 0x40(%r15), %rdi
cmpq %rdi, %r15
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14dcf91
callq 0x6a370
addq $-0x340, %r15 # imm = 0xFCC0
addq $0x340, %r13 # imm = 0x340
jne 0x14dcf55
addq $-0xd00, %rbx # imm = 0xF300
cmpq %r12, %r14
jne 0x14dcf44
movq 0x50(%rsp), %rax
jmp 0x14dcffc
movl $0x10, %eax
jmp 0x14dc91f
movl $0x10, %edi
jmp 0x14dcac7
movl $0x10, %r10d
jmp 0x14dcc73
movl $0x10, %r11d
jmp 0x14dce1c
testb $0x1, %dil
jne 0x14dcff1
movq %rbx, %rdi
vzeroupper
callq 0x14dd0e4
jmp 0x14dcffc
movq %rbx, %rdi
vzeroupper
callq 0x14dd0b6
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x14dd00d
movq %rax, %rbx
leaq 0x80(%rsp), %rdi
callq 0xd2e1b2
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
nop
|
/embree[P]embree/kernels/common/../subdiv/patch.h
|
embree::avx::FeatureAdaptiveEval<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::eval(embree::CatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>&, embree::Vec2<float>, float, unsigned long, embree::CubicBezierCurve<embree::vfloat_impl<4>>*, embree::CubicBezierCurve<embree::vfloat_impl<4>>*, embree::CubicBezierCurve<embree::vfloat_impl<4>>*, embree::CubicBezierCurve<embree::vfloat_impl<4>>*)
|
void eval(CatmullClarkPatch& patch, Vec2f uv, float dscale, size_t depth,
BezierCurve* border0 = nullptr, BezierCurve* border1 = nullptr, BezierCurve* border2 = nullptr, BezierCurve* border3 = nullptr)
{
while (true)
{
typename CatmullClarkPatch::Type ty = patch.type();
if (unlikely(final(patch,ty,depth)))
{
if (ty & CatmullClarkRing::TYPE_REGULAR) {
RegularPatch(patch,border0,border1,border2,border3).eval(uv.x,uv.y,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,dscale);
PATCH_DEBUG_SUBDIVISION(234423,c,c,-1);
return;
} else {
IrregularFillPatch(patch,border0,border1,border2,border3).eval(uv.x,uv.y,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,dscale);
PATCH_DEBUG_SUBDIVISION(34534,c,-1,c);
return;
}
}
else if (ty & CatmullClarkRing::TYPE_REGULAR_CREASES) {
assert(depth > 0);
RegularPatch(patch,border0,border1,border2,border3).eval(uv.x,uv.y,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,dscale);
PATCH_DEBUG_SUBDIVISION(43524,c,c,-1);
return;
}
#if PATCH_USE_GREGORY == 2
else if (ty & CatmullClarkRing::TYPE_GREGORY_CREASES) {
assert(depth > 0);
GregoryPatch(patch,border0,border1,border2,border3).eval(uv.x,uv.y,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,dscale);
PATCH_DEBUG_SUBDIVISION(23498,c,-1,c);
return;
}
#endif
else
{
array_t<CatmullClarkPatch,4> patches;
patch.subdivide(patches); // FIXME: only have to generate one of the patches
const float u = uv.x, v = uv.y;
if (v < 0.5f) {
if (u < 0.5f) { patch = patches[0]; uv = Vec2f(2.0f*u,2.0f*v); dscale *= 2.0f; }
else { patch = patches[1]; uv = Vec2f(2.0f*u-1.0f,2.0f*v); dscale *= 2.0f; }
} else {
if (u > 0.5f) { patch = patches[2]; uv = Vec2f(2.0f*u-1.0f,2.0f*v-1.0f); dscale *= 2.0f; }
else { patch = patches[3]; uv = Vec2f(2.0f*u,2.0f*v-1.0f); dscale *= 2.0f; }
}
depth++;
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x3540, %rsp # imm = 0x3540
movq %r9, 0xd8(%rsp)
movq %r8, 0xd0(%rsp)
movq %rcx, %r14
vmovaps %xmm0, 0x90(%rsp)
movq %rdx, 0x58(%rsp)
movq %rsi, %r13
movq %rdi, 0x78(%rsp)
movabsq $0x1fffffffc, %rbx # imm = 0x1FFFFFFFC
vmovddup 0xa44b29(%rip), %xmm0 # xmm0 = mem[0,0]
vmovaps %xmm0, 0xb0(%rsp)
vmovddup 0xa7d530(%rip), %xmm0 # xmm0 = mem[0,0]
vmovaps %xmm0, 0xa0(%rsp)
movl $0xf, %r12d
movl 0x4(%r13), %ecx
testq %rcx, %rcx
je 0x14dd48b
movq 0x80(%r13), %rax
leaq 0x3(%rcx), %rdx
andq %rbx, %rdx
leaq -0x1(%rcx), %rsi
vmovq %rsi, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vmovdqa 0xb0(%rsp), %xmm9
vpxor %xmm0, %xmm9, %xmm2
xorl %esi, %esi
vmovaps 0xa44a77(%rip), %ymm8 # 0x1f21e40
vpcmpeqd %xmm10, %xmm10, %xmm10
vxorps %xmm11, %xmm11, %xmm11
vmovaps 0xa0(%rsp), %xmm12
vmovq %rsi, %xmm3
vpshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vorps %ymm3, %ymm8, %ymm3
vextractf128 $0x1, %ymm0, %xmm4
vpxor %xmm4, %xmm9, %xmm4
vextractf128 $0x1, %ymm3, %xmm5
vpxor %xmm5, %xmm9, %xmm5
vpcmpgtq %xmm4, %xmm5, %xmm4
vpxor %xmm3, %xmm9, %xmm5
vpcmpgtq %xmm2, %xmm5, %xmm5
vpackssdw %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm10, %xmm5
vmaskmovps (%rax,%rsi,4), %xmm5, %xmm6
vmovdqa %ymm1, %ymm5
vcmpltps %xmm6, %xmm11, %xmm1
vshufps $0xd4, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[0,1,1,3]
vshufps $0xfa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
vandps %xmm1, %xmm12, %xmm1
vextractf128 $0x1, %ymm5, %xmm6
vpaddq %xmm1, %xmm6, %xmm1
vandps %xmm7, %xmm12, %xmm7
vpaddq %xmm7, %xmm5, %xmm7
vinsertf128 $0x1, %xmm1, %ymm7, %ymm1
addq $0x4, %rsi
cmpq %rsi, %rdx
jne 0x14dd3dc
vpxor %xmm0, %xmm9, %xmm0
vpxor %xmm3, %xmm9, %xmm2
vpcmpgtq %xmm0, %xmm2, %xmm0
vextractf128 $0x1, %ymm1, %xmm2
vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
vblendvpd %xmm0, %xmm5, %xmm1, %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rax
jmp 0x14dd48d
xorl %eax, %eax
movl (%r13), %esi
xorl %edx, %edx
cmpl $-0x1, %esi
setne %dl
sete %dil
addl %edx, %edx
cmpq %rdx, %rax
movl $0x5, %edx
cmovel %r12d, %edx
cmpl $0x2, %ecx
setne %al
orb %dil, %al
je 0x14dd4f4
vmovss 0xc(%r13), %xmm0
movl $0x10, %eax
vucomiss 0xa0e55b(%rip), %xmm0 # 0x1eeba24
jne 0x14dd516
jp 0x14dd516
cmpl $-0x1, %esi
sete %al
cmpl $0x3, %ecx
setne %dil
orb %al, %dil
cmpb $0x1, %dil
jne 0x14dd514
notl %esi
xorl $0x4, %ecx
orl %esi, %ecx
movl $0x0, %eax
cmovnel %eax, %edx
jmp 0x14dd514
vmovss 0xc(%r13), %xmm0
vucomiss 0xa0e522(%rip), %xmm0 # 0x1eeba24
jne 0x14dd506
jnp 0x14dd514
vucomiss 0xa0e512(%rip), %xmm0 # 0x1eeba20
jb 0x14dde12
movl %edx, %eax
movl 0x344(%r13), %edx
testq %rdx, %rdx
je 0x14dd625
movq 0x3c0(%r13), %rcx
leaq 0x3(%rdx), %rsi
andq %rbx, %rsi
leaq -0x1(%rdx), %rdi
vmovq %rdi, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vmovdqa 0xb0(%rsp), %xmm9
vpxor %xmm0, %xmm9, %xmm2
xorl %edi, %edi
vmovaps 0xa448dd(%rip), %ymm8 # 0x1f21e40
vpcmpeqd %xmm10, %xmm10, %xmm10
vxorps %xmm11, %xmm11, %xmm11
vmovaps 0xa0(%rsp), %xmm12
vmovq %rdi, %xmm3
vpshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vorps %ymm3, %ymm8, %ymm3
vextractf128 $0x1, %ymm0, %xmm4
vpxor %xmm4, %xmm9, %xmm4
vextractf128 $0x1, %ymm3, %xmm5
vpxor %xmm5, %xmm9, %xmm5
vpcmpgtq %xmm4, %xmm5, %xmm4
vpxor %xmm3, %xmm9, %xmm5
vpcmpgtq %xmm2, %xmm5, %xmm5
vpackssdw %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm10, %xmm5
vmaskmovps (%rcx,%rdi,4), %xmm5, %xmm6
vmovdqa %ymm1, %ymm5
vcmpltps %xmm6, %xmm11, %xmm1
vshufps $0xd4, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[0,1,1,3]
vshufps $0xfa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
vandps %xmm1, %xmm12, %xmm1
vextractf128 $0x1, %ymm5, %xmm6
vpaddq %xmm1, %xmm6, %xmm1
vandps %xmm7, %xmm12, %xmm7
vpaddq %xmm7, %xmm5, %xmm7
vinsertf128 $0x1, %xmm1, %ymm7, %ymm1
addq $0x4, %rdi
cmpq %rdi, %rsi
jne 0x14dd576
vpxor %xmm0, %xmm9, %xmm0
vpxor %xmm3, %xmm9, %xmm2
vpcmpgtq %xmm0, %xmm2, %xmm0
vextractf128 $0x1, %ymm1, %xmm2
vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
vblendvpd %xmm0, %xmm5, %xmm1, %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rcx
jmp 0x14dd627
xorl %ecx, %ecx
movl 0x340(%r13), %edi
xorl %esi, %esi
cmpl $-0x1, %edi
setne %sil
sete %r8b
addl %esi, %esi
cmpq %rsi, %rcx
movl $0x5, %esi
cmovel %r12d, %esi
cmpl $0x2, %edx
setne %cl
orb %r8b, %cl
je 0x14dd695
vmovss 0x34c(%r13), %xmm0
movl $0x10, %ecx
vucomiss 0xa0e3ba(%rip), %xmm0 # 0x1eeba24
jne 0x14dd6ba
jp 0x14dd6ba
cmpl $-0x1, %edi
sete %cl
cmpl $0x3, %edx
setne %r8b
orb %cl, %r8b
cmpb $0x1, %r8b
jne 0x14dd6b8
notl %edi
xorl $0x4, %edx
orl %edi, %edx
movl $0x0, %ecx
cmovnel %ecx, %esi
jmp 0x14dd6b8
vmovss 0x34c(%r13), %xmm0
vucomiss 0xa0e37e(%rip), %xmm0 # 0x1eeba24
jne 0x14dd6aa
jnp 0x14dd6b8
vucomiss 0xa0e36e(%rip), %xmm0 # 0x1eeba20
jb 0x14dde1c
movl %esi, %ecx
movl 0x684(%r13), %esi
testq %rsi, %rsi
je 0x14dd7ca
movq 0x700(%r13), %rdx
leaq 0x3(%rsi), %rdi
andq %rbx, %rdi
leaq -0x1(%rsi), %r8
vmovq %r8, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vmovdqa 0xb0(%rsp), %xmm9
vpxor %xmm0, %xmm9, %xmm2
xorl %r8d, %r8d
vmovaps 0xa44738(%rip), %ymm8 # 0x1f21e40
vpcmpeqd %xmm10, %xmm10, %xmm10
vxorps %xmm11, %xmm11, %xmm11
vmovaps 0xa0(%rsp), %xmm12
vmovq %r8, %xmm3
vpshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vorps %ymm3, %ymm8, %ymm3
vextractf128 $0x1, %ymm0, %xmm4
vpxor %xmm4, %xmm9, %xmm4
vextractf128 $0x1, %ymm3, %xmm5
vpxor %xmm5, %xmm9, %xmm5
vpcmpgtq %xmm4, %xmm5, %xmm4
vpxor %xmm3, %xmm9, %xmm5
vpcmpgtq %xmm2, %xmm5, %xmm5
vpackssdw %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm10, %xmm5
vmaskmovps (%rdx,%r8,4), %xmm5, %xmm6
vmovdqa %ymm1, %ymm5
vcmpltps %xmm6, %xmm11, %xmm1
vshufps $0xd4, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[0,1,1,3]
vshufps $0xfa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
vandps %xmm1, %xmm12, %xmm1
vextractf128 $0x1, %ymm5, %xmm6
vpaddq %xmm1, %xmm6, %xmm1
vandps %xmm7, %xmm12, %xmm7
vpaddq %xmm7, %xmm5, %xmm7
vinsertf128 $0x1, %xmm1, %ymm7, %ymm1
addq $0x4, %r8
cmpq %r8, %rdi
jne 0x14dd71b
vpxor %xmm0, %xmm9, %xmm0
vpxor %xmm3, %xmm9, %xmm2
vpcmpgtq %xmm0, %xmm2, %xmm0
vextractf128 $0x1, %ymm1, %xmm2
vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
vblendvpd %xmm0, %xmm5, %xmm1, %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rdx
jmp 0x14dd7cc
xorl %edx, %edx
movl 0x680(%r13), %r8d
xorl %edi, %edi
cmpl $-0x1, %r8d
setne %dil
sete %r9b
addl %edi, %edi
cmpq %rdi, %rdx
movl $0x5, %edi
cmovel %r12d, %edi
cmpl $0x2, %esi
setne %dl
orb %r9b, %dl
je 0x14dd83e
vmovss 0x68c(%r13), %xmm0
movl $0x10, %edx
vucomiss 0xa0e214(%rip), %xmm0 # 0x1eeba24
jne 0x14dd863
jp 0x14dd863
cmpl $-0x1, %r8d
sete %dl
cmpl $0x3, %esi
setne %r9b
orb %dl, %r9b
cmpb $0x1, %r9b
jne 0x14dd861
notl %r8d
xorl $0x4, %esi
orl %r8d, %esi
movl $0x0, %edx
cmovnel %edx, %edi
jmp 0x14dd861
vmovss 0x68c(%r13), %xmm0
vucomiss 0xa0e1d5(%rip), %xmm0 # 0x1eeba24
jne 0x14dd853
jnp 0x14dd861
vucomiss 0xa0e1c5(%rip), %xmm0 # 0x1eeba20
jb 0x14dde26
movl %edi, %edx
movl 0x9c4(%r13), %esi
testq %rsi, %rsi
je 0x14dd973
movq 0xa40(%r13), %rdi
leaq 0x3(%rsi), %r8
andq %rbx, %r8
leaq -0x1(%rsi), %r9
vmovq %r9, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vpxor %xmm1, %xmm1, %xmm1
vmovdqa 0xb0(%rsp), %xmm9
vpxor %xmm0, %xmm9, %xmm2
xorl %r9d, %r9d
vmovaps 0xa4458f(%rip), %ymm8 # 0x1f21e40
vpcmpeqd %xmm10, %xmm10, %xmm10
vxorps %xmm11, %xmm11, %xmm11
vmovaps 0xa0(%rsp), %xmm12
vmovq %r9, %xmm3
vpshufd $0x44, %xmm3, %xmm3 # xmm3 = xmm3[0,1,0,1]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vorps %ymm3, %ymm8, %ymm3
vextractf128 $0x1, %ymm0, %xmm4
vpxor %xmm4, %xmm9, %xmm4
vextractf128 $0x1, %ymm3, %xmm5
vpxor %xmm5, %xmm9, %xmm5
vpcmpgtq %xmm4, %xmm5, %xmm4
vpxor %xmm3, %xmm9, %xmm5
vpcmpgtq %xmm2, %xmm5, %xmm5
vpackssdw %xmm4, %xmm5, %xmm5
vpxor %xmm5, %xmm10, %xmm5
vmaskmovps (%rdi,%r9,4), %xmm5, %xmm6
vmovdqa %ymm1, %ymm5
vcmpltps %xmm6, %xmm11, %xmm1
vshufps $0xd4, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[0,1,1,3]
vshufps $0xfa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,3,3]
vandps %xmm1, %xmm12, %xmm1
vextractf128 $0x1, %ymm5, %xmm6
vpaddq %xmm1, %xmm6, %xmm1
vandps %xmm7, %xmm12, %xmm7
vpaddq %xmm7, %xmm5, %xmm7
vinsertf128 $0x1, %xmm1, %ymm7, %ymm1
addq $0x4, %r9
cmpq %r9, %r8
jne 0x14dd8c4
vpxor %xmm0, %xmm9, %xmm0
vpxor %xmm3, %xmm9, %xmm2
vpcmpgtq %xmm0, %xmm2, %xmm0
vextractf128 $0x1, %ymm1, %xmm2
vblendvpd %xmm4, %xmm6, %xmm2, %xmm2
vblendvpd %xmm0, %xmm5, %xmm1, %xmm0
vpaddq %xmm2, %xmm0, %xmm0
vpshufd $0xee, %xmm0, %xmm1 # xmm1 = xmm0[2,3,2,3]
vpaddq %xmm1, %xmm0, %xmm0
vmovq %xmm0, %rdi
jmp 0x14dd975
xorl %edi, %edi
movl 0x9c0(%r13), %r8d
xorl %r9d, %r9d
cmpl $-0x1, %r8d
setne %r9b
sete %r10b
addl %r9d, %r9d
cmpq %r9, %rdi
movl $0x5, %edi
cmovel %r12d, %edi
cmpl $0x2, %esi
setne %r9b
orb %r10b, %r9b
je 0x14dd9ec
vmovss 0x9cc(%r13), %xmm0
movl $0x10, %r9d
vucomiss 0xa0e067(%rip), %xmm0 # 0x1eeba24
jne 0x14dda12
jp 0x14dda12
cmpl $-0x1, %r8d
sete %r9b
cmpl $0x3, %esi
setne %r10b
orb %r9b, %r10b
cmpb $0x1, %r10b
jne 0x14dda0f
notl %r8d
xorl $0x4, %esi
orl %r8d, %esi
movl $0x0, %esi
cmovnel %esi, %edi
jmp 0x14dda0f
vmovss 0x9cc(%r13), %xmm0
vucomiss 0xa0e027(%rip), %xmm0 # 0x1eeba24
jne 0x14dda01
jnp 0x14dda0f
vucomiss 0xa0e017(%rip), %xmm0 # 0x1eeba20
jb 0x14dde30
movl %edi, %r9d
andl %eax, %ecx
andl %edx, %ecx
andl %r9d, %ecx
cmpq $0xa, %r14
jae 0x14dde3b
movq %r14, %r15
testb $0x2, %cl
jne 0x14ddb52
leaq 0x400(%rsp), %rax
xorl %ecx, %ecx
movl $0xd00, %r8d # imm = 0xD00
movl $0x100, %r14d # imm = 0x100
movl $0xd00, %edx # imm = 0xD00
movq %rax, %rsi
leaq -0x2c0(%rsi), %rdi
movq %rdi, -0x280(%rsi)
movq $0x0, -0x238(%rsi)
leaq -0x200(%rsi), %rdi
movq %rdi, (%rsi)
addq $0x340, %rsi # imm = 0x340
addq $-0x340, %rdx # imm = 0xFCC0
jne 0x14dda4d
addq %r8, %rcx
addq %r8, %rax
cmpq $0x3400, %rcx # imm = 0x3400
jne 0x14dda45
movq %r13, %rdi
leaq 0x100(%rsp), %rsi
vzeroupper
callq 0xd2a2f0
movq 0x58(%rsp), %rax
vmovsd (%rax), %xmm1
vmovshdup %xmm1, %xmm2 # xmm2 = xmm1[1,1,3,3]
vmovss 0xa0f0c9(%rip), %xmm0 # 0x1eecb80
vucomiss %xmm2, %xmm0
vmovaps %xmm1, 0x40(%rsp)
jbe 0x14ddbb1
vucomiss %xmm1, %xmm0
jbe 0x14ddc37
xorl %ebx, %ebx
leaq (%rsp,%rbx), %rsi
addq $0x140, %rsi # imm = 0x140
vmovaps -0x40(%rsi), %xmm0
leaq (%rbx,%r13), %rdi
addq $0x40, %rdi
vmovaps %xmm0, -0x40(%rdi)
callq 0xd2fe54
leaq (%rbx,%r13), %rdi
leaq (%rsp,%rbx), %rsi
addq $0x100, %rsi # imm = 0x100
vmovaps 0xc0(%rsi), %xmm0
vmovaps %xmm0, 0xc0(%r13,%rbx)
vmovaps 0xd0(%rsi), %xmm0
vmovaps %xmm0, 0xd0(%r13,%rbx)
addq %r14, %rdi
addq %r14, %rsi
callq 0xd27910
addq $0x340, %rbx # imm = 0x340
cmpq $0xd00, %rbx # imm = 0xD00
jne 0x14ddad3
vmovaps 0x40(%rsp), %xmm0
vaddps %xmm0, %xmm0, %xmm0
jmp 0x14ddd46
leaq 0x100(%rsp), %r14
movq %r14, %rdi
movq %r13, %rsi
vzeroupper
callq 0xd2e248
movq 0x58(%rsp), %rax
vmovss (%rax), %xmm0
vmovss 0x4(%rax), %xmm1
movq 0x78(%rsp), %rax
movq (%rax), %rsi
movq 0x8(%rax), %rdx
movq 0x10(%rax), %rcx
movq 0x18(%rax), %r8
movq 0x20(%rax), %r9
movq 0x28(%rax), %rax
movq %rax, (%rsp)
movq %r14, %rdi
vmovaps 0x90(%rsp), %xmm2
callq 0xd2fed2
xorl %eax, %eax
movq %r15, %r14
jmp 0x14dde05
vucomiss %xmm0, %xmm1
jbe 0x14ddcbe
xorl %ebx, %ebx
leaq (%rsp,%rbx), %rsi
addq $0x1b40, %rsi # imm = 0x1B40
vmovaps -0x40(%rsi), %xmm0
leaq (%rbx,%r13), %rdi
addq $0x40, %rdi
vmovaps %xmm0, -0x40(%rdi)
callq 0xd2fe54
leaq (%rsp,%rbx), %rsi
addq $0x1c00, %rsi # imm = 0x1C00
vmovaps -0x40(%rsi), %xmm0
leaq 0x100(%r13,%rbx), %rdi
vmovaps %xmm0, -0x40(%rdi)
vmovaps -0x30(%rsi), %xmm0
vmovaps %xmm0, -0x30(%rdi)
callq 0xd27910
addq $0x340, %rbx # imm = 0x340
cmpq $0xd00, %rbx # imm = 0xD00
jne 0x14ddbbd
vmovaps 0x40(%rsp), %xmm0
vaddps %xmm0, %xmm0, %xmm0
vbroadcastss 0xa12d9e(%rip), %xmm1 # 0x1ef09cc
vaddps %xmm1, %xmm0, %xmm0
jmp 0x14ddd46
vmovaps %xmm2, 0x60(%rsp)
xorl %ebx, %ebx
leaq (%rsp,%rbx), %rsi
addq $0xe40, %rsi # imm = 0xE40
vmovaps -0x40(%rsi), %xmm0
leaq (%rbx,%r13), %rdi
addq $0x40, %rdi
vmovaps %xmm0, -0x40(%rdi)
callq 0xd2fe54
leaq (%rsp,%rbx), %rsi
addq $0xf00, %rsi # imm = 0xF00
vmovaps -0x40(%rsi), %xmm0
leaq 0x100(%r13,%rbx), %rdi
vmovaps %xmm0, -0x40(%rdi)
vmovaps -0x30(%rsi), %xmm0
vmovaps %xmm0, -0x30(%rdi)
callq 0xd27910
addq $0x340, %rbx # imm = 0x340
cmpq $0xd00, %rbx # imm = 0xD00
jne 0x14ddc3f
vmovaps 0x40(%rsp), %xmm0
vaddss %xmm0, %xmm0, %xmm0
vaddss 0xa12d1d(%rip), %xmm0, %xmm0 # 0x1ef09cc
vmovaps 0x60(%rsp), %xmm1
vaddss %xmm1, %xmm1, %xmm1
jmp 0x14ddd40
vmovaps %xmm2, 0x60(%rsp)
xorl %ebx, %ebx
leaq (%rsp,%rbx), %rsi
addq $0x2840, %rsi # imm = 0x2840
vmovaps -0x40(%rsi), %xmm0
leaq (%rbx,%r13), %rdi
addq $0x40, %rdi
vmovaps %xmm0, -0x40(%rdi)
callq 0xd2fe54
leaq (%rsp,%rbx), %rsi
addq $0x2900, %rsi # imm = 0x2900
vmovaps -0x40(%rsi), %xmm0
leaq 0x100(%r13,%rbx), %rdi
vmovaps %xmm0, -0x40(%rdi)
vmovaps -0x30(%rsi), %xmm0
vmovaps %xmm0, -0x30(%rdi)
callq 0xd27910
addq $0x340, %rbx # imm = 0x340
cmpq $0xd00, %rbx # imm = 0xD00
jne 0x14ddcc6
vmovaps 0x40(%rsp), %xmm0
vaddss %xmm0, %xmm0, %xmm0
vmovaps 0x60(%rsp), %xmm1
vaddss %xmm1, %xmm1, %xmm1
vaddss 0xa12c8c(%rip), %xmm1, %xmm1 # 0x1ef09cc
vinsertps $0x10, %xmm1, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm1[0],xmm0[2,3]
movq 0x58(%rsp), %rax
vmovlps %xmm0, (%rax)
vmovaps 0x90(%rsp), %xmm0
vaddss %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x90(%rsp)
incq %r15
movq %r15, 0x40(%rsp)
leaq 0x3200(%rsp), %r15
leaq 0x3500(%rsp), %r12
addq $-0xd00, %r12 # imm = 0xF300
movq $-0xd00, %r14 # imm = 0xF300
movq %r15, %rbx
leaq 0xc0(%rbx), %rax
movq 0x2c0(%rbx), %rdi
cmpq %rdi, %rax
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14dddb1
callq 0x1ee612d
movq 0x40(%rbx), %rdi
cmpq %rdi, %rbx
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14dddca
callq 0x6a370
addq $-0x340, %rbx # imm = 0xFCC0
addq $0x340, %r14 # imm = 0x340
jne 0x14ddd8e
addq $-0xd00, %r15 # imm = 0xF300
leaq 0x100(%rsp), %rax
cmpq %rax, %r12
jne 0x14ddd7d
movb $0x1, %al
movq 0x40(%rsp), %r14
movabsq $0x1fffffffc, %rbx # imm = 0x1FFFFFFFC
movl $0xf, %r12d
testb %al, %al
jne 0x14dd37f
jmp 0x14defdd
movl $0x10, %eax
jmp 0x14dd516
movl $0x10, %ecx
jmp 0x14dd6ba
movl $0x10, %edx
jmp 0x14dd863
movl $0x10, %r9d
jmp 0x14dda12
testb $0x1, %cl
jne 0x14dee5f
leaq 0x100(%rsp), %rdi
movq %r13, %rsi
movq 0xd0(%rsp), %rdx
movq 0xd8(%rsp), %rcx
movq 0x10(%rbp), %r8
movq 0x18(%rbp), %r9
vzeroupper
callq 0xd2e6c8
movq 0x58(%rsp), %rax
vmovss (%rax), %xmm1
vmovss 0x4(%rax), %xmm0
movq 0x78(%rsp), %rax
movq (%rax), %r8
movq 0x8(%rax), %rdi
movq 0x10(%rax), %rsi
movq 0x18(%rax), %rdx
movq 0x20(%rax), %rcx
movq 0x28(%rax), %rax
testq %r8, %r8
je 0x14de140
vmovss 0xa0e86a(%rip), %xmm2 # 0x1eec714
vucomiss %xmm2, %xmm0
jne 0x14ddeb6
jnp 0x14defa6
vucomiss 0xa0db66(%rip), %xmm1 # 0x1eeba24
jne 0x14ddec6
jnp 0x14defa6
vmovss 0xa0e846(%rip), %xmm2 # 0x1eec714
vucomiss %xmm2, %xmm1
jne 0x14ddeda
jnp 0x14defa6
vucomiss 0xa0db42(%rip), %xmm0 # 0x1eeba24
jne 0x14ddeea
jnp 0x14defa6
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vmulps 0x150(%rsp), %xmm4, %xmm2
vshufps $0x0, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[0,0,0,0]
vmulps 0x200(%rsp), %xmm3, %xmm5
vaddps %xmm5, %xmm2, %xmm2
vaddss %xmm0, %xmm1, %xmm5
vrcpss %xmm5, %xmm5, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vmovss 0xa130da(%rip), %xmm9 # 0x1ef0ff8
vsubss %xmm5, %xmm9, %xmm5
vmulss %xmm5, %xmm6, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm2, %xmm2
vmovaps %xmm2, 0x40(%rsp)
vmovss 0xa0e7d7(%rip), %xmm2 # 0x1eec714
vsubss %xmm1, %xmm2, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[0,0,0,0]
vmulps 0x210(%rsp), %xmm6, %xmm7
vmulps 0x160(%rsp), %xmm3, %xmm3
vaddps %xmm7, %xmm3, %xmm3
vaddss %xmm0, %xmm5, %xmm5
vrcpss %xmm5, %xmm5, %xmm7
vmulss %xmm7, %xmm5, %xmm5
vsubss %xmm5, %xmm9, %xmm5
vmulss %xmm5, %xmm7, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm3, %xmm3
vmulps 0x1a0(%rsp), %xmm6, %xmm5
vsubss %xmm0, %xmm2, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps 0x230(%rsp), %xmm6, %xmm7
vaddps %xmm7, %xmm5, %xmm5
vsubss %xmm1, %xmm9, %xmm7
vsubss %xmm0, %xmm7, %xmm7
vrcpss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm9, %xmm7
vmulss %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps 0x220(%rsp), %xmm4, %xmm8
vmulps %xmm7, %xmm5, %xmm4
vmulps 0x190(%rsp), %xmm6, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vaddss %xmm2, %xmm1, %xmm6
vsubss %xmm0, %xmm6, %xmm6
vrcpss %xmm6, %xmm6, %xmm7
vmulss %xmm7, %xmm6, %xmm6
vsubss %xmm6, %xmm9, %xmm6
vmulss %xmm6, %xmm7, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm5, %xmm5
vsubss %xmm1, %xmm2, %xmm6
vmulss %xmm6, %xmm6, %xmm7
vmulss %xmm7, %xmm6, %xmm8
vmovss 0xa12fe8(%rip), %xmm15 # 0x1ef0fec
vmulss %xmm1, %xmm15, %xmm9
vmulss %xmm7, %xmm9, %xmm10
vmulss %xmm1, %xmm1, %xmm7
vmulss %xmm7, %xmm15, %xmm9
vmulss %xmm6, %xmm9, %xmm12
vmulss %xmm7, %xmm1, %xmm11
vsubss %xmm0, %xmm2, %xmm7
vmulss %xmm7, %xmm7, %xmm6
vmulss %xmm6, %xmm7, %xmm13
vmulss %xmm0, %xmm15, %xmm9
vmulss %xmm6, %xmm9, %xmm14
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm15, %xmm9
vmulss %xmm7, %xmm9, %xmm9
vshufps $0x0, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[0,0,0,0]
vshufps $0x0, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[0,0,0,0]
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps 0x130(%rsp), %xmm11, %xmm10
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps 0x120(%rsp), %xmm12, %xmm15
vaddps %xmm15, %xmm10, %xmm10
vmulps 0x110(%rsp), %xmm8, %xmm15
vmulps 0x100(%rsp), %xmm7, %xmm2
vaddps %xmm10, %xmm15, %xmm10
vaddps %xmm2, %xmm10, %xmm2
vshufps $0x0, %xmm13, %xmm13, %xmm10 # xmm10 = xmm13[0,0,0,0]
vmulps %xmm2, %xmm10, %xmm10
vmulps 0x170(%rsp), %xmm11, %xmm2
vmulps %xmm3, %xmm12, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmulps 0x40(%rsp), %xmm8, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmulps 0x140(%rsp), %xmm7, %xmm3
vshufps $0x0, %xmm14, %xmm14, %xmm13 # xmm13 = xmm14[0,0,0,0]
vaddps %xmm2, %xmm3, %xmm2
vmulps 0x1b0(%rsp), %xmm11, %xmm3
vmulps %xmm2, %xmm13, %xmm2
vmulps %xmm4, %xmm12, %xmm4
vaddps %xmm3, %xmm4, %xmm3
vmulps %xmm5, %xmm8, %xmm4
vaddps %xmm3, %xmm4, %xmm3
vmulps 0x180(%rsp), %xmm7, %xmm4
vaddps %xmm3, %xmm4, %xmm3
vshufps $0x0, %xmm9, %xmm9, %xmm4 # xmm4 = xmm9[0,0,0,0]
vmulps %xmm3, %xmm4, %xmm3
vmulps 0x1f0(%rsp), %xmm11, %xmm4
vmulps 0x1e0(%rsp), %xmm12, %xmm5
vmulps 0x1d0(%rsp), %xmm8, %xmm8
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm4, %xmm8, %xmm4
vmulps 0x1c0(%rsp), %xmm7, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vmulss %xmm6, %xmm0, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm10, %xmm2
vmovaps %xmm2, (%r8)
testq %rdi, %rdi
je 0x14de6a8
vmovss 0xa0e5c3(%rip), %xmm9 # 0x1eec714
vcmpeqps %xmm1, %xmm9, %xmm2
vxorpd %xmm4, %xmm4, %xmm4
vcmpeqps %xmm4, %xmm1, %xmm3
vorps %xmm2, %xmm3, %xmm2
vucomiss %xmm4, %xmm0
vmovd %xmm2, %r9d
setnp %r8b
sete %r10b
andb %r8b, %r10b
vucomiss %xmm9, %xmm0
setnp %r11b
sete %r8b
andb %r11b, %r8b
orb %r10b, %r8b
orb %r9b, %r8b
testb $0x1, %r8b
jne 0x14deebb
vshufps $0x0, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,0,0,0]
vmulps 0x150(%rsp), %xmm2, %xmm3
vshufps $0x0, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,0,0,0]
vmulps 0x200(%rsp), %xmm4, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vaddss %xmm0, %xmm1, %xmm5
vrcpss %xmm5, %xmm5, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vmovss 0xa12e2d(%rip), %xmm7 # 0x1ef0ff8
vsubss %xmm5, %xmm7, %xmm5
vmulss %xmm5, %xmm6, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm3, %xmm12
vsubss %xmm1, %xmm9, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[0,0,0,0]
vmulps 0x210(%rsp), %xmm5, %xmm6
vmulps 0x160(%rsp), %xmm4, %xmm4
vaddps %xmm6, %xmm4, %xmm4
vaddss %xmm0, %xmm3, %xmm3
vrcpss %xmm3, %xmm3, %xmm6
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm3, %xmm7, %xmm3
vmulss %xmm3, %xmm6, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm4, %xmm11
vmulps 0x1a0(%rsp), %xmm5, %xmm3
vsubss %xmm0, %xmm9, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps 0x230(%rsp), %xmm4, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vsubss %xmm1, %xmm7, %xmm5
vsubss %xmm0, %xmm5, %xmm5
vrcpss %xmm5, %xmm5, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vsubss %xmm5, %xmm7, %xmm5
vmulss %xmm5, %xmm6, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps 0x220(%rsp), %xmm2, %xmm2
vmulps %xmm5, %xmm3, %xmm3
vmovaps %xmm3, 0x40(%rsp)
vmulps 0x190(%rsp), %xmm4, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vaddss %xmm1, %xmm9, %xmm3
vsubss %xmm0, %xmm3, %xmm3
vrcpss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm3
vsubss %xmm3, %xmm7, %xmm3
vmulss %xmm3, %xmm4, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm2, %xmm2
vmovaps %xmm2, 0x80(%rsp)
vsubss %xmm1, %xmm9, %xmm5
vmulss %xmm5, %xmm5, %xmm6
vmulss %xmm5, %xmm1, %xmm2
vaddss %xmm2, %xmm2, %xmm2
vsubss %xmm2, %xmm6, %xmm3
vmulss %xmm1, %xmm1, %xmm10
vsubss %xmm10, %xmm2, %xmm2
vmulss 0xa12d2d(%rip), %xmm6, %xmm4 # 0x1ef0ff0
vmovss 0xa12d21(%rip), %xmm7 # 0x1ef0fec
vmulss %xmm7, %xmm3, %xmm14
vmulss %xmm7, %xmm2, %xmm8
vmovss %xmm10, 0x60(%rsp)
vmulss %xmm7, %xmm10, %xmm10
vsubss %xmm0, %xmm9, %xmm3
vmulss %xmm3, %xmm3, %xmm2
vshufps $0x0, %xmm8, %xmm8, %xmm13 # xmm13 = xmm8[0,0,0,0]
vmovaps %xmm10, 0xc0(%rsp)
vshufps $0x0, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[0,0,0,0]
vmulps 0x130(%rsp), %xmm8, %xmm10
vmulps 0x120(%rsp), %xmm13, %xmm9
vshufps $0x0, %xmm14, %xmm14, %xmm15 # xmm15 = xmm14[0,0,0,0]
vaddps %xmm9, %xmm10, %xmm9
vmulps 0x110(%rsp), %xmm15, %xmm10
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vaddps %xmm9, %xmm10, %xmm9
vmulps 0x100(%rsp), %xmm4, %xmm10
vaddps %xmm9, %xmm10, %xmm9
vmulss %xmm2, %xmm3, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm9, %xmm10, %xmm14
vmulps 0x170(%rsp), %xmm8, %xmm9
vmulps %xmm11, %xmm13, %xmm10
vaddps %xmm9, %xmm10, %xmm9
vmulss %xmm7, %xmm0, %xmm10
vmulss %xmm2, %xmm10, %xmm11
vmulps %xmm12, %xmm15, %xmm10
vaddps %xmm9, %xmm10, %xmm9
vmulps 0x140(%rsp), %xmm4, %xmm10
vaddps %xmm9, %xmm10, %xmm9
vmulss %xmm0, %xmm0, %xmm10
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm9, %xmm11, %xmm9
vmulps 0x1b0(%rsp), %xmm8, %xmm11
vmulps 0x40(%rsp), %xmm13, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vmulss %xmm7, %xmm10, %xmm7
vmulps 0x80(%rsp), %xmm15, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vmulps 0x180(%rsp), %xmm4, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vmovaps %xmm7, 0x40(%rsp)
vmulss %xmm7, %xmm3, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps 0x1f0(%rsp), %xmm8, %xmm8
vmulps %xmm11, %xmm12, %xmm11
vmulps 0x1e0(%rsp), %xmm13, %xmm12
vaddps %xmm12, %xmm8, %xmm8
vmulps 0x1d0(%rsp), %xmm15, %xmm12
vmulps 0x1c0(%rsp), %xmm4, %xmm4
vaddps %xmm8, %xmm12, %xmm8
vaddps %xmm4, %xmm8, %xmm4
vmulss %xmm0, %xmm10, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm4, %xmm8, %xmm4
vaddps %xmm4, %xmm11, %xmm4
vaddps %xmm4, %xmm9, %xmm4
vaddps %xmm4, %xmm14, %xmm4
vpermilps $0x0, 0x90(%rsp), %xmm7 # xmm7 = mem[0,0,0,0]
vmovaps %xmm7, 0x80(%rsp)
vmulps %xmm4, %xmm7, %xmm4
vmovaps %xmm4, (%rdi)
testb $0x1, %r8b
jne 0x14deef3
vshufps $0x0, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[0,0,0,0]
vmulps 0x150(%rsp), %xmm4, %xmm8
vshufps $0x0, %xmm0, %xmm0, %xmm11 # xmm11 = xmm0[0,0,0,0]
vmulps 0x200(%rsp), %xmm11, %xmm12
vaddps %xmm12, %xmm8, %xmm8
vaddss %xmm0, %xmm1, %xmm12
vrcpss %xmm12, %xmm12, %xmm13
vmulss %xmm13, %xmm12, %xmm12
vmovss 0xa12b7d(%rip), %xmm7 # 0x1ef0ff8
vsubss %xmm12, %xmm7, %xmm12
vmulss %xmm12, %xmm13, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm13 # xmm13 = xmm5[0,0,0,0]
vmulps 0x210(%rsp), %xmm13, %xmm14
vmulps 0x160(%rsp), %xmm11, %xmm15
vmulps %xmm12, %xmm8, %xmm11
vaddps %xmm14, %xmm15, %xmm8
vaddss %xmm0, %xmm5, %xmm12
vrcpss %xmm12, %xmm12, %xmm14
vmulss %xmm14, %xmm12, %xmm12
vsubss %xmm12, %xmm7, %xmm12
vmulss %xmm12, %xmm14, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm12, %xmm8, %xmm12
vmulps 0x1a0(%rsp), %xmm13, %xmm8
vshufps $0x0, %xmm3, %xmm3, %xmm14 # xmm14 = xmm3[0,0,0,0]
vmulps 0x230(%rsp), %xmm14, %xmm13
vaddps %xmm13, %xmm8, %xmm8
vsubss %xmm1, %xmm7, %xmm13
vsubss %xmm0, %xmm13, %xmm13
vrcpss %xmm13, %xmm13, %xmm15
vmulss %xmm15, %xmm13, %xmm13
vsubss %xmm13, %xmm7, %xmm13
vmulss %xmm13, %xmm15, %xmm13
vshufps $0x0, %xmm13, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
vmulps 0x220(%rsp), %xmm4, %xmm4
vmulps %xmm13, %xmm8, %xmm13
vmulps 0x190(%rsp), %xmm14, %xmm8
vaddps %xmm4, %xmm8, %xmm4
vmovss 0xa0e1e4(%rip), %xmm8 # 0x1eec714
vaddss %xmm1, %xmm8, %xmm8
vsubss %xmm0, %xmm8, %xmm8
vrcpss %xmm8, %xmm8, %xmm14
vmulss %xmm14, %xmm8, %xmm8
vsubss %xmm8, %xmm7, %xmm8
vmulss %xmm8, %xmm14, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm4, %xmm8, %xmm14
vmulss %xmm6, %xmm5, %xmm4
vmovss 0xa12a8a(%rip), %xmm9 # 0x1ef0fec
vmulss %xmm1, %xmm9, %xmm8
vmulss %xmm6, %xmm8, %xmm6
vmulss 0xc0(%rsp), %xmm5, %xmm8
vmulss 0x60(%rsp), %xmm1, %xmm15
vshufps $0x0, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[0,0,0,0]
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[0,0,0,0]
vshufps $0x0, %xmm15, %xmm15, %xmm4 # xmm4 = xmm15[0,0,0,0]
vmulps 0x130(%rsp), %xmm4, %xmm8
vmulps 0x120(%rsp), %xmm7, %xmm15
vaddps %xmm15, %xmm8, %xmm8
vmulps 0x110(%rsp), %xmm6, %xmm15
vaddps %xmm8, %xmm15, %xmm8
vmulps 0x100(%rsp), %xmm5, %xmm15
vaddps %xmm8, %xmm15, %xmm8
vmulss %xmm3, %xmm0, %xmm3
vaddss %xmm3, %xmm3, %xmm3
vsubss %xmm10, %xmm3, %xmm10
vsubss %xmm3, %xmm2, %xmm3
vmulss 0xa12a15(%rip), %xmm2, %xmm2 # 0x1ef0ff0
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm8, %xmm2
vmulps 0x170(%rsp), %xmm4, %xmm8
vmulps %xmm7, %xmm12, %xmm12
vaddps %xmm8, %xmm12, %xmm8
vmulps %xmm6, %xmm11, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulps 0x140(%rsp), %xmm5, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulss %xmm3, %xmm9, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm8, %xmm3
vmulps 0x1b0(%rsp), %xmm4, %xmm8
vmulps %xmm7, %xmm13, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulps %xmm6, %xmm14, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulps 0x180(%rsp), %xmm5, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulss %xmm9, %xmm10, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm8, %xmm10, %xmm8
vmulps 0x1f0(%rsp), %xmm4, %xmm4
vmulps 0x1e0(%rsp), %xmm7, %xmm7
vmulps 0x1d0(%rsp), %xmm6, %xmm6
vaddps %xmm7, %xmm4, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmulps 0x1c0(%rsp), %xmm5, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vpermilps $0x0, 0x40(%rsp), %xmm5 # xmm5 = mem[0,0,0,0]
vmulps %xmm4, %xmm5, %xmm4
vaddps %xmm4, %xmm8, %xmm4
vaddps %xmm4, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x80(%rsp), %xmm2, %xmm2
vmovaps %xmm2, (%rsi)
testq %rdx, %rdx
je 0x14deeb4
vmovss 0xa0e05b(%rip), %xmm12 # 0x1eec714
vcmpeqps %xmm1, %xmm12, %xmm2
vxorpd %xmm4, %xmm4, %xmm4
vcmpeqps %xmm4, %xmm1, %xmm3
vorps %xmm2, %xmm3, %xmm2
vucomiss %xmm4, %xmm0
vmovd %xmm2, %edi
setnp %sil
sete %r8b
andb %sil, %r8b
vucomiss %xmm12, %xmm0
setnp %r9b
sete %sil
andb %r9b, %sil
orb %r8b, %sil
orb %dil, %sil
testb $0x1, %sil
jne 0x14def1c
vshufps $0x0, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,0,0,0]
vmulps 0x150(%rsp), %xmm2, %xmm3
vshufps $0x0, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,0,0,0]
vmulps 0x200(%rsp), %xmm4, %xmm5
vaddps %xmm5, %xmm3, %xmm3
vaddss %xmm0, %xmm1, %xmm5
vrcpss %xmm5, %xmm5, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vmovss 0xa128c6(%rip), %xmm9 # 0x1ef0ff8
vsubss %xmm5, %xmm9, %xmm5
vmulss %xmm5, %xmm6, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm3, %xmm3
vsubss %xmm1, %xmm12, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[0,0,0,0]
vmulps 0x210(%rsp), %xmm7, %xmm6
vmulps 0x160(%rsp), %xmm4, %xmm4
vaddps %xmm6, %xmm4, %xmm4
vaddss %xmm0, %xmm5, %xmm5
vrcpss %xmm5, %xmm5, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vsubss %xmm5, %xmm9, %xmm5
vmulss %xmm5, %xmm6, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm4, %xmm6
vmulps 0x1a0(%rsp), %xmm7, %xmm4
vsubss %xmm0, %xmm12, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps 0x230(%rsp), %xmm5, %xmm7
vaddps %xmm7, %xmm4, %xmm4
vsubss %xmm1, %xmm9, %xmm7
vsubss %xmm0, %xmm7, %xmm7
vrcpss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm9, %xmm7
vmulss %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps 0x220(%rsp), %xmm2, %xmm2
vmulps %xmm7, %xmm4, %xmm4
vmovaps %xmm4, 0x60(%rsp)
vmulps 0x190(%rsp), %xmm5, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vaddss %xmm1, %xmm12, %xmm4
vsubss %xmm0, %xmm4, %xmm4
vrcpss %xmm4, %xmm4, %xmm5
vmulss %xmm5, %xmm4, %xmm4
vsubss %xmm4, %xmm9, %xmm4
vmulss %xmm4, %xmm5, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm2, %xmm9
vsubss %xmm1, %xmm12, %xmm4
vaddss %xmm4, %xmm4, %xmm2
vsubss %xmm2, %xmm1, %xmm2
vaddss %xmm1, %xmm1, %xmm5
vsubss %xmm5, %xmm4, %xmm5
vmovss 0xa127dc(%rip), %xmm11 # 0x1ef0ff4
vmulss %xmm4, %xmm11, %xmm7
vmulss %xmm2, %xmm11, %xmm2
vmulss %xmm5, %xmm11, %xmm10
vmulss %xmm1, %xmm11, %xmm11
vsubss %xmm0, %xmm12, %xmm5
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm5, %xmm8, %xmm13
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vshufps $0x0, %xmm11, %xmm11, %xmm12 # xmm12 = xmm11[0,0,0,0]
vmulps 0x130(%rsp), %xmm12, %xmm14
vmulps 0x120(%rsp), %xmm10, %xmm15
vshufps $0x0, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[0,0,0,0]
vaddps %xmm15, %xmm14, %xmm2
vmulps 0x110(%rsp), %xmm11, %xmm14
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vaddps %xmm2, %xmm14, %xmm2
vmulps 0x100(%rsp), %xmm7, %xmm14
vshufps $0x0, %xmm13, %xmm13, %xmm13 # xmm13 = xmm13[0,0,0,0]
vaddps %xmm2, %xmm14, %xmm2
vmulps 0x170(%rsp), %xmm12, %xmm14
vmulps %xmm2, %xmm13, %xmm2
vmulps %xmm6, %xmm10, %xmm6
vaddps %xmm6, %xmm14, %xmm6
vmovss 0xa1274e(%rip), %xmm14 # 0x1ef0fec
vmulss %xmm0, %xmm14, %xmm13
vmulps %xmm3, %xmm11, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vmulps 0x140(%rsp), %xmm7, %xmm6
vmovss %xmm8, 0x40(%rsp)
vmulss %xmm8, %xmm13, %xmm13
vaddps %xmm3, %xmm6, %xmm3
vmulss %xmm0, %xmm0, %xmm15
vshufps $0x0, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[0,0,0,0]
vmulps %xmm3, %xmm6, %xmm3
vmulps 0x1b0(%rsp), %xmm12, %xmm6
vmulps 0x60(%rsp), %xmm10, %xmm8
vaddps %xmm6, %xmm8, %xmm6
vmulss %xmm14, %xmm15, %xmm13
vmulps %xmm9, %xmm11, %xmm8
vaddps %xmm6, %xmm8, %xmm6
vmulps 0x180(%rsp), %xmm7, %xmm8
vaddps %xmm6, %xmm8, %xmm6
vmovaps %xmm13, 0x60(%rsp)
vmulss %xmm5, %xmm13, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm6, %xmm8, %xmm6
vmulps 0x1f0(%rsp), %xmm12, %xmm8
vmulps 0x1e0(%rsp), %xmm10, %xmm9
vmulps 0x1d0(%rsp), %xmm11, %xmm10
vaddps %xmm9, %xmm8, %xmm8
vaddps %xmm8, %xmm10, %xmm8
vmulps 0x1c0(%rsp), %xmm7, %xmm7
vaddps %xmm7, %xmm8, %xmm7
vmovss %xmm15, 0x80(%rsp)
vmulss %xmm0, %xmm15, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm7, %xmm8, %xmm7
vaddps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm3, %xmm3
vaddps %xmm3, %xmm2, %xmm2
vmovaps 0x90(%rsp), %xmm3
vmulss %xmm3, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm2, %xmm3, %xmm2
vmovaps %xmm2, (%rdx)
testb $0x1, %sil
vmovaps %xmm3, 0xc0(%rsp)
jne 0x14def4b
vshufps $0x0, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,0,0,0]
vmulps 0x150(%rsp), %xmm2, %xmm3
vshufps $0x0, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[0,0,0,0]
vmulps 0x200(%rsp), %xmm6, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vaddss %xmm0, %xmm1, %xmm7
vrcpss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vmovss 0xa12630(%rip), %xmm10 # 0x1ef0ff8
vsubss %xmm7, %xmm10, %xmm7
vmulss %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[0,0,0,0]
vmulps 0x210(%rsp), %xmm8, %xmm9
vmulps 0x160(%rsp), %xmm6, %xmm6
vmulps %xmm7, %xmm3, %xmm11
vaddps %xmm6, %xmm9, %xmm3
vaddss %xmm0, %xmm4, %xmm6
vrcpss %xmm6, %xmm6, %xmm7
vmulss %xmm7, %xmm6, %xmm6
vsubss %xmm6, %xmm10, %xmm6
vmulss %xmm6, %xmm7, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm3, %xmm12
vmulps 0x1a0(%rsp), %xmm8, %xmm3
vshufps $0x0, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[0,0,0,0]
vmulps 0x230(%rsp), %xmm6, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vsubss %xmm1, %xmm10, %xmm7
vsubss %xmm0, %xmm7, %xmm7
vrcpss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm10, %xmm7
vmulss %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps 0x220(%rsp), %xmm2, %xmm2
vmulps %xmm7, %xmm3, %xmm3
vmovaps %xmm3, 0xe0(%rsp)
vmulps 0x190(%rsp), %xmm6, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmovss 0xa0dca0(%rip), %xmm3 # 0x1eec714
vaddss %xmm3, %xmm1, %xmm3
vsubss %xmm0, %xmm3, %xmm3
vrcpss %xmm3, %xmm3, %xmm6
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm3, %xmm10, %xmm3
vmulss %xmm3, %xmm6, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm2, %xmm14
vmulss %xmm4, %xmm4, %xmm9
vmulss %xmm4, %xmm9, %xmm2
vmovss 0xa12547(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm1, %xmm3
vmulss %xmm3, %xmm9, %xmm3
vmulss %xmm1, %xmm1, %xmm10
vmulss %xmm6, %xmm10, %xmm6
vmovaps %xmm6, 0xf0(%rsp)
vmulss %xmm6, %xmm4, %xmm6
vmulss %xmm1, %xmm10, %xmm7
vshufps $0x0, %xmm6, %xmm6, %xmm15 # xmm15 = xmm6[0,0,0,0]
vshufps $0x0, %xmm7, %xmm7, %xmm6 # xmm6 = xmm7[0,0,0,0]
vmulps 0x130(%rsp), %xmm6, %xmm7
vmulps 0x120(%rsp), %xmm15, %xmm8
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vaddps %xmm7, %xmm8, %xmm7
vmulps 0x110(%rsp), %xmm3, %xmm8
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vaddps %xmm7, %xmm8, %xmm7
vmulps 0x100(%rsp), %xmm2, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmovss 0xa124e2(%rip), %xmm13 # 0x1ef0ff4
vmulss %xmm5, %xmm13, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm7, %xmm8, %xmm7
vmulps 0x170(%rsp), %xmm6, %xmm8
vmulps %xmm12, %xmm15, %xmm12
vaddps %xmm8, %xmm12, %xmm8
vmulps %xmm3, %xmm11, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulps 0x140(%rsp), %xmm2, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vaddss %xmm5, %xmm5, %xmm11
vsubss %xmm11, %xmm0, %xmm11
vmulss %xmm13, %xmm11, %xmm11
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vmulps %xmm8, %xmm11, %xmm8
vmulps 0x1b0(%rsp), %xmm6, %xmm11
vmulps 0xe0(%rsp), %xmm15, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vmulps %xmm3, %xmm14, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vmulps 0x180(%rsp), %xmm2, %xmm12
vaddps %xmm11, %xmm12, %xmm11
vaddss %xmm0, %xmm0, %xmm12
vsubss %xmm12, %xmm5, %xmm12
vmulss %xmm13, %xmm12, %xmm12
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps %xmm11, %xmm12, %xmm11
vmulps 0x1f0(%rsp), %xmm6, %xmm6
vmulps 0x1e0(%rsp), %xmm15, %xmm12
vaddps %xmm6, %xmm12, %xmm6
vmulps 0x1d0(%rsp), %xmm3, %xmm3
vaddps %xmm6, %xmm3, %xmm3
vmulps 0x1c0(%rsp), %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmulss %xmm0, %xmm13, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm11, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vaddps %xmm2, %xmm7, %xmm2
vmovaps 0xc0(%rsp), %xmm15
vmulps %xmm2, %xmm15, %xmm2
vmovaps %xmm2, (%rcx)
testb $0x1, %sil
jne 0x14def7d
vshufps $0x0, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,0,0,0]
vmulps 0x150(%rsp), %xmm2, %xmm3
vshufps $0x0, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[0,0,0,0]
vmulps 0x200(%rsp), %xmm6, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vaddss %xmm0, %xmm1, %xmm7
vrcpss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vmovss 0xa123b6(%rip), %xmm14 # 0x1ef0ff8
vsubss %xmm7, %xmm14, %xmm7
vmulss %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[0,0,0,0]
vmulps 0x210(%rsp), %xmm8, %xmm12
vmulps 0x160(%rsp), %xmm6, %xmm6
vmulps %xmm7, %xmm3, %xmm11
vaddps %xmm6, %xmm12, %xmm3
vaddss %xmm0, %xmm4, %xmm6
vrcpss %xmm6, %xmm6, %xmm7
vmulss %xmm7, %xmm6, %xmm6
vsubss %xmm6, %xmm14, %xmm6
vmulss %xmm6, %xmm7, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm3, %xmm12
vmulps 0x1a0(%rsp), %xmm8, %xmm3
vshufps $0x0, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[0,0,0,0]
vmulps 0x230(%rsp), %xmm6, %xmm7
vaddps %xmm7, %xmm3, %xmm3
vsubss %xmm1, %xmm14, %xmm7
vsubss %xmm0, %xmm7, %xmm7
vrcpss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm7
vsubss %xmm7, %xmm14, %xmm7
vmulss %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps 0x220(%rsp), %xmm2, %xmm2
vmulps %xmm7, %xmm3, %xmm13
vmulps 0x190(%rsp), %xmm6, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmovss 0xa0da2f(%rip), %xmm3 # 0x1eec714
vaddss %xmm3, %xmm1, %xmm3
vsubss %xmm0, %xmm3, %xmm3
vrcpss %xmm3, %xmm3, %xmm6
vmulss %xmm6, %xmm3, %xmm3
vsubss %xmm3, %xmm14, %xmm3
vmulss %xmm3, %xmm6, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm2, %xmm14
vmulss %xmm4, %xmm1, %xmm1
vaddss %xmm1, %xmm1, %xmm1
vsubss %xmm1, %xmm9, %xmm2
vsubss %xmm10, %xmm1, %xmm1
vmovss 0xa122d1(%rip), %xmm8 # 0x1ef0ff0
vmulss %xmm8, %xmm9, %xmm3
vmovss 0xa122c0(%rip), %xmm9 # 0x1ef0fec
vmulss %xmm2, %xmm9, %xmm2
vmulss %xmm1, %xmm9, %xmm1
vmulss %xmm5, %xmm0, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm0 # xmm0 = xmm3[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vpermilps $0x0, 0xf0(%rsp), %xmm3 # xmm3 = mem[0,0,0,0]
vmulps 0x130(%rsp), %xmm3, %xmm5
vmulps 0x120(%rsp), %xmm1, %xmm6
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vaddps %xmm6, %xmm5, %xmm5
vmulps 0x110(%rsp), %xmm2, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vmulps 0x100(%rsp), %xmm0, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vaddss %xmm4, %xmm4, %xmm4
vsubss 0x80(%rsp), %xmm4, %xmm6
vmovss 0x40(%rsp), %xmm10
vsubss %xmm4, %xmm10, %xmm7
vmulss %xmm8, %xmm10, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm5, %xmm4, %xmm4
vmulps 0x170(%rsp), %xmm3, %xmm5
vmulps %xmm1, %xmm12, %xmm8
vaddps %xmm5, %xmm8, %xmm5
vmulps %xmm2, %xmm11, %xmm8
vaddps %xmm5, %xmm8, %xmm5
vmulps 0x140(%rsp), %xmm0, %xmm8
vaddps %xmm5, %xmm8, %xmm5
vmulss %xmm7, %xmm9, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm5, %xmm7, %xmm5
vmulps 0x1b0(%rsp), %xmm3, %xmm7
vmulps %xmm1, %xmm13, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps %xmm2, %xmm14, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps 0x180(%rsp), %xmm0, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulss %xmm6, %xmm9, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm7, %xmm6, %xmm6
vmulps 0x1f0(%rsp), %xmm3, %xmm3
vmulps 0x1e0(%rsp), %xmm1, %xmm1
vmulps 0x1d0(%rsp), %xmm2, %xmm2
vaddps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm2, %xmm1
vmulps 0x1c0(%rsp), %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vpermilps $0x0, 0x60(%rsp), %xmm1 # xmm1 = mem[0,0,0,0]
vmulps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm6, %xmm0
vaddps %xmm0, %xmm5, %xmm0
vaddps %xmm0, %xmm4, %xmm0
vmulps %xmm0, %xmm15, %xmm0
vmovaps %xmm0, (%rax)
jmp 0x14deeb4
leaq 0x100(%rsp), %r15
movq %r15, %rdi
movq %r13, %rsi
vzeroupper
callq 0xd2e248
movq 0x58(%rsp), %rax
vmovss (%rax), %xmm0
vmovss 0x4(%rax), %xmm1
movq 0x78(%rsp), %rax
movq (%rax), %rsi
movq 0x8(%rax), %rdx
movq 0x10(%rax), %rcx
movq 0x18(%rax), %r8
movq 0x20(%rax), %r9
movq 0x28(%rax), %rax
movq %rax, (%rsp)
movq %r15, %rdi
vmovapd 0x90(%rsp), %xmm2
callq 0xd2fed2
xorl %eax, %eax
jmp 0x14dde05
vmovaps 0x150(%rsp), %xmm12
vmovaps 0x160(%rsp), %xmm11
vmovaps 0x190(%rsp), %xmm2
vmovaps %xmm2, 0x80(%rsp)
vmovaps 0x1a0(%rsp), %xmm2
vmovaps %xmm2, 0x40(%rsp)
jmp 0x14de29e
vmovaps 0x150(%rsp), %xmm11
vmovaps 0x160(%rsp), %xmm12
vmovaps 0x190(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %xmm13
jmp 0x14de556
vmovaps 0x150(%rsp), %xmm3
vmovaps 0x160(%rsp), %xmm6
vmovaps 0x190(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm2
vmovaps %xmm2, 0x60(%rsp)
jmp 0x14de7fc
vmovaps 0x150(%rsp), %xmm11
vmovaps 0x160(%rsp), %xmm12
vmovaps 0x190(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %xmm2
vmovaps %xmm2, 0xe0(%rsp)
jmp 0x14dea95
vmovaps 0x150(%rsp), %xmm11
vmovaps 0x160(%rsp), %xmm12
vmovaps 0x190(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %xmm13
jmp 0x14ded06
vmovaps 0x150(%rsp), %xmm2
vmovaps %xmm2, 0x40(%rsp)
vmovaps 0x160(%rsp), %xmm3
vmovaps 0x190(%rsp), %xmm5
vmovaps 0x1a0(%rsp), %xmm4
vmovss 0xa0d73c(%rip), %xmm2 # 0x1eec714
jmp 0x14ddff0
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
jmp 0x14deff4
jmp 0x14deff4
jmp 0x14deff4
jmp 0x14deff4
movq %rax, %rbx
leaq 0x100(%rsp), %rdi
callq 0xd2e1b2
movq %rbx, %rdi
callq 0x6a600
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/common/../subdiv/feature_adaptive_eval.h
|
embree::avx::FeatureAdaptiveEvalSimd<embree::vboolf_impl<4>, embree::vint_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>>::FeatureAdaptiveEvalSimd(embree::HalfEdge const*, char const*, unsigned long, embree::vboolf_impl<4> const&, embree::vfloat_impl<4> const&, embree::vfloat_impl<4> const&, float*, float*, float*, float*, float*, float*, unsigned long, unsigned long)
|
FeatureAdaptiveEvalSimd (const HalfEdge* edge, const char* vertices, size_t stride, const vbool& valid, const vfloat& u, const vfloat& v,
float* P, float* dPdu, float* dPdv, float* ddPdudu, float* ddPdvdv, float* ddPdudv, const size_t dstride, const size_t N)
: P(P), dPdu(dPdu), dPdv(dPdv), ddPdudu(ddPdudu), ddPdvdv(ddPdvdv), ddPdudv(ddPdudv), dstride(dstride), N(N)
{
switch (edge->patch_type) {
case HalfEdge::BILINEAR_PATCH: BilinearPatch(edge,vertices,stride).eval(valid,u,v,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,1.0f,dstride,N); break;
case HalfEdge::REGULAR_QUAD_PATCH: RegularPatchT(edge,vertices,stride).eval(valid,u,v,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,1.0f,dstride,N); break;
#if PATCH_USE_GREGORY == 2
case HalfEdge::IRREGULAR_QUAD_PATCH: GregoryPatchT<Vertex,Vertex_t>(edge,vertices,stride).eval(valid,u,v,P,dPdu,dPdv,ddPdudu,ddPdvdv,ddPdudv,1.0f,dstride,N); break;
#endif
default: {
GeneralCatmullClarkPatch patch(edge,vertices,stride);
eval_direct(valid,patch,Vec2<vfloat>(u,v),0);
break;
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x1d00, %rsp # imm = 0x1D00
movq 0x48(%rbp), %r10
movq 0x40(%rbp), %r12
movq 0x38(%rbp), %r11
movq 0x30(%rbp), %r14
movq 0x28(%rbp), %rbx
movq 0x20(%rbp), %r15
movq 0x18(%rbp), %r13
movq %r13, (%rdi)
movq %r15, 0x8(%rdi)
movq %rbx, 0x10(%rdi)
movq %r14, 0x18(%rdi)
movq %r11, 0x20(%rdi)
movq %r12, 0x28(%rdi)
movq %r10, 0x30(%rdi)
movq 0x50(%rbp), %rax
movq %rdi, 0x10(%rsp)
movq %rax, 0x38(%rdi)
movzbl 0x1c(%rsi), %eax
cmpl $0x1, %eax
movq %r9, 0x18(%rsp)
je 0x14e2086
testl %eax, %eax
jne 0x14e20cf
movl (%rsi), %eax
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm0
vmovaps %xmm0, 0x40(%rsp)
movslq 0x4(%rsi), %rax
shlq $0x5, %rax
leaq (%rsi,%rax), %rdi
movl (%rsi,%rax), %r9d
imulq %rcx, %r9
vmovups (%rdx,%r9), %xmm0
vmovaps %xmm0, 0x50(%rsp)
movslq 0x4(%rsi,%rax), %rax
shlq $0x5, %rax
leaq (%rdi,%rax), %rsi
movl (%rax,%rdi), %r9d
imulq %rcx, %r9
vmovups (%rdx,%r9), %xmm0
vmovaps %xmm0, 0x60(%rsp)
movslq 0x4(%rax,%rdi), %rax
movq 0x50(%rbp), %rdi
shlq $0x5, %rax
movl (%rax,%rsi), %eax
imulq %rcx, %rax
vmovups (%rdx,%rax), %xmm0
vmovaps %xmm0, 0x70(%rsp)
testq %r13, %r13
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
movq 0x18(%rsp), %rdx
movq 0x10(%rbp), %rsi
jne 0x14e1f7e
leaq (,%r10,4), %rax
xorl %ecx, %ecx
vbroadcastss 0x40(%rsp,%rcx,4), %xmm0
vmovss 0x50(%rsp,%rcx,4), %xmm1
vmovss 0x60(%rsp,%rcx,4), %xmm2
vsubss %xmm0, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps (%rdx), %xmm3
vmulps %xmm1, %xmm3, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vbroadcastss 0x70(%rsp,%rcx,4), %xmm1
vsubss %xmm1, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm2, %xmm1, %xmm1
vsubps %xmm0, %xmm1, %xmm1
vmulps (%rsi), %xmm1, %xmm1
vaddps %xmm1, %xmm0, %xmm0
vmovaps (%r8), %xmm1
vmaskmovps %xmm0, %xmm1, (%r13)
incq %rcx
addq %rax, %r13
cmpq %rcx, %rdi
jne 0x14e1f1c
testq %r15, %r15
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14e2012
leaq (,%r10,4), %rax
xorl %ecx, %ecx
vmovss 0x40(%rsp,%rcx,4), %xmm0
vmovss 0x50(%rsp,%rcx,4), %xmm1
vsubss %xmm0, %xmm1, %xmm2
vmovss 0x60(%rsp,%rcx,4), %xmm3
vmovss 0x70(%rsp,%rcx,4), %xmm4
vsubss %xmm4, %xmm3, %xmm5
vsubss %xmm2, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps (%rsi), %xmm5, %xmm5
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps (%r8), %xmm6
vsubss %xmm0, %xmm4, %xmm0
vaddps %xmm5, %xmm2, %xmm2
vmaskmovps %xmm2, %xmm6, (%r15)
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vsubss %xmm1, %xmm3, %xmm1
vsubss %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps (%rdx), %xmm0, %xmm0
vmovaps (%r8), %xmm1
vaddps %xmm0, %xmm2, %xmm0
vmaskmovps %xmm0, %xmm1, (%rbx)
incq %rcx
addq %rax, %rbx
addq %rax, %r15
cmpq %rcx, %rdi
jne 0x14e1f9c
testq %r14, %r14
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14e211b
shlq $0x2, %r10
xorl %eax, %eax
vxorps %xmm0, %xmm0, %xmm0
vmovaps (%r8), %xmm1
vmaskmovps %xmm0, %xmm1, (%r14)
vmovaps (%r8), %xmm1
vmaskmovps %xmm0, %xmm1, (%r11)
vmovss 0x50(%rsp,%rax,4), %xmm1
vmovss 0x60(%rsp,%rax,4), %xmm2
vsubss 0x70(%rsp,%rax,4), %xmm2, %xmm2
vsubss 0x40(%rsp,%rax,4), %xmm1, %xmm1
vsubss %xmm1, %xmm2, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps (%r8), %xmm2
vmaskmovps %xmm1, %xmm2, (%r12)
incq %rax
addq %r10, %r12
addq %r10, %r11
addq %r10, %r14
cmpq %rax, %rdi
jne 0x14e2030
jmp 0x14e211b
leaq 0x40(%rsp), %rdi
movq %r8, 0x10(%rsp)
callq 0xd27366
vmovss 0xa0a677(%rip), %xmm0 # 0x1eec714
leaq 0x40(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %rdx
movq 0x10(%rbp), %rcx
movq %r13, %r8
movq %r15, %r9
pushq 0x50(%rbp)
pushq 0x48(%rbp)
pushq %r12
pushq 0x38(%rbp)
pushq %r14
pushq %rbx
callq 0xd369c2
addq $0x30, %rsp
jmp 0x14e211b
leaq 0x40(%rsp), %r15
movq %r15, %rdi
movq %r8, %rbx
callq 0xd2676a
movq 0x18(%rsp), %rax
vmovaps (%rax), %xmm0
leaq 0x20(%rsp), %rcx
vmovaps %xmm0, (%rcx)
movq 0x10(%rbp), %rax
vmovaps (%rax), %xmm0
vmovaps %xmm0, 0x10(%rcx)
movq 0x10(%rsp), %rdi
movq %rbx, %rsi
movq %r15, %rdx
xorl %r8d, %r8d
callq 0x14e56ce
leaq 0x40(%rsp), %rdi
callq 0xd270fc
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
leaq 0x40(%rsp), %rdi
callq 0xd270fc
movq %rbx, %rdi
callq 0x6a600
nop
|
/embree[P]embree/kernels/common/../subdiv/feature_adaptive_eval_simd.h
|
embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::Ref embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::create<embree::avx::PatchEvalSimd<embree::vboolf_impl<4>, embree::vint_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>>::PatchEvalSimd(embree::SharedLazyTessellationCache::CacheEntry&, unsigned long, embree::HalfEdge const*, char const*, unsigned long, embree::vboolf_impl<4> const&, embree::vfloat_impl<4> const&, embree::vfloat_impl<4> const&, float*, float*, float*, float*, float*, float*, unsigned long, unsigned long)::'lambda'()::operator()() const::'lambda'(unsigned long)>(embree::avx::PatchEvalSimd<embree::vboolf_impl<4>, embree::vint_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>>::PatchEvalSimd(embree::SharedLazyTessellationCache::CacheEntry&, unsigned long, embree::HalfEdge const*, char const*, unsigned long, embree::vboolf_impl<4> const&, embree::vfloat_impl<4> const&, embree::vfloat_impl<4> const&, float*, float*, float*, float*, float*, float*, unsigned long, unsigned long)::'lambda'()::operator()() const::'lambda'(unsigned long) const&, embree::GeneralCatmullClarkPatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>&, embree::HalfEdge const*, char const*, unsigned long, unsigned long)
|
__noinline static Ref create(const Allocator& alloc, GeneralCatmullClarkPatch& patch, const HalfEdge* edge, const char* vertices, size_t stride, size_t depth)
{
/* convert into standard quad patch if possible */
if (likely(patch.isQuadPatch()))
{
CatmullClarkPatch qpatch; patch.init(qpatch);
return PatchT::create(alloc,qpatch,edge,vertices,stride,depth);
}
/* do only cache up to some depth */
if (depth >= PATCH_MAX_CACHE_DEPTH)
return nullptr;
/* subdivide patch */
unsigned N;
array_t<CatmullClarkPatch,GeneralCatmullClarkPatch::SIZE> patches;
patch.subdivide(patches,N);
if (N == 4)
{
Ref child[4];
#if PATCH_USE_GREGORY == 2
BezierCurve borders[GeneralCatmullClarkPatch::SIZE]; patch.getLimitBorder(borders);
BezierCurve border0l,border0r; borders[0].subdivide(border0l,border0r);
BezierCurve border1l,border1r; borders[1].subdivide(border1l,border1r);
BezierCurve border2l,border2r; borders[2].subdivide(border2l,border2r);
BezierCurve border3l,border3r; borders[3].subdivide(border3l,border3r);
GeneralCatmullClarkPatch::fix_quad_ring_order(patches);
child[0] = PatchT::create(alloc,patches[0],edge,vertices,stride,depth+1,&border0l,nullptr,nullptr,&border3r);
child[1] = PatchT::create(alloc,patches[1],edge,vertices,stride,depth+1,&border0r,&border1l,nullptr,nullptr);
child[2] = PatchT::create(alloc,patches[2],edge,vertices,stride,depth+1,nullptr,&border1r,&border2l,nullptr);
child[3] = PatchT::create(alloc,patches[3],edge,vertices,stride,depth+1,nullptr,nullptr,&border2r,&border3l);
#else
GeneralCatmullClarkPatch::fix_quad_ring_order(patches);
for (size_t i=0; i<4; i++)
child[i] = PatchT::create(alloc,patches[i],edge,vertices,stride,depth+1);
#endif
return SubdividedQuadPatch::create(alloc,child);
}
else
{
assert(N<MAX_PATCH_VALENCE);
Ref child[MAX_PATCH_VALENCE];
#if PATCH_USE_GREGORY == 2
BezierCurve borders[GeneralCatmullClarkPatch::SIZE];
patch.getLimitBorder(borders);
for (size_t i0=0; i0<N; i0++) {
const size_t i2 = i0==0 ? N-1 : i0-1;
BezierCurve border0l,border0r; borders[i0].subdivide(border0l,border0r);
BezierCurve border2l,border2r; borders[i2].subdivide(border2l,border2r);
child[i0] = PatchT::create(alloc,patches[i0],edge,vertices,stride,depth+1, &border0l, nullptr, nullptr, &border2r);
}
#else
for (size_t i=0; i<N; i++)
child[i] = PatchT::create(alloc,patches[i],edge,vertices,stride,depth+1);
#endif
return SubdividedGeneralPatch::create(alloc,child,N);
}
return nullptr;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xd100, %rsp # imm = 0xD100
movq %r9, %r14
movq %r8, 0x38(%rsp)
movq %rcx, 0x30(%rsp)
movq %rdx, %r13
movq %rdi, %rbx
cmpl $0x4, 0x1c40(%rsi)
jne 0x14e2417
movq 0x1c00(%rsi), %rax
cmpb $0x1, 0x358(%rax)
jne 0x14e2417
cmpb $0x1, 0x6d8(%rax)
jne 0x14e2417
cmpb $0x1, 0xa58(%rax)
jne 0x14e2417
cmpb $0x1, 0xdd8(%rax)
jne 0x14e2417
leaq 0x1c0(%rsp), %rax
leaq 0x100(%rsp), %rcx
xorl %edx, %edx
leaq (%rcx,%rdx), %rdi
movq %rdi, 0x140(%rsp,%rdx)
movq $0x0, 0x188(%rsp,%rdx)
leaq (%rax,%rdx), %rdi
movq %rdi, 0x3c0(%rsp,%rdx)
addq $0x340, %rdx # imm = 0x340
cmpq $0xd00, %rdx # imm = 0xD00
jne 0x14e2341
leaq 0xc0(%rsp), %rax
movq %rsi, %rdi
movq %rax, %rsi
callq 0xd27a18
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, (%rsp)
leaq 0xc0(%rsp), %rsi
movq %rbx, %rdi
movq %r13, %rdx
movq 0x30(%rsp), %rcx
movq 0x38(%rsp), %r8
movq %r14, %r9
vzeroupper
callq 0x14e273c
movq %rax, %rbx
movq $-0xd00, %r14 # imm = 0xF300
leaq 0xac0(%rsp), %r15
leaq 0xc0(%r15), %rax
movq 0x2c0(%r15), %rdi
cmpq %rdi, %rax
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14e23e9
callq 0x1ee612d
movq 0x40(%r15), %rdi
cmpq %rdi, %r15
sete %al
testq %rdi, %rdi
sete %cl
orb %al, %cl
jne 0x14e2402
callq 0x6a370
addq $-0x340, %r15 # imm = 0xFCC0
addq $0x340, %r14 # imm = 0x340
jne 0x14e23c6
jmp 0x14e259a
cmpq $0x1, %r14
jbe 0x14e2424
xorl %ebx, %ebx
jmp 0x14e259a
leaq 0x3c0(%rsp), %rax
xorl %ecx, %ecx
movl $0xd00, %edx # imm = 0xD00
movq %rax, %rdi
movl $0xd00, %r8d # imm = 0xD00
leaq -0x2c0(%rdi), %r9
movq %r9, -0x280(%rdi)
movq $0x0, -0x238(%rdi)
leaq -0x200(%rdi), %r9
movq %r9, (%rdi)
addq $0x340, %rdi # imm = 0x340
addq $-0x340, %r8 # imm = 0xFCC0
jne 0x14e243c
addq %rdx, %rcx
addq %rdx, %rax
cmpq $0xd000, %rcx # imm = 0xD000
jne 0x14e2433
leaq 0xc0(%rsp), %r15
leaq 0x2c(%rsp), %rdx
movq %rsi, %rdi
movq %r15, %rsi
callq 0xd28032
cmpl $0x4, 0x2c(%rsp)
jne 0x14e250b
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x40(%rsp)
leaq 0xc0(%rsp), %r15
movq %r15, %rdi
vzeroupper
callq 0xd2986c
incq %r14
xorl %r12d, %r12d
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, (%rsp)
movq %rbx, %rdi
movq %r15, %rsi
movq %r13, %rdx
movq 0x30(%rsp), %rcx
movq 0x38(%rsp), %r8
movq %r14, %r9
vzeroupper
callq 0x14e273c
movq %rax, 0x40(%rsp,%r12,8)
incq %r12
addq $0xd00, %r15 # imm = 0xD00
cmpq $0x4, %r12
jne 0x14e24c0
leaq 0x40(%rsp), %rsi
movq %rbx, %rdi
callq 0x14e3016
jmp 0x14e258a
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0xa0(%rsp)
vmovaps %ymm0, 0x80(%rsp)
vmovaps %ymm0, 0x60(%rsp)
vmovaps %ymm0, 0x40(%rsp)
movl 0x2c(%rsp), %edx
testl %edx, %edx
je 0x14e257a
incq %r14
xorl %r12d, %r12d
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, (%rsp)
movq %rbx, %rdi
movq %r15, %rsi
movq %r13, %rdx
movq 0x30(%rsp), %rcx
movq 0x38(%rsp), %r8
movq %r14, %r9
vzeroupper
callq 0x14e273c
movq %rax, 0x40(%rsp,%r12,8)
incq %r12
movl 0x2c(%rsp), %edx
addq $0xd00, %r15 # imm = 0xD00
cmpq %rdx, %r12
jb 0x14e253b
leaq 0x40(%rsp), %rsi
movq %rbx, %rdi
vzeroupper
callq 0x14e3048
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0xd29ece
movq %rbx, %rax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0xd27fc0
jmp 0x14e25de
jmp 0x14e25ce
jmp 0x14e25ce
jmp 0x14e25ce
movq %rax, %rdi
callq 0x8d6de8
jmp 0x14e25ce
movq %rax, %rbx
leaq 0xc0(%rsp), %rdi
callq 0xd29ece
movq %rbx, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/common/../subdiv/patch.h
|
embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::Ref embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::SubdividedGeneralPatch::create<embree::avx::PatchEvalSimd<embree::vboolf_impl<4>, embree::vint_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>>::PatchEvalSimd(embree::SharedLazyTessellationCache::CacheEntry&, unsigned long, embree::HalfEdge const*, char const*, unsigned long, embree::vboolf_impl<4> const&, embree::vfloat_impl<4> const&, embree::vfloat_impl<4> const&, float*, float*, float*, float*, float*, float*, unsigned long, unsigned long)::'lambda'()::operator()() const::'lambda'(unsigned long)>(embree::avx::PatchEvalSimd<embree::vboolf_impl<4>, embree::vint_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>, embree::vfloat_impl<4>>::PatchEvalSimd(embree::SharedLazyTessellationCache::CacheEntry&, unsigned long, embree::HalfEdge const*, char const*, unsigned long, embree::vboolf_impl<4> const&, embree::vfloat_impl<4> const&, embree::vfloat_impl<4> const&, float*, float*, float*, float*, float*, float*, unsigned long, unsigned long)::'lambda'()::operator()() const::'lambda'(unsigned long) const&, embree::PatchT<embree::vfloat_impl<4>, embree::vfloat_impl<4>>::Ref*, unsigned int)
|
__noinline static Ref create(const Allocator& alloc, Ref* children, const unsigned N) {
return Ref(SUBDIVIDED_GENERAL_PATCH, new (alloc(sizeof(SubdividedGeneralPatch))) SubdividedGeneralPatch(children,N));
}
|
pushq %rbp
pushq %rbx
pushq %rax
movl %edx, %ebp
movq %rsi, %rbx
movl $0x88, %esi
callq 0x14e25e6
movl %ebp, (%rax)
vxorps %xmm0, %xmm0, %xmm0
vmovups %ymm0, 0x8(%rax)
vmovups %ymm0, 0x28(%rax)
vmovups %ymm0, 0x48(%rax)
vmovups %ymm0, 0x68(%rax)
testl %ebp, %ebp
je 0x14e3090
leaq 0x8(%rax), %rcx
movl %ebp, %edx
xorl %esi, %esi
movq (%rbx,%rsi,8), %rdi
movq %rdi, (%rcx,%rsi,8)
incq %rsi
cmpq %rsi, %rdx
jne 0x14e3080
addq $0x7, %rax
addq $0x8, %rsp
popq %rbx
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/common/../subdiv/patch.h
|
embree::avx::GridSOA::buildBVH(embree::GridRange const&, unsigned long&)
|
std::pair<BVH4::NodeRef,BBox3fa> GridSOA::buildBVH(const GridRange& range, size_t& allocator)
{
/*! create leaf node */
if (unlikely(range.hasLeafSize()))
{
/* we store index of first subgrid vertex as leaf node */
BVH4::NodeRef curNode = BVH4::encodeTypedLeaf(encodeLeaf(range.u_start,range.v_start),0);
/* return bounding box */
return std::make_pair(curNode,calculateBounds(0,range));
}
/* create internal node */
else
{
/* allocate new bvh4 node */
BVH4::AABBNode* node = (BVH4::AABBNode *)&bvhData()[allocator];
allocator += sizeof(BVH4::AABBNode);
node->clear();
/* split range */
GridRange r[4];
const unsigned children = range.splitIntoSubRanges(r);
/* recurse into subtrees */
BBox3fa bounds( empty );
for (unsigned i=0; i<children; i++)
{
std::pair<BVH4::NodeRef,BBox3fa> node_bounds = buildBVH(r[i], allocator);
node->set(i,node_bounds.first,node_bounds.second);
bounds.extend(node_bounds.second);
}
assert(is_finite(bounds));
return std::make_pair(BVH4::encodeNode(node),bounds);
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xa8, %rsp
movq %rdi, %rax
movl (%rdx), %r12d
movl 0x4(%rdx), %ebp
movl %ebp, %edi
subl %r12d, %edi
incl %edi
cmpl $0x3, %edi
ja 0x14e6586
movl 0xc(%rdx), %r13d
movl 0x8(%rdx), %edi
movl %r13d, %r8d
subl %edi, %r8d
incl %r8d
cmpl $0x3, %r8d
jbe 0x14e682e
movq (%rcx), %rbp
leaq 0x80(%rbp), %rdi
movq %rcx, %r14
movq %rdi, (%rcx)
vbroadcastss 0xa05481(%rip), %xmm1 # 0x1eeba20
vmovaps %xmm1, 0x90(%rsi,%rbp)
vmovaps %xmm1, 0x70(%rsi,%rbp)
vmovaps %xmm1, 0x50(%rsi,%rbp)
vbroadcastss 0xa065c7(%rip), %xmm2 # 0x1eecb84
vmovaps %xmm2, 0xa0(%rsi,%rbp)
vmovaps %xmm2, 0x80(%rsi,%rbp)
vmovaps %xmm2, 0x60(%rsi,%rbp)
vbroadcastsd 0xa3b89a(%rip), %ymm0 # 0x1f21e78
vmovups %ymm0, 0x30(%rsi,%rbp)
movl (%rdx), %edi
movl 0x4(%rdx), %r12d
movl %r12d, %r9d
subl %edi, %r9d
incl %r9d
movl 0xc(%rdx), %ecx
movl 0x8(%rdx), %r8d
movl %ecx, %edx
subl %r8d, %edx
incl %edx
cmpl %edx, %r9d
jae 0x14e6617
leal (%r8,%rcx), %edx
shrl %edx
movl %edi, %r13d
movl %r12d, %r10d
movl %edx, %r9d
jmp 0x14e6627
leal (%rdi,%r12), %r13d
shrl %r13d
movl %r8d, %edx
movl %r13d, %r10d
movl %ecx, %r9d
movl %r10d, %r11d
subl %edi, %r11d
incl %r11d
movl %r9d, %ebx
subl %r8d, %ebx
incl %ebx
movl %ebx, %r15d
orl %r11d, %r15d
cmpl $0x3, %r15d
ja 0x14e665e
movl %edi, 0x40(%rsp)
movl %r10d, 0x44(%rsp)
movl %r8d, 0x48(%rsp)
movl %r9d, 0x4c(%rsp)
movl $0x1, %ebx
jmp 0x14e66ac
vmovd %edi, %xmm0
vpinsrd $0x1, %r10d, %xmm0, %xmm0
vpinsrd $0x2, %r8d, %xmm0, %xmm0
vpinsrd $0x3, %r9d, %xmm0, %xmm0
vmovdqa %xmm0, 0x50(%rsp)
vmovdqa %xmm0, 0x40(%rsp)
cmpl %ebx, %r11d
jae 0x14e6697
addl %r8d, %r9d
shrl %r9d
movl %r9d, 0x4c(%rsp)
movl %r9d, 0x58(%rsp)
jmp 0x14e66a7
addl %edi, %r10d
shrl %r10d
movl %r10d, 0x44(%rsp)
movl %r10d, 0x50(%rsp)
movl $0x2, %ebx
movl %r12d, %r8d
subl %r13d, %r8d
incl %r8d
movl %ecx, %r9d
subl %edx, %r9d
incl %r9d
movl %r8d, %r10d
orl %r9d, %r10d
movl %ebx, %edi
shll $0x4, %edi
addq %rsp, %rdi
addq $0x40, %rdi
cmpl $0x3, %r10d
movq %rax, 0x8(%rsp)
ja 0x14e66ef
movl %r13d, (%rdi)
movl %r12d, 0x4(%rdi)
movl %edx, 0x8(%rdi)
movl %ecx, 0xc(%rdi)
movl $0x1, %eax
jmp 0x14e6733
vmovd %r13d, %xmm0
vpinsrd $0x1, %r12d, %xmm0, %xmm0
vpinsrd $0x2, %edx, %xmm0, %xmm0
vpinsrd $0x3, %ecx, %xmm0, %xmm0
vmovdqa %xmm0, 0x10(%rdi)
vmovdqa %xmm0, (%rdi)
cmpl %r9d, %r8d
jae 0x14e6720
addl %ecx, %edx
shrl %edx
movl %edx, 0xc(%rdi)
movl %edx, 0x18(%rdi)
jmp 0x14e672e
addl %r12d, %r13d
shrl %r13d
movl %r13d, 0x4(%rdi)
movl %r13d, 0x10(%rdi)
movl $0x2, %eax
leaq (%rsi,%rbp), %rcx
addq $0x30, %rcx
movq %rcx, (%rsp)
addl %eax, %ebx
addq %rsi, %rbp
leaq 0x40(%rsp), %r12
xorl %r15d, %r15d
movq %rsi, %r13
vmovaps %xmm2, 0x80(%rsp)
vmovaps %xmm1, 0x90(%rsp)
leaq 0x10(%rsp), %rdi
movq %r13, %rsi
movq %r12, %rdx
movq %r14, %rcx
vzeroupper
callq 0x14e6546
vmovaps 0x80(%rsp), %xmm2
vmovaps 0x90(%rsp), %xmm1
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x50(%rbp,%r15,4)
vmovss 0x24(%rsp), %xmm0
vmovss %xmm0, 0x70(%rbp,%r15,4)
vmovss 0x28(%rsp), %xmm0
vmovss %xmm0, 0x90(%rbp,%r15,4)
vmovss 0x30(%rsp), %xmm0
vmovss %xmm0, 0x60(%rbp,%r15,4)
vmovss 0x34(%rsp), %xmm0
vmovss %xmm0, 0x80(%rbp,%r15,4)
vmovss 0x38(%rsp), %xmm0
vmovss %xmm0, 0xa0(%rbp,%r15,4)
movq 0x10(%rsp), %rax
movq %rax, 0x30(%rbp,%r15,8)
vminps 0x20(%rsp), %xmm1, %xmm1
vmaxps 0x30(%rsp), %xmm2, %xmm2
incq %r15
addq $0x10, %r12
cmpq %r15, %rbx
jne 0x14e674f
movq 0x8(%rsp), %rax
movq (%rsp), %rcx
movq %rcx, (%rax)
vmovaps %xmm1, 0x10(%rax)
vmovaps %xmm2, 0x20(%rax)
addq $0xa8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
movl 0xc(%rsi), %r8d
movq %r8, %rdx
imulq %rdi, %rdx
addq %r12, %rdx
shlq $0x4, %rdx
addq $0x18, %rdx
cmpl %r13d, %edi
jbe 0x14e685d
vbroadcastss 0xa051ce(%rip), %xmm1 # 0x1eeba20
vbroadcastss 0xa06329(%rip), %xmm0 # 0x1eecb84
jmp 0x14e68d2
movl 0x14(%rsi), %r11d
movl 0x24(%rsi), %r9d
leaq (%rsi,%r9), %rbx
addq %rsi, %r9
addq $0x30, %r9
leaq 0x30(%rbx,%r11,4), %r10
addl %r11d, %r11d
leaq (%rbx,%r11,4), %r11
addq $0x30, %r11
movl %edi, %ebx
imull %r8d, %ebx
vbroadcastss 0xa062f5(%rip), %xmm0 # 0x1eecb84
vbroadcastss 0xa05188(%rip), %xmm1 # 0x1eeba20
movl %r12d, %r14d
cmpl %ebp, %r12d
ja 0x14e68c8
leal (%rbx,%r14), %r15d
vmovss (%r9,%r15,4), %xmm2
vinsertps $0x1c, (%r10,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, (%r11,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vminps %xmm2, %xmm1, %xmm1
vmaxps %xmm2, %xmm0, %xmm0
incl %r14d
cmpl %ebp, %r14d
jbe 0x14e68a0
incl %edi
addl %r8d, %ebx
cmpl %r13d, %edi
jbe 0x14e6898
movq %rdx, (%rax)
vmovaps %xmm1, 0x10(%rax)
vmovaps %xmm0, 0x20(%rax)
jmp 0x14e681c
|
/embree[P]embree/kernels/geometry/grid_soa.cpp
|
embree::avx::patchEval(embree::SubdivPatch1Base const&, float, float)
|
Vec3fa patchEval(const SubdivPatch1Base& patch, const float uu, const float vv)
{
if (likely(patch.type == SubdivPatch1Base::BEZIER_PATCH))
return ((BezierPatch3fa*)patch.patch_v)->eval(uu,vv);
else if (likely(patch.type == SubdivPatch1Base::BSPLINE_PATCH))
return ((BSplinePatch3fa*)patch.patch_v)->eval(uu,vv);
else if (likely(patch.type == SubdivPatch1Base::GREGORY_PATCH))
return ((DenseGregoryPatch3fa*)patch.patch_v)->eval(uu,vv);
else if (likely(patch.type == SubdivPatch1Base::BILINEAR_PATCH))
return ((BilinearPatch3fa*)patch.patch_v)->eval(uu,vv);
return Vec3fa( zero );
}
|
movq %rdi, %rax
movzbl 0x2d(%rsi), %ecx
cmpl $0x2, %ecx
jne 0x14e7555
vmovss 0xa05300(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm3
vmulss %xmm3, %xmm3, %xmm4
vmulss %xmm4, %xmm3, %xmm5
vmovss 0xa09bc4(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm0, %xmm7
vmulss %xmm4, %xmm7, %xmm4
vmulss %xmm0, %xmm0, %xmm7
vmulss %xmm6, %xmm7, %xmm8
vmulss %xmm3, %xmm8, %xmm8
vmulss %xmm0, %xmm7, %xmm7
vsubss %xmm1, %xmm2, %xmm3
vmulss %xmm3, %xmm3, %xmm2
vmulss %xmm2, %xmm3, %xmm0
vmulss %xmm6, %xmm1, %xmm9
vmulss %xmm2, %xmm9, %xmm2
vmulss %xmm1, %xmm1, %xmm9
vmulss %xmm6, %xmm9, %xmm6
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm1, %xmm9, %xmm1
vshufps $0x0, %xmm7, %xmm7, %xmm6 # xmm6 = xmm7[0,0,0,0]
vmulps 0x70(%rsi), %xmm6, %xmm7
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps 0x60(%rsi), %xmm8, %xmm9
vaddps %xmm7, %xmm9, %xmm7
vshufps $0x0, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[0,0,0,0]
vmulps 0x50(%rsi), %xmm9, %xmm4
vaddps %xmm7, %xmm4, %xmm4
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps 0x40(%rsi), %xmm5, %xmm7
vaddps %xmm4, %xmm7, %xmm4
vmulps 0xb0(%rsi), %xmm6, %xmm7
vmulps 0xa0(%rsi), %xmm8, %xmm10
vaddps %xmm7, %xmm10, %xmm7
vmulps 0x90(%rsi), %xmm9, %xmm10
vaddps %xmm7, %xmm10, %xmm7
vmulps 0x80(%rsi), %xmm5, %xmm10
vmulps 0xf0(%rsi), %xmm6, %xmm11
vaddps %xmm7, %xmm10, %xmm7
vmulps 0xe0(%rsi), %xmm8, %xmm10
vaddps %xmm10, %xmm11, %xmm10
vmulps 0xd0(%rsi), %xmm9, %xmm11
vmulps 0xc0(%rsi), %xmm5, %xmm12
vaddps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmulps 0x130(%rsi), %xmm6, %xmm6
vmulps 0x120(%rsi), %xmm8, %xmm8
vmulps 0x110(%rsi), %xmm9, %xmm9
vaddps %xmm6, %xmm8, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vmulps 0x100(%rsi), %xmm5, %xmm5
vaddps %xmm6, %xmm5, %xmm5
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm5, %xmm1, %xmm1
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm10, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm2
vmovaps %xmm2, (%rax)
retq
cmpl $0x1, %ecx
jne 0x14e7740
vmovss 0xa051ae(%rip), %xmm4 # 0x1eec714
vsubss %xmm1, %xmm4, %xmm3
vmulss %xmm3, %xmm3, %xmm2
vmulss %xmm2, %xmm3, %xmm6
vmulss %xmm1, %xmm1, %xmm2
vmulss %xmm1, %xmm2, %xmm7
vmovss 0xa0560a(%rip), %xmm2 # 0x1eecb8c
vmulss %xmm2, %xmm6, %xmm5
vaddss %xmm7, %xmm5, %xmm5
vmulss %xmm1, %xmm3, %xmm8
vmulss %xmm3, %xmm8, %xmm9
vmulss %xmm1, %xmm8, %xmm8
vmovss 0xa09a56(%rip), %xmm1 # 0x1ef0ff4
vmulss %xmm1, %xmm8, %xmm10
vmovss 0xa09a52(%rip), %xmm3 # 0x1ef0ffc
vmulss %xmm3, %xmm9, %xmm11
vaddss %xmm10, %xmm11, %xmm10
vaddss %xmm5, %xmm10, %xmm10
vmulss %xmm2, %xmm7, %xmm5
vaddss %xmm6, %xmm5, %xmm5
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm3, %xmm8, %xmm8
vaddss %xmm9, %xmm8, %xmm8
vaddss %xmm5, %xmm8, %xmm8
vmovss 0xa09a28(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm6, %xmm6
vmulss %xmm5, %xmm10, %xmm9
vmulss %xmm5, %xmm8, %xmm8
vmulss %xmm5, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm10 # xmm10 = xmm7[0,0,0,0]
vmulps 0x100(%rsi), %xmm10, %xmm7
vshufps $0x0, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[0,0,0,0]
vmulps 0xc0(%rsi), %xmm11, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps 0x80(%rsi), %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0x0, %xmm6, %xmm6, %xmm12 # xmm12 = xmm6[0,0,0,0]
vmulps 0x40(%rsi), %xmm12, %xmm6
vaddps %xmm7, %xmm6, %xmm6
vmulps 0x110(%rsi), %xmm10, %xmm7
vmulps 0xd0(%rsi), %xmm11, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps 0x90(%rsi), %xmm9, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vmulps 0x50(%rsi), %xmm12, %xmm8
vmulps 0x120(%rsi), %xmm10, %xmm13
vaddps %xmm7, %xmm8, %xmm7
vmulps 0xe0(%rsi), %xmm11, %xmm8
vaddps %xmm8, %xmm13, %xmm8
vmulps 0xa0(%rsi), %xmm9, %xmm13
vmulps 0x60(%rsi), %xmm12, %xmm14
vaddps %xmm8, %xmm13, %xmm8
vaddps %xmm8, %xmm14, %xmm8
vmulps 0x130(%rsi), %xmm10, %xmm10
vmulps 0xf0(%rsi), %xmm11, %xmm11
vmulps 0xb0(%rsi), %xmm9, %xmm9
vaddps %xmm11, %xmm10, %xmm10
vaddps %xmm10, %xmm9, %xmm9
vmulps 0x70(%rsi), %xmm12, %xmm10
vaddps %xmm9, %xmm10, %xmm9
vsubss %xmm0, %xmm4, %xmm4
vmulss %xmm4, %xmm4, %xmm10
vmulss %xmm4, %xmm10, %xmm10
vmulss %xmm0, %xmm0, %xmm11
vmulss %xmm0, %xmm11, %xmm11
vmulss %xmm2, %xmm10, %xmm12
vaddss %xmm11, %xmm12, %xmm12
vmulss %xmm0, %xmm4, %xmm13
vmulss %xmm4, %xmm13, %xmm4
vmulss %xmm0, %xmm13, %xmm0
vmulss %xmm1, %xmm0, %xmm13
vmulss %xmm3, %xmm4, %xmm14
vaddss %xmm13, %xmm14, %xmm13
vaddss %xmm13, %xmm12, %xmm12
vmulss %xmm2, %xmm11, %xmm2
vaddss %xmm2, %xmm10, %xmm2
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm3, %xmm0, %xmm0
vaddss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm2, %xmm0
vmulss %xmm5, %xmm10, %xmm1
vmulss %xmm5, %xmm12, %xmm2
vmulss %xmm5, %xmm0, %xmm0
vmulss %xmm5, %xmm11, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm9, %xmm3
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm8, %xmm0
vaddps %xmm3, %xmm0, %xmm0
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm6, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm2
jmp 0x14e7550
cmpl $0x3, %ecx
jne 0x14e7a20
vshufps $0x0, %xmm1, %xmm0, %xmm2 # xmm2 = xmm0[0,0],xmm1[0,0]
vcmpeqps 0xa73139(%rip), %xmm2, %xmm2 # 0x1f5a890
vtestps %xmm2, %xmm2
jne 0x14e79fb
vmovss 0x4c(%rsi), %xmm2
vmovss 0x8c(%rsi), %xmm3
vmovss 0xcc(%rsi), %xmm4
vmovss 0x10c(%rsi), %xmm5
vinsertps $0x1c, 0x11c(%rsi), %xmm5, %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x12c(%rsi), %xmm5, %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0xdc(%rsi), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0xec(%rsi), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vinsertps $0x1c, 0x9c(%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0xac(%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],zero
vinsertps $0x1c, 0x5c(%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x6c(%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vshufps $0x0, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[0,0,0,0]
vmulps 0x90(%rsi), %xmm6, %xmm7
vshufps $0x0, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[0,0,0,0]
vmulps %xmm2, %xmm8, %xmm2
vaddps %xmm2, %xmm7, %xmm2
vaddss %xmm1, %xmm0, %xmm7
vrcpss %xmm7, %xmm7, %xmm9
vmulss %xmm7, %xmm9, %xmm10
vmovss 0xa09801(%rip), %xmm7 # 0x1ef0ff8
vsubss %xmm10, %xmm7, %xmm10
vmulss %xmm10, %xmm9, %xmm9
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps %xmm2, %xmm9, %xmm2
vmovss 0xa04f01(%rip), %xmm9 # 0x1eec714
vsubss %xmm0, %xmm9, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm11 # xmm11 = xmm10[0,0,0,0]
vmulps %xmm3, %xmm11, %xmm3
vmulps 0xa0(%rsi), %xmm8, %xmm8
vaddps %xmm3, %xmm8, %xmm3
vaddss %xmm1, %xmm10, %xmm8
vrcpss %xmm8, %xmm8, %xmm10
vmulss %xmm10, %xmm8, %xmm8
vsubss %xmm8, %xmm7, %xmm8
vmulss %xmm8, %xmm10, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm3, %xmm8, %xmm3
vmulps 0xe0(%rsi), %xmm11, %xmm8
vsubss %xmm1, %xmm9, %xmm10
vshufps $0x0, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[0,0,0,0]
vmulps %xmm4, %xmm10, %xmm4
vaddps %xmm4, %xmm8, %xmm4
vsubss %xmm0, %xmm7, %xmm8
vsubss %xmm1, %xmm8, %xmm8
vrcpss %xmm8, %xmm8, %xmm11
vmulss %xmm11, %xmm8, %xmm8
vsubss %xmm8, %xmm7, %xmm8
vmulss %xmm8, %xmm11, %xmm8
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm4, %xmm8, %xmm4
vmulps %xmm5, %xmm6, %xmm5
vmulps 0xd0(%rsi), %xmm10, %xmm6
vaddps %xmm6, %xmm5, %xmm5
vaddss %xmm0, %xmm9, %xmm6
vsubss %xmm1, %xmm6, %xmm6
vrcpss %xmm6, %xmm6, %xmm8
vmulss %xmm6, %xmm8, %xmm6
vsubss %xmm6, %xmm7, %xmm6
vmulss %xmm6, %xmm8, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm5, %xmm5
vmovss 0xa04e4c(%rip), %xmm6 # 0x1eec714
vsubss %xmm0, %xmm6, %xmm7
vmulss %xmm7, %xmm7, %xmm8
vmulss %xmm7, %xmm8, %xmm9
vmovss 0xa09710(%rip), %xmm10 # 0x1ef0fec
vmulss %xmm0, %xmm10, %xmm11
vmulss %xmm8, %xmm11, %xmm8
vmulss %xmm0, %xmm0, %xmm11
vmulss %xmm10, %xmm11, %xmm12
vmulss %xmm7, %xmm12, %xmm12
vmulss %xmm0, %xmm11, %xmm11
vsubss %xmm1, %xmm6, %xmm7
vmulss %xmm7, %xmm7, %xmm6
vmulss %xmm6, %xmm7, %xmm0
vmulss %xmm1, %xmm10, %xmm13
vmulss %xmm6, %xmm13, %xmm6
vmulss %xmm1, %xmm1, %xmm13
vmulss %xmm10, %xmm13, %xmm10
vmulss %xmm7, %xmm10, %xmm7
vmulss %xmm1, %xmm13, %xmm1
vshufps $0x0, %xmm11, %xmm11, %xmm10 # xmm10 = xmm11[0,0,0,0]
vmulps 0x70(%rsi), %xmm10, %xmm11
vshufps $0x0, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,0,0]
vmulps 0x60(%rsi), %xmm12, %xmm13
vaddps %xmm13, %xmm11, %xmm11
vshufps $0x0, %xmm8, %xmm8, %xmm13 # xmm13 = xmm8[0,0,0,0]
vmulps 0x50(%rsi), %xmm13, %xmm8
vaddps %xmm11, %xmm8, %xmm8
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vmulps 0x40(%rsi), %xmm9, %xmm11
vaddps %xmm8, %xmm11, %xmm8
vmulps 0xb0(%rsi), %xmm10, %xmm11
vmulps %xmm3, %xmm12, %xmm3
vaddps %xmm3, %xmm11, %xmm3
vmulps %xmm2, %xmm13, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmulps 0x80(%rsi), %xmm9, %xmm3
vaddps %xmm2, %xmm3, %xmm2
vmulps 0xf0(%rsi), %xmm10, %xmm3
vmulps %xmm4, %xmm12, %xmm4
vaddps %xmm3, %xmm4, %xmm3
vmulps %xmm5, %xmm13, %xmm4
vmulps 0xc0(%rsi), %xmm9, %xmm5
vaddps %xmm3, %xmm4, %xmm3
vaddps %xmm3, %xmm5, %xmm3
vmulps 0x130(%rsi), %xmm10, %xmm4
vmulps 0x120(%rsi), %xmm12, %xmm5
vmulps 0x110(%rsi), %xmm13, %xmm10
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm4, %xmm10, %xmm4
vmulps 0x100(%rsi), %xmm9, %xmm5
vaddps %xmm4, %xmm5, %xmm4
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm1
vshufps $0x0, %xmm7, %xmm7, %xmm4 # xmm4 = xmm7[0,0,0,0]
vmulps %xmm3, %xmm4, %xmm3
vaddps %xmm1, %xmm3, %xmm1
vshufps $0x0, %xmm6, %xmm6, %xmm3 # xmm3 = xmm6[0,0,0,0]
vmulps %xmm2, %xmm3, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm8, %xmm0
jmp 0x14e754c
vmovaps 0x90(%rsi), %xmm2
vmovaps 0xa0(%rsi), %xmm3
vmovaps 0xd0(%rsi), %xmm5
vmovaps 0xe0(%rsi), %xmm4
jmp 0x14e78c0
vxorps %xmm2, %xmm2, %xmm2
cmpl $0x6, %ecx
jne 0x14e7550
vmovss 0xa04cdf(%rip), %xmm2 # 0x1eec714
vshufps $0x0, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[0,0,0,0]
vmulps 0x50(%rsi), %xmm3, %xmm4
vsubss %xmm0, %xmm2, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps 0x40(%rsi), %xmm0, %xmm5
vmulps 0x60(%rsi), %xmm3, %xmm3
vmulps 0x70(%rsi), %xmm0, %xmm0
vaddps %xmm5, %xmm4, %xmm4
vaddps %xmm0, %xmm3, %xmm0
vsubss %xmm1, %xmm2, %xmm2
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm1
jmp 0x14e7737
|
/embree[P]embree/kernels/subdiv/subdivpatch1base_eval.cpp
|
embree::avx::evalGrid(embree::SubdivPatch1Base const&, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, float*, float*, float*, float*, float*, embree::SubdivMesh const*)
|
void evalGrid(const SubdivPatch1Base& patch,
const unsigned x0, const unsigned x1,
const unsigned y0, const unsigned y1,
const unsigned swidth, const unsigned sheight,
float *__restrict__ const grid_x,
float *__restrict__ const grid_y,
float *__restrict__ const grid_z,
float *__restrict__ const grid_u,
float *__restrict__ const grid_v,
const SubdivMesh* const geom)
{
const unsigned dwidth = x1-x0+1;
const unsigned dheight = y1-y0+1;
const unsigned M = dwidth*dheight+VSIZEX;
const unsigned grid_size_simd_blocks = (M-1)/VSIZEX;
if (unlikely(patch.type == SubdivPatch1Base::EVAL_PATCH))
{
const bool displ = geom->displFunc;
const unsigned N = displ ? M : 0;
dynamic_large_stack_array(float,grid_Ng_x,N,32*32*sizeof(float));
dynamic_large_stack_array(float,grid_Ng_y,N,32*32*sizeof(float));
dynamic_large_stack_array(float,grid_Ng_z,N,32*32*sizeof(float));
if (geom->patch_eval_trees.size())
{
feature_adaptive_eval_grid<PatchEvalGrid>
(geom->patch_eval_trees[geom->numTimeSteps*patch.primID()+patch.time()], patch.subPatch(), patch.needsStitching() ? patch.level : nullptr,
x0,x1,y0,y1,swidth,sheight,
grid_x,grid_y,grid_z,grid_u,grid_v,
displ ? (float*)grid_Ng_x : nullptr, displ ? (float*)grid_Ng_y : nullptr, displ ? (float*)grid_Ng_z : nullptr,
dwidth,dheight);
}
else
{
GeneralCatmullClarkPatch3fa ccpatch(patch.edge(),geom->getVertexBuffer(patch.time()));
feature_adaptive_eval_grid<FeatureAdaptiveEvalGrid,GeneralCatmullClarkPatch3fa>
(ccpatch, patch.subPatch(), patch.needsStitching() ? patch.level : nullptr,
x0,x1,y0,y1,swidth,sheight,
grid_x,grid_y,grid_z,grid_u,grid_v,
displ ? (float*)grid_Ng_x : nullptr, displ ? (float*)grid_Ng_y : nullptr, displ ? (float*)grid_Ng_z : nullptr,
dwidth,dheight);
}
/* convert sub-patch UVs to patch UVs*/
const Vec2f uv0 = patch.getUV(0);
const Vec2f uv1 = patch.getUV(1);
const Vec2f uv2 = patch.getUV(2);
const Vec2f uv3 = patch.getUV(3);
for (unsigned i=0; i<grid_size_simd_blocks; i++)
{
const vfloatx u = vfloatx::load(&grid_u[i*VSIZEX]);
const vfloatx v = vfloatx::load(&grid_v[i*VSIZEX]);
const vfloatx patch_u = lerp2(uv0.x,uv1.x,uv3.x,uv2.x,u,v);
const vfloatx patch_v = lerp2(uv0.y,uv1.y,uv3.y,uv2.y,u,v);
vfloatx::store(&grid_u[i*VSIZEX],patch_u);
vfloatx::store(&grid_v[i*VSIZEX],patch_v);
}
/* call displacement shader */
if (unlikely(geom->displFunc)) {
RTCDisplacementFunctionNArguments args;
args.geometryUserPtr = geom->userPtr;
args.geometry = (RTCGeometry)geom;
//args.geomID = patch.geomID();
args.primID = patch.primID();
args.timeStep = patch.time();
args.u = grid_u;
args.v = grid_v;
args.Ng_x = grid_Ng_x;
args.Ng_y = grid_Ng_y;
args.Ng_z = grid_Ng_z;
args.P_x = grid_x;
args.P_y = grid_y;
args.P_z = grid_z;
args.N = dwidth*dheight;
geom->displFunc(&args);
}
/* set last elements in u,v array to 1.0f */
const float last_u = grid_u[dwidth*dheight-1];
const float last_v = grid_v[dwidth*dheight-1];
const float last_x = grid_x[dwidth*dheight-1];
const float last_y = grid_y[dwidth*dheight-1];
const float last_z = grid_z[dwidth*dheight-1];
for (unsigned i=dwidth*dheight;i<grid_size_simd_blocks*VSIZEX;i++)
{
grid_u[i] = last_u;
grid_v[i] = last_v;
grid_x[i] = last_x;
grid_y[i] = last_y;
grid_z[i] = last_z;
}
}
else
{
/* grid_u, grid_v need to be padded as we write with SIMD granularity */
gridUVTessellator(patch.level,swidth,sheight,x0,y0,dwidth,dheight,grid_u,grid_v);
/* set last elements in u,v array to last valid point */
const float last_u = grid_u[dwidth*dheight-1];
const float last_v = grid_v[dwidth*dheight-1];
for (unsigned i=dwidth*dheight;i<grid_size_simd_blocks*VSIZEX;i++) {
grid_u[i] = last_u;
grid_v[i] = last_v;
}
/* stitch edges if necessary */
if (unlikely(patch.needsStitching()))
stitchUVGrid(patch.level,swidth,sheight,x0,y0,dwidth,dheight,grid_u,grid_v);
/* iterates over all grid points */
for (unsigned i=0; i<grid_size_simd_blocks; i++)
{
const vfloatx u = vfloatx::load(&grid_u[i*VSIZEX]);
const vfloatx v = vfloatx::load(&grid_v[i*VSIZEX]);
Vec3vfx vtx = patchEval(patch,u,v);
/* evaluate displacement function */
if (unlikely(geom->displFunc != nullptr))
{
const Vec3vfx normal = normalize_safe(patchNormal(patch, u, v));
RTCDisplacementFunctionNArguments args;
args.geometryUserPtr = geom->userPtr;
args.geometry = (RTCGeometry)geom;
//args.geomID = patch.geomID();
args.primID = patch.primID();
args.timeStep = patch.time();
args.u = &u[0];
args.v = &v[0];
args.Ng_x = &normal.x[0];
args.Ng_y = &normal.y[0];
args.Ng_z = &normal.z[0];
args.P_x = &vtx.x[0];
args.P_y = &vtx.y[0];
args.P_z = &vtx.z[0];
args.N = VSIZEX;
geom->displFunc(&args);
}
vfloatx::store(&grid_x[i*VSIZEX],vtx.x);
vfloatx::store(&grid_y[i*VSIZEX],vtx.y);
vfloatx::store(&grid_z[i*VSIZEX],vtx.z);
}
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x4f00, %rsp # imm = 0x4F00
movq %rdx, 0x78(%rsp)
movq %rsi, 0x68(%rsp)
subl %esi, %edx
leal 0x1(%rdx), %r14d
movl %r8d, %esi
movq %rcx, 0x38(%rsp)
subl %ecx, %esi
leal 0x1(%rsi), %ecx
movl %ecx, %eax
imull %r14d, %eax
movq %rax, 0xd8(%rsp)
leal 0x7(%rax), %r10d
movl %r10d, %r12d
shrl $0x3, %r12d
movq %rdi, 0x40(%rsp)
cmpb $0x5, 0x2d(%rdi)
movq %r9, 0x50(%rsp)
movq 0x40(%rbp), %r15
movq 0x38(%rbp), %r13
movq 0x30(%rbp), %rbx
je 0x14e8d79
movq %rdx, 0xe0(%rsp)
testl %ecx, %ecx
je 0x14e8a0e
vmovd 0x50(%rsp), %xmm0
vpshufd $0x0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vpcmpeqd %xmm0, %xmm0, %xmm0
vpaddd %xmm0, %xmm1, %xmm1
movl 0x10(%rbp), %eax
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vpaddd %xmm0, %xmm2, %xmm2
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vcvtdq2ps %ymm3, %ymm3
vrcpps %ymm3, %ymm4
vmulps %ymm3, %ymm4, %ymm5
vbroadcastss 0xa03e40(%rip), %ymm3 # 0x1eec714
vsubps %ymm5, %ymm3, %ymm5
vinsertf128 $0x1, %xmm2, %ymm2, %ymm6
vcvtdq2ps %ymm6, %ymm6
vrcpps %ymm6, %ymm7
vmulps %ymm5, %ymm4, %ymm5
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm6, %ymm7, %ymm5
vsubps %ymm5, %ymm3, %ymm5
vmulps %ymm5, %ymm7, %ymm5
vaddps %ymm5, %ymm7, %ymm5
vmovaps %ymm5, 0xa0(%rsp)
vmovd 0x68(%rsp), %xmm6
vpshufd $0x0, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmovd 0x38(%rsp), %xmm7
vpshufd $0x0, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
movl %r14d, %eax
vpxor %xmm8, %xmm8, %xmm8
xorl %edx, %edx
vbroadcastss 0xa71fe0(%rip), %xmm10 # 0x1f5a910
xorl %edi, %edi
vextractf128 $0x1, %ymm8, %xmm11
testl %r14d, %r14d
je 0x14e89ee
vpcmpgtd %xmm8, %xmm2, %xmm12
vpackssdw %xmm12, %xmm12, %xmm12
vpcmpgtd %xmm11, %xmm2, %xmm13
vpunpcklwd %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[0,0,1,1,2,2,3,3]
vinsertf128 $0x1, %xmm13, %ymm12, %ymm12
vpaddd %xmm7, %xmm8, %xmm13
vpaddd %xmm7, %xmm11, %xmm14
vinsertf128 $0x1, %xmm14, %ymm13, %ymm13
vcvtdq2ps %ymm13, %ymm13
vmulps 0xa0(%rsp), %ymm13, %ymm13
vblendvps %ymm12, %ymm13, %ymm3, %ymm12
xorl %r11d, %r11d
vmovdqa 0xa71f98(%rip), %ymm13 # 0x1f5a920
vpcmpgtd %xmm13, %xmm1, %xmm14
vpackssdw %xmm14, %xmm14, %xmm14
vextractf128 $0x1, %ymm13, %xmm15
vpcmpgtd %xmm15, %xmm1, %xmm9
vpunpcklwd %xmm14, %xmm14, %xmm14 # xmm14 = xmm14[0,0,1,1,2,2,3,3]
vinsertf128 $0x1, %xmm9, %ymm14, %ymm9
vpaddd %xmm6, %xmm13, %xmm14
vpaddd %xmm6, %xmm15, %xmm5
vinsertf128 $0x1, %xmm5, %ymm14, %ymm5
vcvtdq2ps %ymm5, %ymm5
vmulps %ymm5, %ymm4, %ymm5
vblendvps %ymm9, %ymm5, %ymm3, %ymm5
leal (%rdx,%r11), %r9d
vmovups %ymm5, (%rbx,%r9,4)
vmovups %ymm12, (%r13,%r9,4)
addq $0x8, %r11
vpaddd %xmm10, %xmm15, %xmm5
vpaddd %xmm10, %xmm13, %xmm9
vinsertf128 $0x1, %xmm5, %ymm9, %ymm13
cmpq %rax, %r11
jb 0x14e8988
leal 0x1(%rdi), %r9d
vpsubd %xmm0, %xmm11, %xmm11
vpsubd %xmm0, %xmm8, %xmm8
vinsertf128 $0x1, %xmm11, %ymm8, %ymm8
addq %rax, %rdx
cmpl %esi, %edi
movl %r9d, %edi
jne 0x14e8932
movq %r12, %r15
movl %r10d, %eax
andl $-0x8, %eax
movq 0xd8(%rsp), %rcx
cmpl %eax, %ecx
jae 0x14e8b23
leal -0x1(%rcx), %edx
movl %ecx, %edi
movl %eax, %r9d
subq %rdi, %r9
leaq 0x7(%r9), %rax
andq $-0x8, %rax
decq %r9
vmovq %r9, %xmm0
vpshufd $0x44, %xmm0, %xmm2 # xmm2 = xmm0[0,1,0,1]
vbroadcastss (%rbx,%rdx,4), %ymm0
vbroadcastss (%r13,%rdx,4), %ymm1
vinsertf128 $0x1, %xmm2, %ymm2, %ymm8
leaq (%rbx,%rdi,4), %rdx
leaq (,%rdi,4), %rdi
addq %r13, %rdi
xorl %r9d, %r9d
vmovaps 0xa393cc(%rip), %ymm2 # 0x1f21e40
vmovaps 0xa3f5c4(%rip), %ymm3 # 0x1f28040
vextractf128 $0x1, %ymm8, %xmm5
vmovddup 0xa393fe(%rip), %xmm4 # xmm4 = mem[0,0]
vxorps %xmm4, %xmm5, %xmm5
vxorps %xmm4, %xmm8, %xmm6
vpcmpeqd %xmm7, %xmm7, %xmm7
vxorps %xmm4, %xmm8, %xmm8
vmovq %r9, %xmm9
vpshufd $0x44, %xmm9, %xmm9 # xmm9 = xmm9[0,1,0,1]
vinsertf128 $0x1, %xmm9, %ymm9, %ymm9
vorps %ymm2, %ymm9, %ymm10
vorps %ymm3, %ymm9, %ymm9
vextractf128 $0x1, %ymm9, %xmm11
vxorps %xmm4, %xmm11, %xmm11
vpcmpgtq %xmm5, %xmm11, %xmm11
vxorps %xmm4, %xmm9, %xmm9
vpcmpgtq %xmm6, %xmm9, %xmm9
vpackssdw %xmm11, %xmm9, %xmm9
vpxor %xmm7, %xmm9, %xmm9
vextractf128 $0x1, %ymm10, %xmm11
vxorps %xmm4, %xmm11, %xmm11
vpcmpgtq %xmm5, %xmm11, %xmm11
vxorps %xmm4, %xmm10, %xmm10
vpcmpgtq %xmm8, %xmm10, %xmm10
vpackssdw %xmm11, %xmm10, %xmm10
vpxor %xmm7, %xmm10, %xmm10
vpackssdw %xmm9, %xmm10, %xmm9
vpmovsxwd %xmm9, %xmm10
vpunpckhwd %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[4,4,5,5,6,6,7,7]
vinsertf128 $0x1, %xmm9, %ymm10, %ymm9
vmaskmovps %ymm0, %ymm9, (%rdx,%r9,4)
vmaskmovps %ymm1, %ymm9, (%rdi,%r9,4)
addq $0x8, %r9
cmpq %r9, %rax
jne 0x14e8a9a
movq 0x40(%rsp), %rax
testb $0x10, 0x2c(%rax)
jne 0x14e8def
cmpl $0x8, %r10d
jae 0x14e8b4a
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
cmpl $0x1, %r15d
adcl $0x0, %r15d
shlq $0x5, %r15
xorl %r14d, %r14d
leaq 0x160(%rsp), %r12
vmovaps (%rbx,%r14), %ymm0
vmovaps %ymm0, 0x3e80(%rsp)
vmovaps (%r13,%r14), %ymm0
vmovaps %ymm0, 0x160(%rsp)
leaq 0x2200(%rsp), %rdi
movq 0x40(%rsp), %rsi
leaq 0x3e80(%rsp), %rdx
movq %r12, %rcx
vzeroupper
callq 0x14ec9b7
movq 0x40(%rbp), %rax
cmpq $0x0, 0x58(%rax)
jne 0x14e8bf6
vmovaps 0x2200(%rsp), %ymm0
movq 0x18(%rbp), %rax
vmovaps %ymm0, (%rax,%r14)
vmovaps 0x2220(%rsp), %ymm0
movq 0x20(%rbp), %rax
vmovaps %ymm0, (%rax,%r14)
vmovaps 0x2240(%rsp), %ymm0
movq 0x28(%rbp), %rax
vmovaps %ymm0, (%rax,%r14)
addq $0x20, %r14
cmpq %r14, %r15
jne 0x14e8b61
jmp 0x14e8b38
leaq 0x180(%rsp), %rdi
movq 0x40(%rsp), %rsi
leaq 0x3e80(%rsp), %rdx
movq %r12, %rcx
callq 0x14eda13
vmovaps 0x180(%rsp), %ymm0
vmovaps 0x1a0(%rsp), %ymm1
vmovaps 0x1c0(%rsp), %ymm2
vmulps %ymm2, %ymm2, %ymm3
vmulps %ymm1, %ymm1, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vmulps %ymm0, %ymm0, %ymm4
vaddps %ymm3, %ymm4, %ymm3
vrsqrtps %ymm3, %ymm4
vcmpeqps 0xa382b1(%rip), %ymm3, %ymm5 # 0x1f20f00
vbroadcastss 0xa03ac0(%rip), %ymm6 # 0x1eec718
vmulps %ymm6, %ymm4, %ymm6
vbroadcastss 0xa03ab7(%rip), %ymm7 # 0x1eec71c
vmulps %ymm7, %ymm3, %ymm3
vmulps %ymm3, %ymm4, %ymm3
vmulps %ymm4, %ymm4, %ymm4
vmulps %ymm3, %ymm4, %ymm3
vaddps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm0, %ymm4
vmulps %ymm3, %ymm1, %ymm6
vmulps %ymm3, %ymm2, %ymm3
vblendvps %ymm5, %ymm0, %ymm4, %ymm0
vblendvps %ymm5, %ymm1, %ymm6, %ymm1
vblendvps %ymm5, %ymm2, %ymm3, %ymm2
vmovaps %ymm0, 0x11c0(%rsp)
vmovaps %ymm1, 0x11e0(%rsp)
vmovaps %ymm2, 0x1200(%rsp)
movq 0x40(%rbp), %rcx
movq 0x18(%rcx), %rax
movq %rax, 0x180(%rsp)
movq %rcx, 0x188(%rsp)
movq 0x40(%rsp), %rdx
movl 0x34(%rdx), %eax
movl %eax, 0x190(%rsp)
movl 0x3c(%rdx), %eax
movl %eax, 0x194(%rsp)
leaq 0x3e80(%rsp), %rax
movq %rax, 0x198(%rsp)
movq %r12, 0x1a0(%rsp)
leaq 0x11c0(%rsp), %rax
movq %rax, 0x1a8(%rsp)
leaq 0x11e0(%rsp), %rax
movq %rax, 0x1b0(%rsp)
leaq 0x1200(%rsp), %rax
movq %rax, 0x1b8(%rsp)
leaq 0x2200(%rsp), %rax
movq %rax, 0x1c0(%rsp)
leaq 0x2220(%rsp), %rax
movq %rax, 0x1c8(%rsp)
leaq 0x2240(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x8, 0x1d8(%rsp)
leaq 0x180(%rsp), %rdi
vzeroupper
callq *0x58(%rcx)
jmp 0x14e8bab
movq %rcx, 0x128(%rsp)
movq %r14, 0x58(%rsp)
movq %r8, 0x48(%rsp)
movl %r10d, 0x2c(%rsp)
movq 0xd8(%rsp), %rax
leal 0x8(%rax), %r14d
movq 0x58(%r15), %rax
testq %rax, %rax
movq %rax, 0x108(%rsp)
cmoveq %rax, %r14
movq %r14, 0x21c8(%rsp)
leaq (,%r14,4), %rdi
cmpq $0x401, %r14 # imm = 0x401
jae 0x14e8f5b
leaq 0x11c0(%rsp), %rax
movq %rax, 0x1000(%rax)
leaq 0x180(%rsp), %rax
movq %r14, 0x1008(%rax)
jmp 0x14e8f97
movq %r14, 0x58(%rsp)
movl %r10d, 0x2c(%rsp)
movq 0x40(%rsp), %r14
vcvttss2si 0x1c(%r14), %rax
vcvttss2si 0x20(%r14), %r9
movq 0x78(%rsp), %rcx
incl %ecx
movl %ecx, 0xa0(%rsp)
vcvttss2si 0x24(%r14), %r10
movq %r8, 0x48(%rsp)
incl %r8d
leal 0x1(%rax), %edx
leal 0x1(%r10), %r12d
cmpl $0x0, 0x38(%rsp)
sete %dil
movq 0x50(%rsp), %r11
cmpl %r11d, %edx
setb %dl
andb %dil, %dl
vcvttss2si 0x28(%r14), %rdi
cmpb $0x1, %dl
je 0x14e9bd4
leal 0x1(%r9), %r14d
movl 0x10(%rbp), %ecx
cmpl %ecx, %r8d
sete %al
cmpl %r11d, %r12d
setb %dl
andb %al, %dl
cmpb $0x1, %dl
je 0x14e9ea5
leal 0x1(%rdi), %r10d
cmpl $0x0, 0x68(%rsp)
sete %al
cmpl %ecx, %r14d
setb %dl
andb %al, %dl
cmpb $0x1, %dl
je 0x14ea179
cmpl %r11d, 0xa0(%rsp)
sete %al
cmpl %ecx, %r10d
setb %cl
andb %al, %cl
cmpb $0x1, %cl
movl 0x2c(%rsp), %r10d
movq 0x48(%rsp), %rax
jne 0x14e8b32
cmpl 0x38(%rsp), %eax
jb 0x14e8f31
movl %edi, %eax
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rax, %xmm3, %xmm0
vrcpss %xmm0, %xmm0, %xmm1
vmulss %xmm0, %xmm1, %xmm0
vmovss 0xa08123(%rip), %xmm2 # 0x1ef0ff8
vsubss %xmm0, %xmm2, %xmm0
vmulss %xmm0, %xmm1, %xmm0
movl 0x10(%rbp), %eax
leal -0x2(,%rax,2), %r8d
movq 0x38(%rsp), %rax
leal 0x1(,%rax,2), %ecx
imull %edi, %ecx
addl %edi, %edi
xorl %r9d, %r9d
movl %ecx, %eax
cltd
idivl %r8d
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %eax, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm1
movl %r9d, %eax
vmovss %xmm1, (%r13,%rax,4)
movq 0x38(%rsp), %rdx
incl %edx
addl 0x58(%rsp), %r9d
addl %edi, %ecx
movq %rdx, 0x38(%rsp)
cmpl 0x48(%rsp), %edx
jbe 0x14e8efc
movl 0x10(%rbp), %eax
decl %eax
cmpl 0x48(%rsp), %eax
movl 0x2c(%rsp), %r10d
movq 0x58(%rsp), %rax
jne 0x14e8b32
imull %eax, %esi
movl $0x3f800000, (%r13,%rsi,4) # imm = 0x3F800000
jmp 0x14e8b32
movl $0x40, %esi
movq %rdi, 0xa0(%rsp)
callq 0x1ee60ac
movq %rax, 0x21c0(%rsp)
movq %r14, 0x1188(%rsp)
movl $0x40, %esi
movq 0xa0(%rsp), %rdi
callq 0x1ee60ac
movq 0xa0(%rsp), %rdi
movq %rax, 0x1180(%rsp)
movq %r14, 0x4e88(%rsp)
cmpq $0x401, %r14 # imm = 0x401
jae 0x14e8fbf
leaq 0x3e80(%rsp), %rax
movl 0x2c(%rsp), %r14d
jmp 0x14e8fce
movl $0x40, %esi
callq 0x1ee60ac
movl 0x2c(%rsp), %r14d
movq %rax, 0x4e80(%rsp)
movq 0x308(%r15), %rax
cmpq %rax, 0x310(%r15)
je 0x14e9090
movq 0x40(%rsp), %r8
movl 0x34(%r8), %ecx
movl 0x48(%r8), %esi
imull 0x24(%r15), %ecx
addl 0x3c(%r8), %ecx
leaq 0x1c(%r8), %rdi
xorl %edx, %edx
testb $0x10, 0x2c(%r8)
cmovneq %rdi, %rdx
leaq (%rax,%rcx,8), %rdi
movq 0x108(%rsp), %r11
movq %r11, %rax
movq %r11, %r10
testq %r11, %r11
je 0x14e903e
movq 0x21c0(%rsp), %r11
movq 0x1180(%rsp), %rax
movq 0x4e80(%rsp), %r10
subq $0x8, %rsp
movq 0x70(%rsp), %rcx
movq 0x80(%rsp), %r8
movq 0x40(%rsp), %r9
pushq 0x130(%rsp)
pushq 0x68(%rsp)
pushq %r10
pushq %rax
pushq %r11
pushq %r13
pushq %rbx
pushq 0x28(%rbp)
pushq 0x20(%rbp)
pushq 0x18(%rbp)
movl 0x10(%rbp), %eax
pushq %rax
pushq 0xb0(%rsp)
pushq 0xb0(%rsp)
callq 0x14ec20b
addq $0x70, %rsp
jmp 0x14e9839
movq %r12, 0x130(%rsp)
movq 0x40(%rsp), %rax
movq 0x40(%rax), %rcx
movq %rcx, 0x120(%rsp)
movl 0x3c(%rax), %ecx
movq 0xd0(%r15), %rax
movl $0x1c00, %edx # imm = 0x1C00
leaq 0x2564(%rsp), %rsi
leaq -0x324(%rsi), %rdi
movq %rdi, -0x124(%rsi)
leaq -0xe4(%rsi), %rdi
movq %rdi, -0x64(%rsi)
movq $0x0, -0x8(%rsi)
movl $0x0, (%rsi)
addq $0x380, %rsi # imm = 0x380
addq $-0x380, %rdx # imm = 0xFC80
jne 0x14e90c0
imulq $0x38, %rcx, %rcx
leaq 0x3e00(%rsp), %rsi
leaq 0x2200(%rsp), %rdx
movq %rdx, (%rsi)
movq (%rax,%rcx), %rdx
movq %rdx, 0x30(%rsp)
movl 0x10(%rax,%rcx), %eax
movq %rax, 0xf8(%rsp)
movl $0x280, %esi # imm = 0x280
xorl %ecx, %ecx
movq 0x120(%rsp), %rdi
cmpq $0x8, %rcx
movq %rdi, 0xd0(%rsp)
jae 0x14e970c
leaq 0x1(%rcx), %rax
movq %rax, 0x138(%rsp)
movq 0x3e00(%rsp), %r9
movq %rcx, 0x140(%rsp)
imulq $0x380, %rcx, %rax # imm = 0x380
leaq (%r9,%rax), %r8
leaq (%r9,%rax), %r10
addq $0x40, %r10
movb $0x1, 0x318(%r10)
movl $0xffffffff, 0x308(%r10) # imm = 0xFFFFFFFF
movl (%rdi), %ecx
imulq 0xf8(%rsp), %rcx
movq 0x30(%rsp), %rdx
vmovups (%rdx,%rcx), %xmm0
vmovaps %xmm0, -0x40(%r10)
vmovss 0x14(%rdi), %xmm0
vmovss %xmm0, 0x30c(%r10)
vmovss 0x18(%rdi), %xmm0
vmovss %xmm0, 0x314(%r10)
movq %r10, 0x90(%rsp)
movl $0x0, 0x310(%r10)
leaq 0x240(%r9,%rax), %rcx
movq %rcx, 0x70(%rsp)
addq %r9, %rax
addq $0x280, %rax # imm = 0x280
movq %rax, 0x118(%rsp)
movq %rsi, 0x148(%rsp)
addq %rsi, %r9
movq %r9, 0x110(%rsp)
movq $0x0, 0x98(%rsp)
movl $0xffffffff, %r9d # imm = 0xFFFFFFFF
movl $0xffffffff, %r10d # imm = 0xFFFFFFFF
movl $0xffffffff, %r15d # imm = 0xFFFFFFFF
xorl %r12d, %r12d
movq %rdi, %rdx
movq %r8, 0xa0(%rsp)
movslq 0x8(%rdx), %rax
movslq 0x4(%rdx), %rcx
movq %rdx, %rdi
movq %rcx, %rdx
shlq $0x5, %rdx
vmovss 0x18(%rdi), %xmm0
vmaxss 0x350(%r8), %xmm0, %xmm0
movl 0x10(%rdi), %esi
movl %esi, 0x104(%rsp)
vmovss %xmm0, 0x350(%r8)
movl (%rdi,%rdx), %esi
cmpl %r15d, %esi
movq %r12, 0x60(%rsp)
cmovbl %r12d, %r9d
movl %r9d, 0x8c(%rsp)
cmovbl 0x98(%rsp), %r10d
movl %r10d, 0x88(%rsp)
leaq (%rdi,%rdx), %r12
cmovbl %esi, %r15d
movl %r15d, 0x84(%rsp)
cmpl %eax, %ecx
jne 0x14e92a4
xorl %r14d, %r14d
jmp 0x14e9372
shlq $0x5, %rax
addq %rax, %rdi
movq %rdi, 0xe0(%rsp)
xorl %r14d, %r14d
movq 0x60(%rsp), %rdx
movq %r12, %r15
movl (%r12), %eax
imulq 0xf8(%rsp), %rax
movq 0x30(%rsp), %rcx
vmovups (%rcx,%rax), %xmm0
leal (%rdx,%r14), %eax
incl %eax
cmpl $0x21, %eax
jae 0x14e9318
leal (%rdx,%r14), %eax
movq 0xa0(%rsp), %rcx
movq 0x240(%rcx), %rcx
movl %eax, %eax
shlq $0x4, %rax
vmovaps %xmm0, (%rcx,%rax)
incl %r14d
movslq 0x4(%r15), %r12
shlq $0x5, %r12
addq %r15, %r12
cmpq 0xe0(%rsp), %r12
jne 0x14e92bb
jmp 0x14e936a
movq 0x90(%rsp), %rax
movq 0x70(%rsp), %rcx
cmpq %rax, (%rcx)
jne 0x14e92e0
vmovaps %xmm0, 0x150(%rsp)
movl $0x800, %edi # imm = 0x800
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, %rdi
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdx
callq 0x217915
movq 0x60(%rsp), %rdx
vmovaps 0x150(%rsp), %xmm0
jmp 0x14e92e0
addl %r14d, %edx
movq %rdx, 0x60(%rsp)
movq 0x98(%rsp), %r8
leal 0x1(%r8), %r15d
cmpl $0x11, %r15d
jae 0x14e941e
movq 0xa0(%rsp), %rdx
movq 0x300(%rdx), %rax
movl %r8d, %ecx
movl %r14d, (%rax,%rcx,8)
movl 0x104(%rsp), %esi
movl %esi, 0x4(%rax,%rcx,8)
cmpl $0x2, %r14d
sete %al
andb %al, 0x358(%rdx)
movslq 0xc(%r12), %rax
testq %rax, %rax
je 0x14e948b
shlq $0x5, %rax
addq %rax, %r12
movq %r15, 0x98(%rsp)
movq %r12, %rdx
movl 0x2c(%rsp), %r14d
movq 0xd0(%rsp), %rdi
movq 0xa0(%rsp), %r8
cmpq %rdi, %rdx
movq 0x128(%rsp), %r11
movl 0x8c(%rsp), %r9d
movl 0x88(%rsp), %r10d
movl 0x84(%rsp), %r15d
movq 0x60(%rsp), %r12
jne 0x14e922d
jmp 0x14e969d
movq 0xa0(%rsp), %rax
movq 0x118(%rsp), %rcx
cmpq %rcx, 0x300(%rax)
jne 0x14e9388
movl $0x200, %edi # imm = 0x200
callq 0x6a450
movq 0xa0(%rsp), %rcx
movq %rax, 0x300(%rcx)
xorl %eax, %eax
movq 0x98(%rsp), %r8
movq 0xa0(%rsp), %rcx
movq 0x300(%rcx), %rcx
movq 0x110(%rsp), %rdx
movq (%rdx,%rax,8), %rdx
movq %rdx, (%rcx,%rax,8)
incq %rax
cmpq $0x10, %rax
jne 0x14e945e
jmp 0x14e9388
movl (%r12), %eax
movl 0x84(%rsp), %edx
cmpl %edx, %eax
movl 0x8c(%rsp), %ecx
movq 0x60(%rsp), %rdi
cmovbl %edi, %ecx
movl %ecx, 0x8c(%rsp)
movl 0x88(%rsp), %ecx
cmovbl %r15d, %ecx
movl %ecx, 0x88(%rsp)
cmovbl %eax, %edx
movl %edx, 0x84(%rsp)
movq 0xa0(%rsp), %rsi
movl %r15d, 0x348(%rsi)
addl $0x2, %r8d
movq %r8, 0x98(%rsp)
cmpl $0x11, %r8d
jae 0x14e95ae
movq 0x300(%rsi), %rax
movabsq $0x7f80000000000002, %rcx # imm = 0x7F80000000000002
movq %rcx, (%rax,%r15,8)
movl (%r12), %eax
imulq 0xf8(%rsp), %rax
movq 0x30(%rsp), %rcx
vmovups (%rcx,%rax), %xmm0
leal 0x1(%rdi), %r14d
cmpl $0x21, %r14d
jae 0x14e9608
movq 0x70(%rsp), %rax
movq (%rax), %rax
movl %edi, %ecx
shlq $0x4, %rcx
vmovaps %xmm0, (%rax,%rcx)
addl $0x2, %edi
movq %rdi, 0x60(%rsp)
cmpl $0x21, %edi
jae 0x14e965e
movq 0xa0(%rsp), %r8
movq 0x240(%r8), %rax
shlq $0x4, %r14
vmovaps (%r8), %xmm0
vmovaps %xmm0, (%rax,%r14)
movq 0xd0(%rsp), %rdi
movl 0xc(%rdi), %eax
testl %eax, %eax
je 0x14e95a1
movq %rdi, %rdx
cltq
shlq $0x5, %rax
leaq (%rdx,%rax), %rcx
movslq 0x4(%rdx,%rax), %rax
shlq $0x5, %rax
leaq (%rcx,%rax), %rdx
movl 0xc(%rax,%rcx), %eax
testl %eax, %eax
jne 0x14e957d
jmp 0x14e93d6
movq %rdi, %rdx
movl 0x2c(%rsp), %r14d
jmp 0x14e93eb
movq 0x118(%rsp), %rax
cmpq %rax, 0x300(%rsi)
jne 0x14e94ef
movl $0x200, %edi # imm = 0x200
callq 0x6a450
movq 0xa0(%rsp), %rsi
movq %rax, 0x300(%rsi)
xorl %eax, %eax
movq 0x60(%rsp), %rdi
movq 0x300(%rsi), %rcx
movq 0x110(%rsp), %rdx
movq (%rdx,%rax,8), %rdx
movq %rdx, (%rcx,%rax,8)
incq %rax
cmpq $0x10, %rax
jne 0x14e95e3
jmp 0x14e94ef
movq 0x90(%rsp), %rax
movq 0x70(%rsp), %rcx
cmpq %rax, (%rcx)
jne 0x14e9529
vmovaps %xmm0, 0xe0(%rsp)
movl $0x800, %edi # imm = 0x800
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, %rdi
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdx
callq 0x217934
movq 0x60(%rsp), %rdi
vmovaps 0xe0(%rsp), %xmm0
jmp 0x14e9529
movq 0x90(%rsp), %rax
movq 0x70(%rsp), %rcx
cmpq %rax, (%rcx)
jne 0x14e954d
movl $0x800, %edi # imm = 0x800
movl $0x10, %esi
callq 0x1ee60ac
movq %rax, %rdi
movq 0x70(%rsp), %rsi
movq 0x90(%rsp), %rdx
callq 0x217953
jmp 0x14e954d
movl %r12d, 0x344(%r8)
movq 0x98(%rsp), %rax
movl %eax, 0x340(%r8)
movl %r15d, 0x364(%r8)
movl %r10d, 0x35c(%r8)
movl %r9d, 0x360(%r8)
cmpq $0xe, 0x140(%rsp)
movq 0x148(%rsp), %rsi
movq 0x138(%rsp), %rdx
ja 0x14e9788
movslq 0x4(%rdi), %rax
shlq $0x5, %rax
addq %rax, %rdi
addq $0x380, %rsi # imm = 0x380
movq %rdx, %rcx
cmpq 0x120(%rsp), %rdi
jne 0x14e9132
jmp 0x14e9788
leaq 0x2200(%rsp), %rax
cmpq %rax, 0x3e00(%rsp)
jne 0x14e9144
movq %rcx, %r12
movq %rsi, %r15
movl $0x3840, %edi # imm = 0x3840
movl $0x40, %esi
callq 0x1ee60ac
movq %rax, %rdi
leaq 0x3e00(%rsp), %rsi
callq 0x2178bc
xorl %r14d, %r14d
leaq (%rsp,%r14), %rsi
addq $0x2200, %rsi # imm = 0x2200
movq 0x3e00(%rsp), %rdi
addq %r14, %rdi
callq 0xd56784
addq $0x380, %r14 # imm = 0x380
cmpq $0x1c00, %r14 # imm = 0x1C00
movq %r15, %rsi
movq 0xd0(%rsp), %rdi
movq %r12, %rcx
jne 0x14e974a
jmp 0x14e9144
movl %edx, 0x3e40(%rsp)
movq 0x40(%rsp), %rcx
leaq 0x1c(%rcx), %rax
xorl %edx, %edx
testb $0x10, 0x2c(%rcx)
cmovneq %rax, %rdx
movl 0x48(%rcx), %esi
movq 0x108(%rsp), %r15
movq %r15, %rax
movq %r15, %r10
testq %r15, %r15
je 0x14e97d0
movq 0x21c0(%rsp), %r15
movq 0x1180(%rsp), %rax
movq 0x4e80(%rsp), %r10
subq $0x8, %rsp
leaq 0x2208(%rsp), %rdi
movq 0x70(%rsp), %rcx
movq 0x80(%rsp), %r8
movq 0x40(%rsp), %r9
pushq %r11
pushq 0x68(%rsp)
pushq %r10
pushq %rax
pushq %r15
pushq %r13
pushq %rbx
pushq 0x28(%rbp)
pushq 0x20(%rbp)
pushq 0x18(%rbp)
movl 0x10(%rbp), %eax
pushq %rax
pushq 0xb0(%rsp)
pushq 0xb0(%rsp)
callq 0x14ec5d8
addq $0x70, %rsp
movq 0x40(%rbp), %r15
movq 0x130(%rsp), %r12
leaq 0x2200(%rsp), %rdi
callq 0xd426ae
cmpl $0x8, %r14d
jb 0x14e999d
movq 0x40(%rsp), %rcx
movzwl 0x18(%rcx), %eax
vcvtsi2ss %eax, %xmm1, %xmm0
movzwl 0x10(%rcx), %eax
vcvtsi2ss %eax, %xmm1, %xmm1
vmovss 0xa34c80(%rip), %xmm2 # 0x1f1e4e0
vmulss %xmm2, %xmm0, %xmm5
vmulss %xmm2, %xmm1, %xmm1
movzwl 0x16(%rcx), %eax
vcvtsi2ss %eax, %xmm3, %xmm0
vmulss %xmm2, %xmm0, %xmm7
movzwl 0xe(%rcx), %eax
vcvtsi2ss %eax, %xmm3, %xmm0
movzwl 0x14(%rcx), %eax
vcvtsi2ss %eax, %xmm3, %xmm3
movzwl 0xc(%rcx), %eax
vcvtsi2ss %eax, %xmm4, %xmm4
vmulss %xmm2, %xmm0, %xmm6
vmulss %xmm2, %xmm3, %xmm8
vmulss %xmm2, %xmm4, %xmm3
movzwl 0x12(%rcx), %eax
vcvtsi2ss %eax, %xmm9, %xmm0
vmulss %xmm2, %xmm0, %xmm4
movzwl 0xa(%rcx), %eax
vcvtsi2ss %eax, %xmm9, %xmm0
vmulss %xmm2, %xmm0, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm0
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vshufps $0x0, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vshufps $0x0, %xmm6, %xmm6, %xmm3 # xmm3 = xmm6[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vinsertf128 $0x1, %xmm5, %ymm5, %ymm5
vshufps $0x0, %xmm8, %xmm8, %xmm6 # xmm6 = xmm8[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vinsertf128 $0x1, %xmm7, %ymm7, %ymm7
cmpl $0x1, %r12d
adcl $0x0, %r12d
shlq $0x5, %r12
xorl %eax, %eax
vbroadcastss 0xa02df4(%rip), %ymm8 # 0x1eec714
vmovaps (%rbx,%rax), %ymm9
vmovaps (%r13,%rax), %ymm10
vsubps %ymm9, %ymm8, %ymm11
vsubps %ymm10, %ymm8, %ymm12
vmulps %ymm1, %ymm10, %ymm13
vmulps %ymm0, %ymm12, %ymm14
vaddps %ymm14, %ymm13, %ymm13
vmulps %ymm3, %ymm10, %ymm14
vmulps %ymm2, %ymm12, %ymm15
vaddps %ymm15, %ymm14, %ymm14
vmulps %ymm14, %ymm9, %ymm14
vmulps %ymm13, %ymm11, %ymm13
vaddps %ymm14, %ymm13, %ymm13
vmulps %ymm5, %ymm10, %ymm14
vmulps %ymm4, %ymm12, %ymm15
vaddps %ymm15, %ymm14, %ymm14
vmulps %ymm7, %ymm10, %ymm10
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm12, %ymm10, %ymm10
vmulps %ymm10, %ymm9, %ymm9
vmulps %ymm14, %ymm11, %ymm10
vaddps %ymm9, %ymm10, %ymm9
vmovaps %ymm13, (%rbx,%rax)
vmovaps %ymm9, (%r13,%rax)
addq $0x20, %rax
cmpq %rax, %r12
jne 0x14e9920
movq 0x58(%r15), %rax
testq %rax, %rax
jne 0x14e9b52
andl $-0x8, %r14d
movq 0xd8(%rsp), %rax
cmpl %r14d, %eax
jae 0x14e9af5
leal -0x1(%rax), %ecx
movl %eax, %r8d
movl %r14d, %edx
subq %r8, %rdx
leaq 0x7(%rdx), %rax
andq $-0x8, %rax
decq %rdx
vmovq %rdx, %xmm0
vpshufd $0x44, %xmm0, %xmm0 # xmm0 = xmm0[0,1,0,1]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm11
vbroadcastss (%rbx,%rcx,4), %ymm0
vbroadcastss (%r13,%rcx,4), %ymm1
movq 0x18(%rbp), %rsi
vbroadcastss (%rsi,%rcx,4), %ymm2
movq 0x20(%rbp), %rdi
vbroadcastss (%rdi,%rcx,4), %ymm3
movq 0x28(%rbp), %r9
vbroadcastss (%r9,%rcx,4), %ymm4
leaq (%rbx,%r8,4), %rcx
leaq (%r13,%r8,4), %rdx
leaq (%rsi,%r8,4), %rsi
leaq (%rdi,%r8,4), %rdi
leaq (%r9,%r8,4), %r8
xorl %r9d, %r9d
vmovaps 0xa3840f(%rip), %ymm5 # 0x1f21e40
vmovaps 0xa3e607(%rip), %ymm6 # 0x1f28040
vextractf128 $0x1, %ymm11, %xmm8
vmovddup 0xa38441(%rip), %xmm7 # xmm7 = mem[0,0]
vxorps %xmm7, %xmm8, %xmm8
vxorps %xmm7, %xmm11, %xmm9
vpcmpeqd %xmm10, %xmm10, %xmm10
vxorps %xmm7, %xmm11, %xmm11
vmovq %r9, %xmm12
vpshufd $0x44, %xmm12, %xmm12 # xmm12 = xmm12[0,1,0,1]
vinsertf128 $0x1, %xmm12, %ymm12, %ymm12
vorps %ymm5, %ymm12, %ymm13
vorps %ymm6, %ymm12, %ymm12
vextractf128 $0x1, %ymm12, %xmm14
vxorps %xmm7, %xmm14, %xmm14
vpcmpgtq %xmm8, %xmm14, %xmm14
vxorps %xmm7, %xmm12, %xmm12
vpcmpgtq %xmm9, %xmm12, %xmm12
vpackssdw %xmm14, %xmm12, %xmm12
vpxor %xmm10, %xmm12, %xmm12
vextractf128 $0x1, %ymm13, %xmm14
vxorps %xmm7, %xmm14, %xmm14
vpcmpgtq %xmm8, %xmm14, %xmm14
vxorps %xmm7, %xmm13, %xmm13
vpcmpgtq %xmm11, %xmm13, %xmm13
vpackssdw %xmm14, %xmm13, %xmm13
vpxor %xmm10, %xmm13, %xmm13
vpackssdw %xmm12, %xmm13, %xmm12
vpmovsxwd %xmm12, %xmm13
vpunpckhwd %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[4,4,5,5,6,6,7,7]
vinsertf128 $0x1, %xmm12, %ymm13, %ymm12
vmaskmovps %ymm0, %ymm12, (%rcx,%r9,4)
vmaskmovps %ymm1, %ymm12, (%rdx,%r9,4)
vmaskmovps %ymm2, %ymm12, (%rsi,%r9,4)
vmaskmovps %ymm3, %ymm12, (%rdi,%r9,4)
vmaskmovps %ymm4, %ymm12, (%r8,%r9,4)
addq $0x8, %r9
cmpq %r9, %rax
jne 0x14e9a58
leaq 0x3e80(%rsp), %rax
movq 0x1000(%rax), %rdi
cmpq %rax, %rdi
je 0x14e9b11
vzeroupper
callq 0x1ee612d
leaq 0x180(%rsp), %rax
movq 0x1000(%rax), %rdi
cmpq %rax, %rdi
je 0x14e9b2d
vzeroupper
callq 0x1ee612d
leaq 0x11c0(%rsp), %rax
movq 0x1000(%rax), %rdi
cmpq %rax, %rdi
je 0x14e8b38
vzeroupper
callq 0x1ee612d
jmp 0x14e8b38
movq 0x18(%r15), %rcx
leaq 0x2200(%rsp), %rdi
movq %rcx, (%rdi)
movq %r15, 0x8(%rdi)
movq 0x40(%rsp), %rdx
movl 0x34(%rdx), %ecx
movl %ecx, 0x10(%rdi)
movl 0x3c(%rdx), %ecx
movl %ecx, 0x14(%rdi)
movq %rbx, 0x18(%rdi)
movq %r13, 0x20(%rdi)
movq 0x21c0(%rsp), %rcx
movq %rcx, 0x28(%rdi)
movq 0x1180(%rsp), %rcx
movq %rcx, 0x30(%rdi)
movq 0x4e80(%rsp), %rcx
movq %rcx, 0x38(%rdi)
movq 0x18(%rbp), %rcx
movq %rcx, 0x40(%rdi)
movq 0x20(%rbp), %rcx
movq %rcx, 0x48(%rdi)
movq 0x28(%rbp), %rcx
movq %rcx, 0x50(%rdi)
movq 0xd8(%rsp), %rcx
movl %ecx, 0x58(%rdi)
vzeroupper
callq *%rax
movl 0x2c(%rsp), %r14d
jmp 0x14e99aa
movq 0x78(%rsp), %rcx
cmpl 0x68(%rsp), %ecx
jb 0x14e9e7f
movl %eax, %edx
vxorps %xmm0, %xmm0, %xmm0
vcvtsi2ss %rdx, %xmm0, %xmm0
vrcpss %xmm0, %xmm0, %xmm1
vmulss %xmm0, %xmm1, %xmm0
vmovss 0xa073fa(%rip), %xmm2 # 0x1ef0ff8
vsubss %xmm0, %xmm2, %xmm0
vmulss %xmm0, %xmm1, %xmm3
movq 0x50(%rsp), %rcx
leal -0x2(%rcx,%rcx), %ecx
movl %ecx, 0x30(%rsp)
movq 0x68(%rsp), %rcx
leal 0x1(%rcx), %edx
movl 0xa0(%rsp), %r11d
cmpl %edx, %r11d
cmoval %r11d, %edx
subl %ecx, %edx
leal 0x7(%rdx), %r11d
vmovd %ecx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0xa070af(%rip), %xmm0, %xmm1 # 0x1ef0cf0
vpaddd 0xa37257(%rip), %xmm0, %xmm0 # 0x1f20ea0
decl %edx
vinsertf128 $0x1, %xmm0, %ymm1, %ymm0
vmovd %edx, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovd %eax, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
andl $-0x8, %r11d
xorl %r14d, %r14d
vmovaps 0xa70c97(%rip), %ymm4 # 0x1f5a920
vbroadcastss 0xa28a72(%rip), %xmm5 # 0x1f12704
vextractf128 $0x1, %ymm2, %xmm6
vbroadcastss 0xa70c6f(%rip), %xmm7 # 0x1f5a910
vmovd %r14d, %xmm8
vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vinsertf128 $0x1, %xmm8, %ymm8, %ymm8
vorps %ymm4, %ymm8, %ymm8
vpminud %xmm1, %xmm8, %xmm9
vpcmpeqd %xmm9, %xmm8, %xmm10
vpaddd %xmm0, %xmm0, %xmm11
vextractf128 $0x1, %ymm0, %xmm9
vpaddd %xmm9, %xmm9, %xmm12
vpor %xmm5, %xmm12, %xmm12
vpmulld %xmm6, %xmm12, %xmm12
vpor %xmm5, %xmm11, %xmm11
vpmulld %xmm2, %xmm11, %xmm11
vinsertf128 $0x1, %xmm12, %ymm11, %ymm11
vmovd %xmm10, %eax
testb $0x1, %al
je 0x14e9cfc
vmovd %xmm11, %eax
cltd
idivl 0x30(%rsp)
vmovd %eax, %xmm10
vpminud %xmm1, %xmm8, %xmm12
vpcmpeqd %xmm12, %xmm8, %xmm12
vpextrb $0x4, %xmm12, %eax
testb $0x1, %al
je 0x14e9d27
vpextrd $0x1, %xmm11, %eax
cltd
idivl 0x30(%rsp)
vpinsrd $0x1, %eax, %xmm10, %xmm12
vblendps $0xf, %ymm12, %ymm10, %ymm10 # ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
vpminud %xmm1, %xmm8, %xmm12
vpcmpeqd %xmm12, %xmm8, %xmm12
vpextrb $0x8, %xmm12, %eax
testb $0x1, %al
je 0x14e9d52
vpextrd $0x2, %xmm11, %eax
cltd
idivl 0x30(%rsp)
vpinsrd $0x2, %eax, %xmm10, %xmm12
vblendps $0xf, %ymm12, %ymm10, %ymm10 # ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
vpminud %xmm1, %xmm8, %xmm12
vpcmpeqd %xmm12, %xmm8, %xmm12
vpextrb $0xc, %xmm12, %eax
testb $0x1, %al
je 0x14e9d7d
vpextrd $0x3, %xmm11, %eax
cltd
idivl 0x30(%rsp)
vpinsrd $0x3, %eax, %xmm10, %xmm12
vblendps $0xf, %ymm12, %ymm10, %ymm10 # ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
vextractf128 $0x1, %ymm1, %xmm12
vextractf128 $0x1, %ymm8, %xmm13
vpminud %xmm12, %xmm13, %xmm12
vpcmpeqd %xmm12, %xmm13, %xmm12
vpextrb $0x0, %xmm12, %eax
testb $0x1, %al
je 0x14e9dbe
vextractf128 $0x1, %ymm11, %xmm13
vmovd %xmm13, %eax
cltd
idivl 0x30(%rsp)
vextractf128 $0x1, %ymm10, %xmm13
vpinsrd $0x0, %eax, %xmm13, %xmm13
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vpextrb $0x4, %xmm12, %eax
testb $0x1, %al
je 0x14e9deb
vextractf128 $0x1, %ymm11, %xmm13
vextractps $0x1, %xmm13, %eax
cltd
idivl 0x30(%rsp)
vextractf128 $0x1, %ymm10, %xmm13
vpinsrd $0x1, %eax, %xmm13, %xmm13
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vpextrb $0x8, %xmm12, %eax
testb $0x1, %al
je 0x14e9e18
vextractf128 $0x1, %ymm11, %xmm13
vextractps $0x2, %xmm13, %eax
cltd
idivl 0x30(%rsp)
vextractf128 $0x1, %ymm10, %xmm13
vpinsrd $0x2, %eax, %xmm13, %xmm13
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vpextrb $0xc, %xmm12, %eax
testb $0x1, %al
je 0x14e9e45
vextractf128 $0x1, %ymm11, %xmm11
vextractps $0x3, %xmm11, %eax
cltd
idivl 0x30(%rsp)
vextractf128 $0x1, %ymm10, %xmm11
vpinsrd $0x3, %eax, %xmm11, %xmm11
vinsertf128 $0x1, %xmm11, %ymm10, %ymm10
vpminud %xmm1, %xmm8, %xmm11
vpcmpeqd %xmm11, %xmm8, %xmm8
vinsertf128 $0x1, %xmm12, %ymm8, %ymm8
vcvtdq2ps %ymm10, %ymm10
vmulps %ymm3, %ymm10, %ymm10
vmaskmovps %ymm10, %ymm8, (%rbx,%r14,4)
vpaddd %xmm7, %xmm9, %xmm8
vpaddd %xmm7, %xmm0, %xmm0
vinsertf128 $0x1, %xmm8, %ymm0, %ymm0
addq $0x8, %r14
cmpl %r14d, %r11d
jne 0x14e9ca1
movq 0x50(%rsp), %r11
leal -0x1(%r11), %eax
cmpl 0x78(%rsp), %eax
jne 0x14e8e53
movl 0xe0(%rsp), %eax
movl $0x3f800000, (%rbx,%rax,4) # imm = 0x3F800000
jmp 0x14e8e53
movl %esi, %eax
imull 0x58(%rsp), %eax
leaq (%rbx,%rax,4), %r8
movq 0x78(%rsp), %rax
cmpl 0x68(%rsp), %eax
jb 0x14ea14f
movl %r10d, %eax
vxorps %xmm1, %xmm1, %xmm1
vcvtsi2ss %rax, %xmm1, %xmm0
vrcpss %xmm0, %xmm0, %xmm1
vmulss %xmm0, %xmm1, %xmm0
vmovss 0xa0711d(%rip), %xmm2 # 0x1ef0ff8
vsubss %xmm0, %xmm2, %xmm0
vmulss %xmm0, %xmm1, %xmm3
movq 0x50(%rsp), %rax
leal -0x2(%rax,%rax), %r12d
movq 0x68(%rsp), %rcx
leal 0x1(%rcx), %eax
movl 0xa0(%rsp), %edx
cmpl %eax, %edx
cmoval %edx, %eax
subl %ecx, %eax
leal 0x7(%rax), %r11d
vmovd %ecx, %xmm0
vpshufd $0x0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vpaddd 0xa06dd8(%rip), %xmm0, %xmm1 # 0x1ef0cf0
vpaddd 0xa36f80(%rip), %xmm0, %xmm0 # 0x1f20ea0
decl %eax
vinsertf128 $0x1, %xmm0, %ymm1, %ymm0
vmovd %eax, %xmm1
vpshufd $0x0, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmovd %r10d, %xmm2
vpshufd $0x0, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vinsertf128 $0x1, %xmm2, %ymm2, %ymm2
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
andl $-0x8, %r11d
xorl %r10d, %r10d
vmovaps 0xa709bf(%rip), %ymm4 # 0x1f5a920
vbroadcastss 0xa2879a(%rip), %xmm5 # 0x1f12704
vextractf128 $0x1, %ymm2, %xmm6
vbroadcastss 0xa70997(%rip), %xmm7 # 0x1f5a910
vmovd %r10d, %xmm8
vpshufd $0x0, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vinsertf128 $0x1, %xmm8, %ymm8, %ymm8
vorps %ymm4, %ymm8, %ymm8
vpminud %xmm1, %xmm8, %xmm9
vpcmpeqd %xmm9, %xmm8, %xmm10
vpaddd %xmm0, %xmm0, %xmm11
vextractf128 $0x1, %ymm0, %xmm9
vpaddd %xmm9, %xmm9, %xmm12
vpor %xmm5, %xmm12, %xmm12
vpmulld %xmm6, %xmm12, %xmm12
vpor %xmm5, %xmm11, %xmm11
vpmulld %xmm2, %xmm11, %xmm11
vinsertf128 $0x1, %xmm12, %ymm11, %ymm11
vmovd %xmm10, %eax
testb $0x1, %al
je 0x14e9fd3
vmovd %xmm11, %eax
cltd
idivl %r12d
vmovd %eax, %xmm10
vpminud %xmm1, %xmm8, %xmm12
vpcmpeqd %xmm12, %xmm8, %xmm12
vpextrb $0x4, %xmm12, %eax
testb $0x1, %al
je 0x14e9ffd
vpextrd $0x1, %xmm11, %eax
cltd
idivl %r12d
vpinsrd $0x1, %eax, %xmm10, %xmm12
vblendps $0xf, %ymm12, %ymm10, %ymm10 # ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
vpminud %xmm1, %xmm8, %xmm12
vpcmpeqd %xmm12, %xmm8, %xmm12
vpextrb $0x8, %xmm12, %eax
testb $0x1, %al
je 0x14ea027
vpextrd $0x2, %xmm11, %eax
cltd
idivl %r12d
vpinsrd $0x2, %eax, %xmm10, %xmm12
vblendps $0xf, %ymm12, %ymm10, %ymm10 # ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
vpminud %xmm1, %xmm8, %xmm12
vpcmpeqd %xmm12, %xmm8, %xmm12
vpextrb $0xc, %xmm12, %eax
testb $0x1, %al
je 0x14ea051
vpextrd $0x3, %xmm11, %eax
cltd
idivl %r12d
vpinsrd $0x3, %eax, %xmm10, %xmm12
vblendps $0xf, %ymm12, %ymm10, %ymm10 # ymm10 = ymm12[0,1,2,3],ymm10[4,5,6,7]
vextractf128 $0x1, %ymm1, %xmm12
vextractf128 $0x1, %ymm8, %xmm13
vpminud %xmm12, %xmm13, %xmm12
vpcmpeqd %xmm12, %xmm13, %xmm12
vpextrb $0x0, %xmm12, %eax
testb $0x1, %al
je 0x14ea091
vextractf128 $0x1, %ymm11, %xmm13
vmovd %xmm13, %eax
cltd
idivl %r12d
vextractf128 $0x1, %ymm10, %xmm13
vpinsrd $0x0, %eax, %xmm13, %xmm13
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vpextrb $0x4, %xmm12, %eax
testb $0x1, %al
je 0x14ea0bd
vextractf128 $0x1, %ymm11, %xmm13
vextractps $0x1, %xmm13, %eax
cltd
idivl %r12d
vextractf128 $0x1, %ymm10, %xmm13
vpinsrd $0x1, %eax, %xmm13, %xmm13
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vpextrb $0x8, %xmm12, %eax
testb $0x1, %al
je 0x14ea0e9
vextractf128 $0x1, %ymm11, %xmm13
vextractps $0x2, %xmm13, %eax
cltd
idivl %r12d
vextractf128 $0x1, %ymm10, %xmm13
vpinsrd $0x2, %eax, %xmm13, %xmm13
vinsertf128 $0x1, %xmm13, %ymm10, %ymm10
vpextrb $0xc, %xmm12, %eax
testb $0x1, %al
je 0x14ea115
vextractf128 $0x1, %ymm11, %xmm11
vextractps $0x3, %xmm11, %eax
cltd
idivl %r12d
vextractf128 $0x1, %ymm10, %xmm11
vpinsrd $0x3, %eax, %xmm11, %xmm11
vinsertf128 $0x1, %xmm11, %ymm10, %ymm10
vpminud %xmm1, %xmm8, %xmm11
vpcmpeqd %xmm11, %xmm8, %xmm8
vinsertf128 $0x1, %xmm12, %ymm8, %ymm8
vcvtdq2ps %ymm10, %ymm10
vmulps %ymm3, %ymm10, %ymm10
vmaskmovps %ymm10, %ymm8, (%r8,%r10,4)
vpaddd %xmm7, %xmm9, %xmm8
vpaddd %xmm7, %xmm0, %xmm0
vinsertf128 $0x1, %xmm8, %ymm0, %ymm0
addq $0x8, %r10
cmpl %r10d, %r11d
jne 0x14e9f79
movq 0x50(%rsp), %r11
leal -0x1(%r11), %eax
cmpl 0x78(%rsp), %eax
movl 0x10(%rbp), %ecx
jne 0x14e8e71
movl 0xe0(%rsp), %eax
movl $0x3f800000, (%r8,%rax,4) # imm = 0x3F800000
jmp 0x14e8e71
movl 0xe0(%rsp), %eax
leaq (,%rax,4), %r8
addq %r13, %r8
movq 0x38(%rsp), %rax
cmpl %eax, 0x48(%rsp)
jb 0x14ea20b
movl %r9d, %eax
vxorps %xmm1, %xmm1, %xmm1
vcvtsi2ss %rax, %xmm1, %xmm0
vrcpss %xmm0, %xmm0, %xmm1
vmulss %xmm0, %xmm1, %xmm0
vmovss 0xa06e46(%rip), %xmm2 # 0x1ef0ff8
vsubss %xmm0, %xmm2, %xmm0
vmulss %xmm0, %xmm1, %xmm0
movl 0x10(%rbp), %eax
leal -0x2(,%rax,2), %r14d
movq 0x38(%rsp), %rax
leal 0x1(,%rax,2), %ecx
imull %r9d, %ecx
addl %r9d, %r9d
xorl %r11d, %r11d
movl %eax, %r12d
movl %ecx, %eax
cltd
idivl %r14d
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %eax, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm1
movl %r11d, %eax
vmovss %xmm1, (%r8,%rax,4)
incl %r12d
addl 0x58(%rsp), %r11d
addl %r9d, %ecx
cmpl 0x48(%rsp), %r12d
jbe 0x14ea1de
movl 0x10(%rbp), %ecx
leal -0x1(%rcx), %eax
cmpl 0x48(%rsp), %eax
movq 0x50(%rsp), %r11
jne 0x14e8e8e
movl %esi, %eax
imull 0x58(%rsp), %eax
movl $0x3f800000, (%r8,%rax,4) # imm = 0x3F800000
jmp 0x14e8e8e
jmp 0x14ea23f
jmp 0x14ea23f
jmp 0x14ea2e2
jmp 0x14ea23f
movq %rax, %rbx
movq 0x3e00(%rsp), %r15
leaq 0x2200(%rsp), %rax
cmpq %rax, %r15
setne %al
testq %r15, %r15
setne %cl
testb %cl, %al
je 0x14ea297
leaq -0x40(%r15), %r14
movq -0x8(%r15), %rax
testq %rax, %rax
je 0x14ea28f
imulq $0x380, %rax, %r12 # imm = 0x380
addq $-0x380, %r15 # imm = 0xFC80
leaq (%r15,%r12), %rdi
callq 0xd46ddc
addq $-0x380, %r12 # imm = 0xFC80
jne 0x14ea27d
movq %r14, %rdi
callq 0x1ee612d
movl $0x1880, %r14d # imm = 0x1880
movq $-0x380, %r15 # imm = 0xFC80
leaq (%rsp,%r14), %rdi
addq $0x2200, %rdi # imm = 0x2200
callq 0xd46ddc
addq %r15, %r14
cmpq %r15, %r14
jne 0x14ea2a4
jmp 0x14ea2e5
jmp 0x14ea33c
movq %rax, %rbx
leaq 0x2200(%rsp), %rdi
callq 0xd426ae
jmp 0x14ea2e5
movq %rax, %rbx
jmp 0x14ea2fe
movq %rax, %rbx
jmp 0x14ea317
jmp 0x14ea33c
jmp 0x14ea33c
jmp 0x14ea33c
movq %rax, %rbx
leaq 0x3e80(%rsp), %rax
movq 0x1000(%rax), %rdi
cmpq %rax, %rdi
je 0x14ea2fe
callq 0x1ee612d
leaq 0x180(%rsp), %rax
movq 0x1000(%rax), %rdi
cmpq %rax, %rdi
je 0x14ea317
callq 0x1ee612d
leaq 0x11c0(%rsp), %rax
movq 0x1000(%rax), %rdi
cmpq %rax, %rdi
je 0x14ea330
callq 0x1ee612d
movq %rbx, %rdi
callq 0x6a600
jmp 0x14ea33c
jmp 0x14ea33c
movq %rax, %rdi
callq 0x8d6de8
|
/embree[P]embree/kernels/subdiv/subdivpatch1base_eval.cpp
|
embree::Vec3<embree::vfloat_impl<8>> embree::avx::patchEval<embree::vfloat_impl<8>>(embree::SubdivPatch1Base const&, embree::vfloat_impl<8> const&, embree::vfloat_impl<8> const&)
|
Vec3<simdf> patchEval(const SubdivPatch1Base& patch, const simdf& uu, const simdf& vv)
{
if (likely(patch.type == SubdivPatch1Base::BEZIER_PATCH))
return ((BezierPatch3fa*)patch.patch_v)->eval(uu,vv);
else if (likely(patch.type == SubdivPatch1Base::BSPLINE_PATCH))
return ((BSplinePatch3fa*)patch.patch_v)->eval(uu,vv);
else if (likely(patch.type == SubdivPatch1Base::GREGORY_PATCH))
return ((DenseGregoryPatch3fa*)patch.patch_v)->eval(uu,vv);
else if (likely(patch.type == SubdivPatch1Base::BILINEAR_PATCH))
return ((BilinearPatch3fa*)patch.patch_v)->eval(uu,vv);
return Vec3<simdf>( zero );
}
|
movq %rdi, %rax
movzbl 0x2d(%rsi), %edi
cmpl $0x2, %edi
jne 0x14ecd9d
vbroadcastss 0x9ffd44(%rip), %ymm0 # 0x1eec714
vmovaps (%rdx), %ymm5
vsubps %ymm5, %ymm0, %ymm2
vmovaps (%rcx), %ymm8
vsubps %ymm8, %ymm0, %ymm4
vmulps %ymm2, %ymm2, %ymm0
vmulps %ymm0, %ymm2, %ymm1
vmulps %ymm4, %ymm4, %ymm0
vmulps %ymm0, %ymm4, %ymm0
vmulps %ymm2, %ymm5, %ymm6
vmulps %ymm6, %ymm2, %ymm2
vbroadcastss 0xa045ea(%rip), %ymm7 # 0x1ef0fec
vmulps %ymm7, %ymm2, %ymm3
vmulps %ymm4, %ymm8, %ymm9
vmulps %ymm4, %ymm9, %ymm2
vmulps %ymm7, %ymm2, %ymm2
vmulps %ymm6, %ymm5, %ymm4
vmulps %ymm7, %ymm4, %ymm6
vmulps %ymm9, %ymm8, %ymm4
vmulps %ymm7, %ymm4, %ymm4
vmulps %ymm5, %ymm5, %ymm7
vmulps %ymm7, %ymm5, %ymm7
vmulps %ymm8, %ymm8, %ymm5
vmulps %ymm5, %ymm8, %ymm5
vbroadcastss 0x40(%rsi), %ymm8
vbroadcastss 0x50(%rsi), %ymm9
vbroadcastss 0x60(%rsi), %ymm10
vbroadcastss 0x70(%rsi), %ymm11
vmulps %ymm7, %ymm11, %ymm11
vmulps %ymm6, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm3, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm9
vmulps %ymm1, %ymm8, %ymm8
vbroadcastss 0x80(%rsi), %ymm10
vbroadcastss 0x90(%rsi), %ymm11
vbroadcastss 0xa0(%rsi), %ymm12
vaddps %ymm9, %ymm8, %ymm8
vbroadcastss 0xb0(%rsi), %ymm9
vmulps %ymm7, %ymm9, %ymm9
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm9, %ymm12, %ymm9
vmulps %ymm3, %ymm11, %ymm11
vaddps %ymm9, %ymm11, %ymm9
vmulps %ymm1, %ymm10, %ymm10
vaddps %ymm9, %ymm10, %ymm9
vbroadcastss 0xc0(%rsi), %ymm10
vbroadcastss 0xd0(%rsi), %ymm11
vbroadcastss 0xe0(%rsi), %ymm12
vbroadcastss 0xf0(%rsi), %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm3, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm1, %ymm10, %ymm10
vbroadcastss 0x100(%rsi), %ymm12
vbroadcastss 0x110(%rsi), %ymm13
vbroadcastss 0x120(%rsi), %ymm14
vbroadcastss 0x130(%rsi), %ymm15
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm7, %ymm15, %ymm11
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm11, %ymm14, %ymm11
vmulps %ymm3, %ymm13, %ymm13
vaddps %ymm11, %ymm13, %ymm11
vmulps %ymm1, %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vmulps %ymm5, %ymm11, %ymm11
vmulps %ymm4, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm2, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm9
vmulps %ymm0, %ymm8, %ymm8
vbroadcastss 0x44(%rsi), %ymm10
vbroadcastss 0x54(%rsi), %ymm11
vbroadcastss 0x64(%rsi), %ymm12
vaddps %ymm9, %ymm8, %ymm8
vbroadcastss 0x74(%rsi), %ymm9
vmulps %ymm7, %ymm9, %ymm9
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm9, %ymm12, %ymm9
vmulps %ymm3, %ymm11, %ymm11
vaddps %ymm9, %ymm11, %ymm9
vmulps %ymm1, %ymm10, %ymm10
vaddps %ymm9, %ymm10, %ymm9
vbroadcastss 0x84(%rsi), %ymm10
vbroadcastss 0x94(%rsi), %ymm11
vbroadcastss 0xa4(%rsi), %ymm12
vbroadcastss 0xb4(%rsi), %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm3, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm1, %ymm10, %ymm10
vbroadcastss 0xc4(%rsi), %ymm12
vbroadcastss 0xd4(%rsi), %ymm13
vbroadcastss 0xe4(%rsi), %ymm14
vbroadcastss 0xf4(%rsi), %ymm15
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm7, %ymm15, %ymm11
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm11, %ymm14, %ymm11
vmulps %ymm3, %ymm13, %ymm13
vaddps %ymm11, %ymm13, %ymm11
vmulps %ymm1, %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vbroadcastss 0x104(%rsi), %ymm12
vbroadcastss 0x114(%rsi), %ymm13
vbroadcastss 0x124(%rsi), %ymm14
vbroadcastss 0x134(%rsi), %ymm15
vmulps %ymm7, %ymm15, %ymm15
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm15, %ymm14, %ymm14
vmulps %ymm3, %ymm13, %ymm13
vaddps %ymm14, %ymm13, %ymm13
vmulps %ymm1, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm5, %ymm12, %ymm12
vmulps %ymm4, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm2, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm0, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm9
vbroadcastss 0x48(%rsi), %ymm10
vbroadcastss 0x58(%rsi), %ymm11
vbroadcastss 0x68(%rsi), %ymm12
vbroadcastss 0x78(%rsi), %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm3, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm1, %ymm10, %ymm10
vbroadcastss 0x88(%rsi), %ymm12
vbroadcastss 0x98(%rsi), %ymm13
vbroadcastss 0xa8(%rsi), %ymm14
vbroadcastss 0xb8(%rsi), %ymm15
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm7, %ymm15, %ymm11
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm11, %ymm14, %ymm11
vmulps %ymm3, %ymm13, %ymm13
vaddps %ymm11, %ymm13, %ymm11
vmulps %ymm1, %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vbroadcastss 0xc8(%rsi), %ymm12
vbroadcastss 0xe8(%rsi), %ymm13
vbroadcastss 0xf8(%rsi), %ymm14
vmulps %ymm7, %ymm14, %ymm14
vmulps %ymm6, %ymm13, %ymm13
vaddps %ymm14, %ymm13, %ymm13
vbroadcastss 0xd8(%rsi), %ymm14
vmulps %ymm3, %ymm14, %ymm14
vaddps %ymm13, %ymm14, %ymm13
vmulps %ymm1, %ymm12, %ymm12
vbroadcastss 0x138(%rsi), %ymm14
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm7, %ymm14, %ymm7
vbroadcastss 0x128(%rsi), %ymm13
vmulps %ymm6, %ymm13, %ymm6
vaddps %ymm7, %ymm6, %ymm6
vbroadcastss 0x118(%rsi), %ymm7
vmulps %ymm7, %ymm3, %ymm3
vaddps %ymm6, %ymm3, %ymm3
vbroadcastss 0x108(%rsi), %ymm6
vmulps %ymm6, %ymm1, %ymm1
vaddps %ymm3, %ymm1, %ymm1
vmulps %ymm1, %ymm5, %ymm1
vmulps %ymm4, %ymm12, %ymm3
vaddps %ymm1, %ymm3, %ymm1
vmulps %ymm2, %ymm11, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm0, %ymm10, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vmovaps %ymm8, (%rax)
vmovaps %ymm9, 0x20(%rax)
vmovaps %ymm0, 0x40(%rax)
vzeroupper
retq
cmpl $0x1, %edi
jne 0x14ed1fc
vmovaps (%rdx), %ymm0
vbroadcastss 0x9ff961(%rip), %ymm7 # 0x1eec714
vsubps %ymm0, %ymm7, %ymm1
vmulps %ymm1, %ymm1, %ymm2
vmulps %ymm2, %ymm1, %ymm2
vbroadcastss 0x9ffdc4(%rip), %ymm4 # 0x1eecb8c
vmulps %ymm4, %ymm2, %ymm3
vmulps %ymm0, %ymm0, %ymm5
vmulps %ymm5, %ymm0, %ymm8
vaddps %ymm3, %ymm8, %ymm3
vmulps %ymm1, %ymm0, %ymm6
vmulps %ymm6, %ymm1, %ymm1
vbroadcastss 0xa04213(%rip), %ymm5 # 0x1ef0ffc
vmulps %ymm5, %ymm1, %ymm9
vmulps %ymm6, %ymm0, %ymm0
vbroadcastss 0xa041fa(%rip), %ymm6 # 0x1ef0ff4
vmulps %ymm6, %ymm0, %ymm10
vaddps %ymm10, %ymm9, %ymm9
vaddps %ymm3, %ymm9, %ymm3
vmulps %ymm4, %ymm8, %ymm9
vaddps %ymm2, %ymm9, %ymm9
vmulps %ymm5, %ymm0, %ymm0
vmulps %ymm6, %ymm1, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vaddps %ymm0, %ymm9, %ymm9
vbroadcastss 0xa041d8(%rip), %ymm10 # 0x1ef1000
vmulps %ymm2, %ymm10, %ymm0
vmulps %ymm3, %ymm10, %ymm1
vmulps %ymm10, %ymm9, %ymm2
vmulps %ymm10, %ymm8, %ymm3
vmovaps (%rcx), %ymm8
vsubps %ymm8, %ymm7, %ymm7
vmulps %ymm7, %ymm7, %ymm9
vmulps %ymm7, %ymm9, %ymm9
vmulps %ymm4, %ymm9, %ymm11
vmulps %ymm8, %ymm8, %ymm12
vmulps %ymm12, %ymm8, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vmulps %ymm7, %ymm8, %ymm13
vmulps %ymm7, %ymm13, %ymm7
vmulps %ymm5, %ymm7, %ymm14
vmulps %ymm13, %ymm8, %ymm8
vmulps %ymm6, %ymm8, %ymm13
vaddps %ymm13, %ymm14, %ymm13
vaddps %ymm13, %ymm11, %ymm11
vmulps %ymm4, %ymm12, %ymm4
vaddps %ymm4, %ymm9, %ymm4
vmulps %ymm5, %ymm8, %ymm5
vmulps %ymm6, %ymm7, %ymm6
vaddps %ymm6, %ymm5, %ymm5
vaddps %ymm5, %ymm4, %ymm6
vmulps %ymm10, %ymm9, %ymm4
vmulps %ymm10, %ymm11, %ymm5
vmulps %ymm6, %ymm10, %ymm6
vmulps %ymm10, %ymm12, %ymm7
vbroadcastss 0x40(%rsi), %ymm8
vbroadcastss 0x80(%rsi), %ymm9
vbroadcastss 0xc0(%rsi), %ymm10
vbroadcastss 0x100(%rsi), %ymm11
vmulps %ymm7, %ymm11, %ymm11
vmulps %ymm6, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm5, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm9
vmulps %ymm4, %ymm8, %ymm8
vbroadcastss 0x50(%rsi), %ymm10
vbroadcastss 0x90(%rsi), %ymm11
vbroadcastss 0xd0(%rsi), %ymm12
vaddps %ymm9, %ymm8, %ymm8
vbroadcastss 0x110(%rsi), %ymm9
vmulps %ymm7, %ymm9, %ymm9
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm9, %ymm12, %ymm9
vmulps %ymm5, %ymm11, %ymm11
vaddps %ymm9, %ymm11, %ymm9
vmulps %ymm4, %ymm10, %ymm10
vaddps %ymm9, %ymm10, %ymm9
vbroadcastss 0x60(%rsi), %ymm10
vbroadcastss 0xa0(%rsi), %ymm11
vbroadcastss 0xe0(%rsi), %ymm12
vbroadcastss 0x120(%rsi), %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm5, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm4, %ymm10, %ymm10
vbroadcastss 0x70(%rsi), %ymm12
vbroadcastss 0xb0(%rsi), %ymm13
vbroadcastss 0xf0(%rsi), %ymm14
vbroadcastss 0x130(%rsi), %ymm15
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm7, %ymm15, %ymm11
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm11, %ymm14, %ymm11
vmulps %ymm5, %ymm13, %ymm13
vaddps %ymm11, %ymm13, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vmulps %ymm3, %ymm11, %ymm11
vmulps %ymm2, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm1, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm9
vmulps %ymm0, %ymm8, %ymm8
vbroadcastss 0x44(%rsi), %ymm10
vbroadcastss 0x84(%rsi), %ymm11
vbroadcastss 0xc4(%rsi), %ymm12
vaddps %ymm9, %ymm8, %ymm8
vbroadcastss 0x104(%rsi), %ymm9
vmulps %ymm7, %ymm9, %ymm9
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm9, %ymm12, %ymm9
vmulps %ymm5, %ymm11, %ymm11
vaddps %ymm9, %ymm11, %ymm9
vmulps %ymm4, %ymm10, %ymm10
vaddps %ymm9, %ymm10, %ymm9
vbroadcastss 0x54(%rsi), %ymm10
vbroadcastss 0x94(%rsi), %ymm11
vbroadcastss 0xd4(%rsi), %ymm12
vbroadcastss 0x114(%rsi), %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm5, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm4, %ymm10, %ymm10
vbroadcastss 0x64(%rsi), %ymm12
vbroadcastss 0xa4(%rsi), %ymm13
vbroadcastss 0xe4(%rsi), %ymm14
vbroadcastss 0x124(%rsi), %ymm15
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm7, %ymm15, %ymm11
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm11, %ymm14, %ymm11
vmulps %ymm5, %ymm13, %ymm13
vaddps %ymm11, %ymm13, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vbroadcastss 0x74(%rsi), %ymm12
vbroadcastss 0xb4(%rsi), %ymm13
vbroadcastss 0xf4(%rsi), %ymm14
vbroadcastss 0x134(%rsi), %ymm15
vmulps %ymm7, %ymm15, %ymm15
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm15, %ymm14, %ymm14
vmulps %ymm5, %ymm13, %ymm13
vaddps %ymm14, %ymm13, %ymm13
vmulps %ymm4, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm3, %ymm12, %ymm12
vmulps %ymm2, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm1, %ymm10, %ymm10
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm0, %ymm9, %ymm9
vaddps %ymm10, %ymm9, %ymm9
vbroadcastss 0x48(%rsi), %ymm10
vbroadcastss 0x88(%rsi), %ymm11
vbroadcastss 0xc8(%rsi), %ymm12
vbroadcastss 0x108(%rsi), %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm6, %ymm12, %ymm12
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm5, %ymm11, %ymm11
vaddps %ymm12, %ymm11, %ymm11
vmulps %ymm4, %ymm10, %ymm10
vbroadcastss 0x58(%rsi), %ymm12
vbroadcastss 0x98(%rsi), %ymm13
vbroadcastss 0xd8(%rsi), %ymm14
vbroadcastss 0x118(%rsi), %ymm15
vaddps %ymm11, %ymm10, %ymm10
vmulps %ymm7, %ymm15, %ymm11
vmulps %ymm6, %ymm14, %ymm14
vaddps %ymm11, %ymm14, %ymm11
vmulps %ymm5, %ymm13, %ymm13
vaddps %ymm11, %ymm13, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vaddps %ymm11, %ymm12, %ymm11
vbroadcastss 0x68(%rsi), %ymm12
vbroadcastss 0xe8(%rsi), %ymm13
vbroadcastss 0x128(%rsi), %ymm14
vmulps %ymm7, %ymm14, %ymm14
vmulps %ymm6, %ymm13, %ymm13
vaddps %ymm14, %ymm13, %ymm13
vbroadcastss 0xa8(%rsi), %ymm14
vmulps %ymm5, %ymm14, %ymm14
vaddps %ymm13, %ymm14, %ymm13
vmulps %ymm4, %ymm12, %ymm12
vbroadcastss 0x138(%rsi), %ymm14
vaddps %ymm13, %ymm12, %ymm12
vmulps %ymm7, %ymm14, %ymm7
vbroadcastss 0xf8(%rsi), %ymm13
vmulps %ymm6, %ymm13, %ymm6
vaddps %ymm7, %ymm6, %ymm6
vbroadcastss 0xb8(%rsi), %ymm7
vmulps %ymm7, %ymm5, %ymm5
vaddps %ymm6, %ymm5, %ymm5
vbroadcastss 0x78(%rsi), %ymm6
vmulps %ymm6, %ymm4, %ymm4
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm4, %ymm3, %ymm3
vmulps %ymm2, %ymm12, %ymm2
vaddps %ymm3, %ymm2, %ymm2
vmulps %ymm1, %ymm11, %ymm1
vaddps %ymm2, %ymm1, %ymm1
jmp 0x14ecd83
cmpl $0x3, %edi
jne 0x14ed8e8
subq $0x188, %rsp # imm = 0x188
vbroadcastss 0x4c(%rsi), %ymm5
vbroadcastss 0x5c(%rsi), %ymm4
vbroadcastss 0x6c(%rsi), %ymm3
vbroadcastss 0x8c(%rsi), %ymm11
vbroadcastss 0x9c(%rsi), %ymm2
vmovaps (%rdx), %ymm7
vxorps %xmm0, %xmm0, %xmm0
vbroadcastss 0x9ff4d3(%rip), %ymm1 # 0x1eec714
vcmpeqps %ymm0, %ymm7, %ymm6
vcmpeqps %ymm1, %ymm7, %ymm8
vorps %ymm6, %ymm8, %ymm6
vmovaps (%rcx), %ymm12
vcmpeqps %ymm0, %ymm12, %ymm0
vcmpeqps %ymm1, %ymm12, %ymm8
vorps %ymm0, %ymm8, %ymm0
vorps %ymm0, %ymm6, %ymm10
vbroadcastss 0x90(%rsi), %ymm0
vmovups %ymm0, 0x40(%rsp)
vbroadcastss 0x94(%rsi), %ymm8
vmovups %ymm8, 0x20(%rsp)
vbroadcastss 0x98(%rsi), %ymm9
vmovups %ymm9, (%rsp)
vbroadcastss 0xa0(%rsi), %ymm13
vmovups %ymm13, -0x80(%rsp)
vbroadcastss 0xa4(%rsi), %ymm14
vmovups %ymm14, 0x160(%rsp)
vmulps %ymm0, %ymm7, %ymm6
vmulps %ymm7, %ymm8, %ymm8
vmulps %ymm5, %ymm12, %ymm5
vaddps %ymm6, %ymm5, %ymm5
vmulps %ymm7, %ymm9, %ymm6
vmulps %ymm4, %ymm12, %ymm4
vaddps %ymm4, %ymm8, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vaddps %ymm6, %ymm3, %ymm3
vaddps %ymm7, %ymm12, %ymm6
vrcpps %ymm6, %ymm8
vmulps %ymm6, %ymm8, %ymm6
vsubps %ymm6, %ymm1, %ymm6
vmulps %ymm6, %ymm8, %ymm6
vaddps %ymm6, %ymm8, %ymm6
vsubps %ymm7, %ymm1, %ymm9
vmulps %ymm6, %ymm5, %ymm0
vmovups %ymm0, -0x20(%rsp)
vmulps %ymm6, %ymm4, %ymm0
vmovups %ymm0, -0x40(%rsp)
vmulps %ymm6, %ymm3, %ymm0
vmovups %ymm0, -0x60(%rsp)
vmulps %ymm9, %ymm11, %ymm0
vmulps %ymm2, %ymm9, %ymm2
vmulps %ymm13, %ymm12, %ymm3
vaddps %ymm3, %ymm0, %ymm0
vmulps %ymm14, %ymm12, %ymm3
vaddps %ymm3, %ymm2, %ymm2
vbroadcastss 0xac(%rsi), %ymm3
vmulps %ymm3, %ymm9, %ymm3
vbroadcastss 0xa8(%rsi), %ymm4
vmovups %ymm4, 0x140(%rsp)
vmulps %ymm4, %ymm12, %ymm4
vaddps %ymm4, %ymm3, %ymm3
vaddps %ymm12, %ymm9, %ymm4
vrcpps %ymm4, %ymm5
vmulps %ymm5, %ymm4, %ymm4
vsubps %ymm4, %ymm1, %ymm4
vmulps %ymm4, %ymm5, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm4, %ymm0, %ymm0
vmovups %ymm0, 0x120(%rsp)
vmulps %ymm4, %ymm2, %ymm0
vmovups %ymm0, 0x100(%rsp)
vbroadcastss 0xcc(%rsi), %ymm0
vmulps %ymm4, %ymm3, %ymm2
vmovups %ymm2, 0xe0(%rsp)
vbroadcastss 0xdc(%rsi), %ymm2
vsubps %ymm12, %ymm1, %ymm11
vmovaps %ymm1, %ymm4
vmulps %ymm0, %ymm11, %ymm0
vbroadcastss 0xe0(%rsi), %ymm1
vmovups %ymm1, 0xc0(%rsp)
vmulps %ymm1, %ymm9, %ymm3
vaddps %ymm3, %ymm0, %ymm3
vmulps %ymm2, %ymm11, %ymm2
vbroadcastss 0xe4(%rsi), %ymm0
vmovups %ymm0, 0xa0(%rsp)
vmulps %ymm0, %ymm9, %ymm5
vaddps %ymm5, %ymm2, %ymm5
vbroadcastss 0xec(%rsi), %ymm2
vmulps %ymm2, %ymm11, %ymm6
vbroadcastss 0xe8(%rsi), %ymm13
vmulps %ymm13, %ymm9, %ymm8
vaddps %ymm11, %ymm9, %ymm1
vrcpps %ymm1, %ymm0
vaddps %ymm6, %ymm8, %ymm6
vmulps %ymm0, %ymm1, %ymm1
vsubps %ymm1, %ymm4, %ymm1
vmulps %ymm1, %ymm0, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmulps %ymm0, %ymm3, %ymm8
vmulps %ymm0, %ymm5, %ymm1
vmovups %ymm1, 0x80(%rsp)
vmulps %ymm0, %ymm6, %ymm0
vmovups %ymm0, 0x60(%rsp)
vbroadcastss 0x10c(%rsi), %ymm0
vbroadcastss 0x11c(%rsi), %ymm1
vmulps %ymm7, %ymm0, %ymm3
vbroadcastss 0xd0(%rsi), %ymm4
vmulps %ymm4, %ymm11, %ymm15
vaddps %ymm3, %ymm15, %ymm6
vmulps %ymm7, %ymm1, %ymm1
vbroadcastss 0xd4(%rsi), %ymm15
vmulps %ymm15, %ymm11, %ymm14
vaddps %ymm1, %ymm14, %ymm5
vbroadcastss 0x12c(%rsi), %ymm14
vmulps %ymm7, %ymm14, %ymm14
vbroadcastss 0xd8(%rsi), %ymm3
vmulps %ymm3, %ymm11, %ymm2
vaddps %ymm7, %ymm11, %ymm1
vrcpps %ymm1, %ymm0
vaddps %ymm2, %ymm14, %ymm2
vmulps %ymm0, %ymm1, %ymm1
vbroadcastss 0x9ff268(%rip), %ymm14 # 0x1eec714
vsubps %ymm1, %ymm14, %ymm1
vmulps %ymm1, %ymm0, %ymm1
vaddps %ymm1, %ymm0, %ymm0
vmulps %ymm0, %ymm6, %ymm1
vmulps %ymm0, %ymm5, %ymm6
vmulps %ymm0, %ymm2, %ymm0
vmovups -0x20(%rsp), %ymm2
vblendvps %ymm10, 0x40(%rsp), %ymm2, %ymm14
vmovups -0x40(%rsp), %ymm2
vblendvps %ymm10, 0x20(%rsp), %ymm2, %ymm2
vmovups %ymm2, -0x40(%rsp)
vmovups -0x60(%rsp), %ymm2
vblendvps %ymm10, (%rsp), %ymm2, %ymm2
vmovups %ymm2, (%rsp)
vmovups 0x120(%rsp), %ymm2
vblendvps %ymm10, -0x80(%rsp), %ymm2, %ymm2
vmovups %ymm2, -0x80(%rsp)
vmovups 0x100(%rsp), %ymm2
vblendvps %ymm10, 0x160(%rsp), %ymm2, %ymm2
vmovups %ymm2, -0x60(%rsp)
vmovups 0xe0(%rsp), %ymm2
vblendvps %ymm10, 0x140(%rsp), %ymm2, %ymm2
vmovups %ymm2, -0x20(%rsp)
vblendvps %ymm10, 0xc0(%rsp), %ymm8, %ymm8
vmovups 0x80(%rsp), %ymm2
vblendvps %ymm10, 0xa0(%rsp), %ymm2, %ymm5
vmovups 0x60(%rsp), %ymm2
vblendvps %ymm10, %ymm13, %ymm2, %ymm2
vmovups %ymm2, 0x40(%rsp)
vblendvps %ymm10, %ymm4, %ymm1, %ymm13
vblendvps %ymm10, %ymm15, %ymm6, %ymm4
vblendvps %ymm10, %ymm3, %ymm0, %ymm0
vmovups %ymm0, 0x20(%rsp)
vmulps %ymm9, %ymm9, %ymm0
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm11, %ymm11, %ymm0
vmulps %ymm0, %ymm11, %ymm10
vmulps %ymm7, %ymm9, %ymm0
vmulps %ymm0, %ymm9, %ymm2
vbroadcastss 0xa03a3d(%rip), %ymm15 # 0x1ef0fec
vmulps %ymm2, %ymm15, %ymm3
vmulps %ymm11, %ymm12, %ymm2
vmulps %ymm2, %ymm11, %ymm6
vmulps %ymm6, %ymm15, %ymm6
vmulps %ymm0, %ymm7, %ymm0
vmulps %ymm0, %ymm15, %ymm9
vmulps %ymm2, %ymm12, %ymm0
vmulps %ymm0, %ymm15, %ymm0
vmulps %ymm7, %ymm7, %ymm2
vmulps %ymm2, %ymm7, %ymm11
vmulps %ymm12, %ymm12, %ymm2
vmulps %ymm2, %ymm12, %ymm7
vbroadcastss 0x60(%rsi), %ymm2
vbroadcastss 0x70(%rsi), %ymm12
vmulps %ymm12, %ymm11, %ymm12
vmulps %ymm2, %ymm9, %ymm2
vbroadcastss 0x50(%rsi), %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm3, %ymm15, %ymm12
vbroadcastss 0x40(%rsi), %ymm15
vaddps %ymm2, %ymm12, %ymm2
vmulps %ymm1, %ymm15, %ymm12
vbroadcastss 0xb0(%rsi), %ymm15
vaddps %ymm2, %ymm12, %ymm12
vmulps %ymm15, %ymm11, %ymm2
vmulps -0x80(%rsp), %ymm9, %ymm15
vaddps %ymm2, %ymm15, %ymm2
vmulps %ymm3, %ymm14, %ymm14
vaddps %ymm2, %ymm14, %ymm2
vbroadcastss 0x80(%rsi), %ymm14
vmulps %ymm1, %ymm14, %ymm14
vaddps %ymm2, %ymm14, %ymm2
vbroadcastss 0xf0(%rsi), %ymm14
vmulps %ymm14, %ymm11, %ymm14
vmulps %ymm8, %ymm9, %ymm8
vaddps %ymm14, %ymm8, %ymm8
vmulps %ymm3, %ymm13, %ymm13
vaddps %ymm8, %ymm13, %ymm8
vbroadcastss 0xc0(%rsi), %ymm13
vmulps %ymm1, %ymm13, %ymm13
vaddps %ymm8, %ymm13, %ymm8
vbroadcastss 0x130(%rsi), %ymm13
vmulps %ymm13, %ymm11, %ymm13
vbroadcastss 0x120(%rsi), %ymm14
vmulps %ymm14, %ymm9, %ymm14
vaddps %ymm13, %ymm14, %ymm13
vbroadcastss 0x110(%rsi), %ymm14
vmulps %ymm3, %ymm14, %ymm14
vaddps %ymm13, %ymm14, %ymm13
vbroadcastss 0x100(%rsi), %ymm14
vmulps %ymm1, %ymm14, %ymm14
vaddps %ymm13, %ymm14, %ymm13
vmulps %ymm7, %ymm13, %ymm13
vmulps %ymm0, %ymm8, %ymm8
vaddps %ymm13, %ymm8, %ymm8
vmulps %ymm2, %ymm6, %ymm2
vaddps %ymm2, %ymm8, %ymm2
vmulps %ymm12, %ymm10, %ymm8
vbroadcastss 0x74(%rsi), %ymm13
vaddps %ymm2, %ymm8, %ymm12
vmulps %ymm13, %ymm11, %ymm2
vbroadcastss 0x64(%rsi), %ymm8
vmulps %ymm8, %ymm9, %ymm8
vaddps %ymm2, %ymm8, %ymm2
vbroadcastss 0x54(%rsi), %ymm8
vmulps %ymm3, %ymm8, %ymm8
vaddps %ymm2, %ymm8, %ymm2
vbroadcastss 0x44(%rsi), %ymm8
vmulps %ymm1, %ymm8, %ymm8
vaddps %ymm2, %ymm8, %ymm2
vbroadcastss 0xb4(%rsi), %ymm8
vmulps %ymm8, %ymm11, %ymm8
vmulps -0x60(%rsp), %ymm9, %ymm13
vaddps %ymm8, %ymm13, %ymm8
vmulps -0x40(%rsp), %ymm3, %ymm13
vaddps %ymm8, %ymm13, %ymm8
vbroadcastss 0x84(%rsi), %ymm13
vmulps %ymm1, %ymm13, %ymm13
vaddps %ymm8, %ymm13, %ymm8
vbroadcastss 0xf4(%rsi), %ymm13
vmulps %ymm13, %ymm11, %ymm13
vmulps %ymm5, %ymm9, %ymm5
vaddps %ymm5, %ymm13, %ymm5
vmulps %ymm4, %ymm3, %ymm4
vaddps %ymm5, %ymm4, %ymm4
vbroadcastss 0xc4(%rsi), %ymm5
vmulps %ymm5, %ymm1, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vbroadcastss 0x124(%rsi), %ymm5
vbroadcastss 0x134(%rsi), %ymm13
vmulps %ymm13, %ymm11, %ymm13
vmulps %ymm5, %ymm9, %ymm5
vbroadcastss 0x114(%rsi), %ymm14
vaddps %ymm5, %ymm13, %ymm5
vmulps %ymm3, %ymm14, %ymm13
vbroadcastss 0x104(%rsi), %ymm14
vaddps %ymm5, %ymm13, %ymm5
vmulps %ymm1, %ymm14, %ymm13
vaddps %ymm5, %ymm13, %ymm5
vmulps %ymm5, %ymm7, %ymm5
vmulps %ymm4, %ymm0, %ymm4
vaddps %ymm5, %ymm4, %ymm4
vmulps %ymm6, %ymm8, %ymm5
vaddps %ymm4, %ymm5, %ymm4
vmulps %ymm2, %ymm10, %ymm2
vaddps %ymm4, %ymm2, %ymm5
vbroadcastss 0x68(%rsi), %ymm2
vbroadcastss 0x78(%rsi), %ymm4
vmulps %ymm4, %ymm11, %ymm4
vmulps %ymm2, %ymm9, %ymm2
vaddps %ymm4, %ymm2, %ymm2
vbroadcastss 0x58(%rsi), %ymm4
vmulps %ymm4, %ymm3, %ymm4
vaddps %ymm2, %ymm4, %ymm2
vbroadcastss 0x48(%rsi), %ymm4
vmulps %ymm4, %ymm1, %ymm4
vaddps %ymm2, %ymm4, %ymm2
vbroadcastss 0xb8(%rsi), %ymm4
vmulps %ymm4, %ymm11, %ymm4
vmulps -0x20(%rsp), %ymm9, %ymm8
vaddps %ymm4, %ymm8, %ymm4
vmulps (%rsp), %ymm3, %ymm8
vaddps %ymm4, %ymm8, %ymm4
vbroadcastss 0x88(%rsi), %ymm8
vmulps %ymm1, %ymm8, %ymm8
vaddps %ymm4, %ymm8, %ymm4
vbroadcastss 0xf8(%rsi), %ymm8
vmulps %ymm8, %ymm11, %ymm8
vmulps 0x40(%rsp), %ymm9, %ymm13
vaddps %ymm8, %ymm13, %ymm8
vbroadcastss 0xc8(%rsi), %ymm13
vmulps 0x20(%rsp), %ymm3, %ymm14
vaddps %ymm8, %ymm14, %ymm8
vmulps %ymm1, %ymm13, %ymm13
vbroadcastss 0x138(%rsi), %ymm14
vaddps %ymm8, %ymm13, %ymm8
vmulps %ymm14, %ymm11, %ymm11
vbroadcastss 0x128(%rsi), %ymm13
vmulps %ymm13, %ymm9, %ymm9
vaddps %ymm11, %ymm9, %ymm9
vbroadcastss 0x118(%rsi), %ymm11
vmulps %ymm3, %ymm11, %ymm3
vaddps %ymm3, %ymm9, %ymm3
vbroadcastss 0x108(%rsi), %ymm9
vmulps %ymm1, %ymm9, %ymm1
vaddps %ymm3, %ymm1, %ymm1
vmulps %ymm1, %ymm7, %ymm1
vmulps %ymm0, %ymm8, %ymm0
vaddps %ymm1, %ymm0, %ymm0
vmulps %ymm4, %ymm6, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps %ymm2, %ymm10, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmovaps %ymm12, (%rax)
vmovaps %ymm5, 0x20(%rax)
vmovaps %ymm0, 0x40(%rax)
addq $0x188, %rsp # imm = 0x188
vzeroupper
retq
cmpl $0x6, %edi
jne 0x14ed9fd
vbroadcastss 0x40(%rsi), %ymm1
vmovss 0x50(%rsi), %xmm0
vmovss 0x54(%rsi), %xmm3
vsubss 0x40(%rsi), %xmm0, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vinsertf128 $0x1, %xmm0, %ymm0, %ymm2
vmovaps (%rdx), %ymm0
vmulps %ymm2, %ymm0, %ymm2
vbroadcastss 0x70(%rsi), %ymm4
vaddps %ymm2, %ymm1, %ymm2
vmovss 0x60(%rsi), %xmm1
vsubss 0x70(%rsi), %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vinsertf128 $0x1, %xmm1, %ymm1, %ymm1
vmulps %ymm1, %ymm0, %ymm1
vaddps %ymm1, %ymm4, %ymm1
vsubps %ymm2, %ymm1, %ymm4
vmovaps (%rcx), %ymm1
vmulps %ymm4, %ymm1, %ymm4
vaddps %ymm4, %ymm2, %ymm2
vbroadcastss 0x44(%rsi), %ymm4
vsubss 0x44(%rsi), %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vinsertf128 $0x1, %xmm3, %ymm3, %ymm3
vbroadcastss 0x74(%rsi), %ymm5
vmulps %ymm3, %ymm0, %ymm3
vmovss 0x64(%rsi), %xmm6
vsubss 0x74(%rsi), %xmm6, %xmm6
vaddps %ymm3, %ymm4, %ymm3
vshufps $0x0, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmulps %ymm4, %ymm0, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vbroadcastss 0x48(%rsi), %ymm5
vmulps %ymm4, %ymm1, %ymm4
vmovss 0x58(%rsi), %xmm6
vsubss 0x48(%rsi), %xmm6, %xmm6
vaddps %ymm4, %ymm3, %ymm3
vshufps $0x0, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[0,0,0,0]
vinsertf128 $0x1, %xmm4, %ymm4, %ymm4
vmulps %ymm4, %ymm0, %ymm4
vaddps %ymm4, %ymm5, %ymm4
vbroadcastss 0x78(%rsi), %ymm5
vmovss 0x68(%rsi), %xmm6
vsubss 0x78(%rsi), %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vinsertf128 $0x1, %xmm6, %ymm6, %ymm6
vmulps %ymm6, %ymm0, %ymm0
vaddps %ymm0, %ymm5, %ymm0
vsubps %ymm4, %ymm0, %ymm0
vmulps %ymm0, %ymm1, %ymm0
vaddps %ymm0, %ymm4, %ymm0
vmovaps %ymm2, (%rax)
vmovaps %ymm3, 0x20(%rax)
jmp 0x14ecd94
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x40(%rax)
vmovaps %ymm0, 0x20(%rax)
vmovaps %ymm0, (%rax)
vzeroupper
retq
|
/embree[P]embree/kernels/subdiv/subdivpatch1base_eval.cpp
|
embree::avx::InstanceIntersector1::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstancePrimitive const&)
|
bool InstanceIntersector1::pointQuery(PointQuery* query, PointQueryContext* context, const InstancePrimitive& prim)
{
const Instance* instance = prim.instance;
const AffineSpace3fa local2world = instance->getLocal2World();
const AffineSpace3fa world2local = instance->getWorld2Local();
float similarityScale = 0.f;
const bool similtude = context->query_type == POINT_QUERY_TYPE_SPHERE
&& similarityTransform(world2local, &similarityScale);
assert((similtude && similarityScale > 0) || !similtude);
if (likely(instance_id_stack::push(context->userContext, prim.instID_, 0, world2local, local2world)))
{
PointQuery query_inst;
query_inst.time = query->time;
query_inst.p = xfmPoint(world2local, query->p);
query_inst.radius = query->radius * similarityScale;
PointQueryContext context_inst(
(Scene*)instance->object,
context->query_ws,
similtude ? POINT_QUERY_TYPE_SPHERE : POINT_QUERY_TYPE_AABB,
context->func,
context->userContext,
similarityScale,
context->userPtr);
bool changed = instance->object->intersectors.pointQuery(&query_inst, &context_inst);
instance_id_stack::pop(context->userContext);
return changed;
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
subq $0x110, %rsp # imm = 0x110
movq %rdx, %r15
movq %rsi, %rbx
movq %rdi, %r14
movq (%rdx), %r12
movzbl 0x3d(%r12), %ecx
shll $0x8, %ecx
movq 0x60(%r12), %rax
cmpl $0x100, %ecx # imm = 0x100
je 0x14fc56d
vmovaps (%rax), %xmm7
vmovaps 0x10(%rax), %xmm6
vmovaps 0x20(%rax), %xmm8
vmovaps 0x30(%rax), %xmm9
vmovaps 0x70(%r12), %xmm10
vmovaps 0x80(%r12), %xmm11
vmovaps 0x90(%r12), %xmm12
vmovaps 0xa0(%r12), %xmm13
cmpl $0x1, 0x18(%rbx)
jne 0x14fc2db
vdpps $0x7f, %xmm11, %xmm10, %xmm0
vbroadcastss 0xa24c36(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss 0x9f0472(%rip), %xmm1 # 0x1eec710
ja 0x14fc2cc
vdpps $0x7f, %xmm12, %xmm10, %xmm1
vandps %xmm2, %xmm1, %xmm1
vucomiss 0x9f045e(%rip), %xmm1 # 0x1eec710
ja 0x14fc2cc
vdpps $0x7f, %xmm12, %xmm11, %xmm1
vandps %xmm2, %xmm1, %xmm1
vucomiss 0x9f044a(%rip), %xmm1 # 0x1eec710
jbe 0x14fc487
xorl %ebp, %ebp
movzbl %bpl, %ecx
movl $0x2, %eax
subl %ecx, %eax
jmp 0x14fc2e4
movl $0x2, %eax
vxorps %xmm0, %xmm0, %xmm0
movq 0x28(%rbx), %rcx
movl 0x8(%r15), %edx
movl 0x88(%rcx), %esi
movl %edx, 0x80(%rcx,%rsi,4)
movl $0x0, 0x84(%rcx,%rsi,4)
shlq $0x6, %rsi
vmovups %xmm10, (%rcx,%rsi)
vmovups %xmm11, 0x10(%rcx,%rsi)
vmovups %xmm12, 0x20(%rcx,%rsi)
vmovups %xmm13, 0x30(%rcx,%rsi)
vmovups %xmm7, 0x40(%rcx,%rsi)
vmovups %xmm6, 0x50(%rcx,%rsi)
vmovups %xmm8, 0x60(%rcx,%rsi)
vmovups %xmm9, 0x70(%rcx,%rsi)
incl 0x88(%rcx)
vmovss 0xc(%r14), %xmm1
vmovss %xmm1, 0xc(%rsp)
vbroadcastss (%r14), %xmm1
vbroadcastss 0x4(%r14), %xmm2
vbroadcastss 0x8(%r14), %xmm3
vmulps %xmm3, %xmm12, %xmm3
vaddps %xmm3, %xmm13, %xmm3
vmulps %xmm2, %xmm11, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm1, %xmm10, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmovlps %xmm1, (%rsp)
vextractps $0x2, %xmm1, 0x8(%rsp)
vmulss 0x10(%r14), %xmm0, %xmm1
vmovss %xmm1, 0x10(%rsp)
movq 0x58(%r12), %rcx
movq 0x10(%rbx), %rdx
movq 0x38(%rbx), %rsi
movq %rcx, 0xa0(%rsp)
movq $0x0, 0xa8(%rsp)
movq %rdx, 0xb0(%rsp)
movl %eax, 0xb8(%rsp)
vmovdqa 0x20(%rbx), %xmm2
vmovdqa %xmm2, 0xc0(%rsp)
vmovss %xmm0, 0xd0(%rsp)
movq %rsi, 0xd8(%rsp)
movq $-0x1, 0xe0(%rsp)
vbroadcastss 0x10(%rdx), %xmm1
vmovaps %xmm1, 0xf0(%rsp)
vmovss 0x10(%rdx), %xmm1
cmpl $0x2, %eax
jne 0x14fc424
vucomiss 0x9ef619(%rip), %xmm1 # 0x1eeba20
jae 0x14fc41d
vpextrq $0x1, %xmm2, %rsi
movl 0x88(%rsi), %edi
testl %edi, %edi
jne 0x14fc73c
vshufps $0x0, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
jmp 0x14fc42d
vmulss %xmm1, %xmm0, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
leaq 0xa0(%rsp), %rdx
vmovaps %xmm0, 0x50(%rdx)
movq 0x58(%r12), %rax
leaq 0x58(%rax), %rdi
movq %rsp, %rsi
callq *0x88(%rax)
movq 0x28(%rbx), %rcx
movl 0x88(%rcx), %edx
decl %edx
movl %edx, 0x88(%rcx)
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
movl %esi, 0x80(%rcx,%rdx,4)
movl 0x88(%rcx), %edx
movl %esi, 0x84(%rcx,%rdx,4)
addq $0x110, %rsp # imm = 0x110
popq %rbx
popq %r12
popq %r14
popq %r15
popq %rbp
retq
vdpps $0x7f, %xmm10, %xmm10, %xmm1
vdpps $0x7f, %xmm11, %xmm11, %xmm3
vsubss %xmm3, %xmm1, %xmm4
vandps %xmm2, %xmm4, %xmm4
xorl %ebp, %ebp
vucomiss 0x9f026b(%rip), %xmm4 # 0x1eec710
ja 0x14fc2ce
vdpps $0x7f, %xmm12, %xmm12, %xmm4
vsubss %xmm4, %xmm1, %xmm5
vandps %xmm2, %xmm5, %xmm5
vucomiss 0x9f024f(%rip), %xmm5 # 0x1eec710
ja 0x14fc2ce
vsubss %xmm4, %xmm3, %xmm3
vandps %xmm2, %xmm3, %xmm2
vucomiss 0x9f0239(%rip), %xmm2 # 0x1eec710
setbe %bpl
ja 0x14fc2ce
movb $0x1, %bpl
vucomiss %xmm0, %xmm1
jb 0x14fc4f3
vsqrtss %xmm1, %xmm1, %xmm0
jmp 0x14fc2ce
vmovaps %xmm1, %xmm0
vmovaps %xmm7, 0x90(%rsp)
vmovaps %xmm6, 0x80(%rsp)
vmovaps %xmm8, 0x70(%rsp)
vmovaps %xmm9, 0x60(%rsp)
vmovaps %xmm10, 0x50(%rsp)
vmovaps %xmm11, 0x40(%rsp)
vmovaps %xmm12, 0x30(%rsp)
vmovaps %xmm13, 0x20(%rsp)
callq 0x6aa20
vmovaps 0x20(%rsp), %xmm13
vmovaps 0x30(%rsp), %xmm12
vmovaps 0x40(%rsp), %xmm11
vmovaps 0x50(%rsp), %xmm10
vmovaps 0x60(%rsp), %xmm9
vmovaps 0x70(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm6
vmovaps 0x90(%rsp), %xmm7
jmp 0x14fc2ce
vmovss 0x3c(%rax), %xmm2
vmovss 0xc(%rax), %xmm4
vmovss 0x1c(%rax), %xmm3
vmovss 0x2c(%rax), %xmm0
vmulss %xmm4, %xmm4, %xmm6
vmulss %xmm2, %xmm2, %xmm7
vaddss %xmm6, %xmm7, %xmm1
vbroadcastss 0xa2492a(%rip), %xmm8 # 0x1f20ec0
vxorps %xmm3, %xmm8, %xmm5
vmulss %xmm3, %xmm5, %xmm5
vaddss %xmm1, %xmm5, %xmm1
vxorps %xmm0, %xmm8, %xmm8
vmulss %xmm0, %xmm8, %xmm8
vaddss %xmm1, %xmm8, %xmm9
vmulss %xmm0, %xmm2, %xmm1
vmulss %xmm3, %xmm4, %xmm10
vaddss %xmm1, %xmm10, %xmm11
vsubss %xmm1, %xmm10, %xmm1
vmulss %xmm0, %xmm4, %xmm10
vsubss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm3, %xmm7
vaddss %xmm6, %xmm7, %xmm7
vaddss %xmm7, %xmm8, %xmm7
vmulss %xmm3, %xmm2, %xmm8
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm8, %xmm10, %xmm12
vmulss %xmm0, %xmm3, %xmm3
vaddss %xmm8, %xmm10, %xmm2
vaddss %xmm4, %xmm3, %xmm8
vsubss %xmm4, %xmm3, %xmm3
vaddss %xmm11, %xmm11, %xmm4
vaddss %xmm12, %xmm12, %xmm10
vaddss %xmm6, %xmm5, %xmm5
vmulss %xmm0, %xmm0, %xmm0
vaddss %xmm5, %xmm0, %xmm5
vshufps $0x0, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm10, %xmm10, %xmm6 # xmm6 = xmm10[0,0,0,0]
vmovaps 0x9f00e1(%rip), %xmm9 # 0x1eec700
vmulps %xmm6, %xmm9, %xmm6
vmovsd 0x9f00c5(%rip), %xmm10 # 0x1eec6f0
vmulps %xmm4, %xmm10, %xmm4
vaddps %xmm4, %xmm6, %xmm4
vmovss 0x9f00d9(%rip), %xmm6 # 0x1eec714
vmulps %xmm6, %xmm0, %xmm0
vaddps %xmm4, %xmm0, %xmm0
vaddss %xmm8, %xmm8, %xmm4
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm9, %xmm4
vmulps %xmm7, %xmm10, %xmm7
vaddps %xmm4, %xmm7, %xmm4
vaddss %xmm1, %xmm1, %xmm1
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm6, %xmm1, %xmm1
vaddps %xmm4, %xmm1, %xmm1
vmovaps (%rax), %xmm4
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vxorps %xmm7, %xmm7, %xmm7
vshufps $0xe9, %xmm7, %xmm4, %xmm4 # xmm4 = xmm4[1,2],xmm7[2,3]
vblendps $0x4, 0x10(%rax), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[2],xmm4[3]
vaddss %xmm3, %xmm3, %xmm3
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm3, %xmm10, %xmm3
vaddss %xmm2, %xmm2, %xmm2
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vaddps %xmm5, %xmm3, %xmm3
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vaddps %xmm7, %xmm4, %xmm3
vbroadcastss (%rax), %xmm4
vmulps %xmm7, %xmm2, %xmm5
vmulps %xmm7, %xmm1, %xmm6
vaddps %xmm5, %xmm6, %xmm6
vmulps %xmm0, %xmm4, %xmm4
vaddps %xmm6, %xmm4, %xmm7
vbroadcastss 0x10(%rax), %xmm4
vbroadcastss 0x14(%rax), %xmm6
vmulps %xmm1, %xmm6, %xmm6
vaddps %xmm5, %xmm6, %xmm5
vmulps %xmm0, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm6
vbroadcastss 0x24(%rax), %xmm4
vbroadcastss 0x28(%rax), %xmm5
vmulps %xmm2, %xmm5, %xmm5
vmulps %xmm1, %xmm4, %xmm4
vaddps %xmm5, %xmm4, %xmm4
vbroadcastss 0x20(%rax), %xmm5
vmulps %xmm0, %xmm5, %xmm5
vaddps %xmm4, %xmm5, %xmm8
vbroadcastss 0x38(%rax), %xmm4
vmulps %xmm2, %xmm4, %xmm2
vbroadcastss 0x34(%rax), %xmm4
vmulps %xmm1, %xmm4, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vbroadcastss 0x30(%rax), %xmm2
vmulps %xmm0, %xmm2, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm3, %xmm9
jmp 0x14fc254
leaq 0x100(%rsp), %r14
vmovaps %xmm1, %xmm0
movq %r14, %rdx
callq 0x3f642f
vmovaps (%r14), %xmm0
jmp 0x14fc42d
|
/embree[P]embree/kernels/geometry/instance_intersector.cpp
|
embree::avx::InstanceArrayIntersector1MB::pointQuery(embree::PointQueryK<1>*, embree::PointQueryContext*, embree::InstanceArrayPrimitive const&)
|
// Point query against one motion-blurred instance-array primitive:
// transforms the query into the instanced object's local space at the
// query's time stamp, recurses into the instanced acceleration structure,
// and restores the instance stack before returning.
// Returns true iff the nested point query reported a change.
bool InstanceArrayIntersector1MB::pointQuery(PointQuery* query, PointQueryContext* context, const Primitive& prim)
{
// Resolve the instance-array geometry and the object slot it references.
const InstanceArray* instance = context->scene->get<InstanceArray>(prim.instID_);
Accel* object = instance->getObject(prim.primID_);
// An unassigned slot has no object to query.
if (!object) return false;
// Time-dependent transforms: sampled at query->time (MB = motion blur).
const AffineSpace3fa local2world = instance->getLocal2World(prim.primID_, query->time);
const AffineSpace3fa world2local = instance->getWorld2Local(prim.primID_, query->time);
float similarityScale = 0.f;
// A sphere query stays a sphere only if world2local is a similarity
// transform (rotation + uniform scale); similarityTransform also yields
// the uniform scale factor used to rescale the query radius below.
const bool similtude = context->query_type == POINT_QUERY_TYPE_SPHERE
&& similarityTransform(world2local, &similarityScale);
// push fails when the instance stack is exhausted; in that case the
// nested query is skipped entirely.
if (likely(instance_id_stack::push(context->userContext, prim.instID_, prim.primID_, world2local, local2world)))
{
// Build the query in the instanced object's local space.
PointQuery query_inst;
query_inst.time = query->time;
query_inst.p = xfmPoint(world2local, query->p);
// NOTE(review): when !similtude, similarityScale stays 0.f, so the
// local-space radius becomes 0 and the context below switches the
// query type to AABB — presumably the AABB path re-derives the extent
// in world space (query_ws is forwarded); confirm against callee.
query_inst.radius = query->radius * similarityScale;
PointQueryContext context_inst(
(Scene*)object,
context->query_ws,
// Non-similarity transforms distort spheres, so fall back to an
// AABB-typed query for the nested traversal.
similtude ? POINT_QUERY_TYPE_SPHERE : POINT_QUERY_TYPE_AABB,
context->func,
context->userContext,
similarityScale,
context->userPtr);
// Recurse into the instanced BVH with the transformed query.
bool changed = object->intersectors.pointQuery(&query_inst, &context_inst);
// Always unwind the instance stack, whatever the nested query returned.
instance_id_stack::pop(context->userContext);
return changed;
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x238, %rsp # imm = 0x238
movq %rdx, %r15
movq %rsi, %rbx
movq %rdi, %r14
movq (%rsi), %rcx
movl (%rdx), %eax
movl 0x4(%rdx), %edx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rdx,8), %r13
movq 0x58(%r13), %r12
testq %r12, %r12
jne 0x14ff060
movq 0x90(%r13), %rcx
movq 0xa0(%r13), %rdx
imulq %rax, %rdx
movl (%rcx,%rdx), %ecx
movl $0xffffffff, %edx # imm = 0xFFFFFFFF
cmpq %rdx, %rcx
je 0x14ff05d
movq 0x60(%r13), %rdx
movq (%rdx,%rcx,8), %r12
jmp 0x14ff060
xorl %r12d, %r12d
testq %r12, %r12
je 0x14ff11a
cmpl $0x1, 0x24(%r13)
jne 0x14ff121
movzbl 0x3d(%r13), %esi
shll $0x8, %esi
movq 0x88(%r13), %rcx
movl 0x20(%rcx), %edx
cmpl $0x100, %esi # imm = 0x100
je 0x15002be
cmpl $0x9243, %edx # imm = 0x9243
jg 0x14ff21f
cmpl $0x9134, %edx # imm = 0x9134
je 0x14ff29a
cmpl $0x9234, %edx # imm = 0x9234
jne 0x14ff789
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm15 # xmm15 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm0
vmovss 0xc(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm11 # xmm11 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm0
vmovss 0x18(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm0
vmovss 0x24(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm13 # xmm13 = xmm0[0,2,3,1]
jmp 0x14ff789
xorl %eax, %eax
jmp 0x15001c2
vmovss 0xc(%r14), %xmm0
vmovss 0x28(%r13), %xmm1
vmovss 0x2c(%r13), %xmm2
vmovss 0x30(%r13), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x9f1875(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm1, %xmm2, %xmm1
vcvttss2si %xmm1, %ecx
vsubss %xmm1, %xmm0, %xmm15
movzbl 0x3d(%r13), %r8d
shll $0x8, %r8d
movq 0x88(%r13), %rdx
imulq $0x38, %rcx, %rdi
leaq (%rdx,%rdi), %rsi
movl 0x20(%rdx,%rdi), %edi
cmpl $0x100, %r8d # imm = 0x100
je 0x1500346
cmpl $0x9243, %edi # imm = 0x9243
jg 0x14ff25b
cmpl $0x9134, %edi # imm = 0x9134
je 0x14ff2fe
cmpl $0x9234, %edi # imm = 0x9234
jne 0x14ff527
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x4(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovsd 0x10(%rdi,%rsi), %xmm1
vmovss 0xc(%rdi,%rsi), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
vmovsd 0x1c(%rdi,%rsi), %xmm2
vmovss 0x18(%rdi,%rsi), %xmm3
vshufps $0x4c, %xmm2, %xmm3, %xmm2 # xmm2 = xmm3[0,3],xmm2[0,1]
vshufps $0x78, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,2,3,1]
vmovsd 0x28(%rdi,%rsi), %xmm2
vmovss 0x24(%rdi,%rsi), %xmm4
vshufps $0x4c, %xmm2, %xmm4, %xmm2 # xmm2 = xmm4[0,3],xmm2[0,1]
vshufps $0x78, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,2,3,1]
jmp 0x14ff527
cmpl $0xb001, %edx # imm = 0xB001
je 0x14ff365
cmpl $0x9244, %edx # imm = 0x9244
jne 0x14ff789
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm15
vmovaps 0x10(%rdx,%rax), %xmm11
vmovaps 0x20(%rdx,%rax), %xmm12
vmovaps 0x30(%rdx,%rax), %xmm13
jmp 0x14ff789
cmpl $0xb001, %edi # imm = 0xB001
je 0x14ff447
cmpl $0x9244, %edi # imm = 0x9244
jne 0x14ff527
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovaps (%rdi,%rsi), %xmm0
vmovaps 0x10(%rdi,%rsi), %xmm1
vmovaps 0x20(%rdi,%rsi), %xmm3
vmovaps 0x30(%rdi,%rsi), %xmm2
jmp 0x14ff527
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm15 # xmm15 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm11 # xmm11 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm12 # xmm12 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm13 # xmm13 = xmm0[0,1],mem[0],zero
jmp 0x14ff789
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovss (%rdi,%rsi), %xmm0
vmovss 0x4(%rdi,%rsi), %xmm1
vmovss 0x8(%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm4
vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdi,%rsi), %xmm2, %xmm3 # xmm3 = xmm2[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm4, %xmm2 # xmm2 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
jmp 0x14ff527
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm1
vmovss (%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rdx,%rax), %xmm2
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ed32a(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ed326(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm13 # xmm13 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm15 # xmm15 = xmm1[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm12 # xmm12 = xmm0[0,1,2],xmm2[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm11 # xmm11 = xmm0[0,1,2],xmm1[0]
jmp 0x14ff789
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x10(%rdi,%rsi), %xmm0
vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vmovlhps %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0,2],xmm0[1,3]
vmovss 0x18(%rdi,%rsi), %xmm2
vmovsd 0x1c(%rdi,%rsi), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdi,%rsi), %xmm4
vmovss 0x28(%rdi,%rsi), %xmm5
vmovss 0x2c(%rdi,%rsi), %xmm6
vmovss 0x30(%rdi,%rsi), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ed245(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ed241(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm4
vmulss %xmm7, %xmm8, %xmm5
vinsertps $0x10, 0x4(%rdi,%rsi), %xmm3, %xmm6 # xmm6 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm5, %xmm1, %xmm3 # xmm3 = xmm1[0,1,2],xmm5[0]
vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm6, %xmm1 # xmm1 = xmm6[0,1],mem[0],xmm6[3]
vinsertps $0x30, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm4[0]
incl %ecx
imulq $0x38, %rcx, %rsi
leaq (%rdx,%rsi), %rcx
movl 0x20(%rdx,%rsi), %edx
cmpl $0x9243, %edx # imm = 0x9243
jg 0x14ff5b9
cmpl $0x9134, %edx # imm = 0x9134
je 0x14ff5f5
cmpl $0x9234, %edx # imm = 0x9234
jne 0x14ff740
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm4
vmovss (%rdx,%rax), %xmm5
vshufps $0x4c, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,3],xmm4[0,1]
vshufps $0x78, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm5
vmovss 0xc(%rdx,%rax), %xmm6
vshufps $0x4c, %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm5
vmovss 0x18(%rdx,%rax), %xmm6
vshufps $0x4c, %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm5
vmovss 0x24(%rdx,%rax), %xmm8
vshufps $0x4c, %xmm5, %xmm8, %xmm5 # xmm5 = xmm8[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x14ff740
cmpl $0xb001, %edx # imm = 0xB001
je 0x14ff659
cmpl $0x9244, %edx # imm = 0x9244
jne 0x14ff740
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm4
vmovaps 0x10(%rdx,%rax), %xmm7
vmovaps 0x20(%rdx,%rax), %xmm6
vmovaps 0x30(%rdx,%rax), %xmm5
jmp 0x14ff740
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm4
vmovss 0x4(%rdx,%rax), %xmm5
vmovss 0x8(%rdx,%rax), %xmm6
vmovss 0xc(%rdx,%rax), %xmm8
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm5, %xmm7 # xmm7 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm6, %xmm5 # xmm5 = xmm6[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm5, %xmm6 # xmm6 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm8, %xmm5 # xmm5 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
jmp 0x14ff740
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm4
vinsertps $0x20, 0x8(%rdx,%rax), %xmm4, %xmm6 # xmm6 = xmm4[0,1],mem[0],xmm4[3]
vmovsd 0x34(%rdx,%rax), %xmm4
vmovss (%rdx,%rax), %xmm5
vmovss 0xc(%rdx,%rax), %xmm7
vmovlhps %xmm4, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,2],xmm4[1,3]
vmovss 0x18(%rdx,%rax), %xmm5
vmovsd 0x1c(%rdx,%rax), %xmm8
vmovlhps %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm8[0]
vshufps $0xd8, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,2],xmm8[1,3]
vmovss 0x24(%rdx,%rax), %xmm8
vmovss 0x28(%rdx,%rax), %xmm9
vmovss 0x2c(%rdx,%rax), %xmm10
vmovss 0x30(%rdx,%rax), %xmm11
vmulss %xmm9, %xmm9, %xmm12
vmulss %xmm8, %xmm8, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vmulss %xmm10, %xmm10, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vmulss %xmm11, %xmm11, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vrsqrtss %xmm12, %xmm12, %xmm13
vmulss 0x9ed030(%rip), %xmm13, %xmm14 # 0x1eec718
vmulss 0x9ed02c(%rip), %xmm12, %xmm12 # 0x1eec71c
vmulss %xmm12, %xmm13, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm12, %xmm13, %xmm12
vaddss %xmm12, %xmm14, %xmm12
vmulss %xmm12, %xmm8, %xmm8
vinsertps $0x30, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm8[0]
vmulss %xmm12, %xmm9, %xmm8
vinsertps $0x30, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[0]
vmulss %xmm12, %xmm10, %xmm8
vmulss %xmm12, %xmm11, %xmm9
vinsertps $0x10, 0x4(%rdx,%rax), %xmm7, %xmm7 # xmm7 = xmm7[0],mem[0],xmm7[2,3]
vinsertps $0x30, %xmm9, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm9[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm7, %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3]
vinsertps $0x30, %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0,1,2],xmm8[0]
vmovss 0x9ecfcc(%rip), %xmm8 # 0x1eec714
vsubss %xmm15, %xmm8, %xmm8
vshufps $0x0, %xmm15, %xmm15, %xmm9 # xmm9 = xmm15[0,0,0,0]
vmulps %xmm4, %xmm9, %xmm4
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm0, %xmm8, %xmm0
vaddps %xmm4, %xmm0, %xmm15
vmulps %xmm7, %xmm9, %xmm0
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm11
vmulps %xmm6, %xmm9, %xmm0
vmulps %xmm3, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm12
vmulps %xmm5, %xmm9, %xmm0
vmulps %xmm2, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm13
movl (%r15), %eax
cmpl $0x1, 0x24(%r13)
jne 0x14ff83d
movzbl 0x3d(%r13), %esi
shll $0x8, %esi
movq 0x88(%r13), %rcx
movl 0x20(%rcx), %edx
cmpl $0x100, %esi # imm = 0x100
je 0x15003dc
cmpl $0x9243, %edx # imm = 0x9243
jg 0x14ff93b
cmpl $0x9134, %edx # imm = 0x9134
je 0x14ff9b6
cmpl $0x9234, %edx # imm = 0x9234
jne 0x14fff15
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm0
vmovss 0xc(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm0
vmovss 0x18(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm0
vmovss 0x24(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
jmp 0x14fff15
vmovss 0xc(%r14), %xmm0
vmovss 0x28(%r13), %xmm1
vmovss 0x2c(%r13), %xmm2
vmovss 0x30(%r13), %xmm3
vsubss %xmm2, %xmm0, %xmm0
vsubss %xmm2, %xmm3, %xmm2
vdivss %xmm2, %xmm0, %xmm0
vmulss %xmm0, %xmm1, %xmm0
vroundss $0x9, %xmm0, %xmm0, %xmm2
vaddss 0x9f1159(%rip), %xmm1, %xmm1 # 0x1ef09cc
vminss %xmm1, %xmm2, %xmm1
vxorps %xmm2, %xmm2, %xmm2
vmaxss %xmm1, %xmm2, %xmm1
vcvttss2si %xmm1, %ecx
vsubss %xmm1, %xmm0, %xmm14
movzbl 0x3d(%r13), %r8d
shll $0x8, %r8d
movq 0x88(%r13), %rdx
imulq $0x38, %rcx, %rdi
leaq (%rdx,%rdi), %rsi
movl 0x20(%rdx,%rdi), %edi
cmpl $0x100, %r8d # imm = 0x100
je 0x150047c
cmpl $0x9243, %edi # imm = 0x9243
jg 0x14ff977
cmpl $0x9134, %edi # imm = 0x9134
je 0x14ffa1a
cmpl $0x9234, %edi # imm = 0x9234
jne 0x14ffc43
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x4(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovsd 0x10(%rdi,%rsi), %xmm1
vmovss 0xc(%rdi,%rsi), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[0,2,3,1]
vmovsd 0x1c(%rdi,%rsi), %xmm1
vmovss 0x18(%rdi,%rsi), %xmm2
vshufps $0x4c, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[0,2,3,1]
vmovsd 0x28(%rdi,%rsi), %xmm1
vmovss 0x24(%rdi,%rsi), %xmm4
vshufps $0x4c, %xmm1, %xmm4, %xmm1 # xmm1 = xmm4[0,3],xmm1[0,1]
vshufps $0x78, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,2,3,1]
jmp 0x14ffc43
cmpl $0xb001, %edx # imm = 0xB001
je 0x14ffa81
cmpl $0x9244, %edx # imm = 0x9244
jne 0x14fff15
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm4
vmovaps 0x10(%rdx,%rax), %xmm3
vmovaps 0x20(%rdx,%rax), %xmm2
vmovaps 0x30(%rdx,%rax), %xmm0
jmp 0x14fff15
cmpl $0xb001, %edi # imm = 0xB001
je 0x14ffb63
cmpl $0x9244, %edi # imm = 0x9244
jne 0x14ffc43
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovaps (%rdi,%rsi), %xmm0
vmovaps 0x10(%rdi,%rsi), %xmm3
vmovaps 0x20(%rdi,%rsi), %xmm2
vmovaps 0x30(%rdi,%rsi), %xmm1
jmp 0x14ffc43
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm5
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm4 # xmm4 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm3 # xmm3 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm5, %xmm0 # xmm0 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
jmp 0x14fff15
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovss (%rdi,%rsi), %xmm0
vmovss 0x4(%rdi,%rsi), %xmm1
vmovss 0x8(%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm4
vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdi,%rsi), %xmm1, %xmm3 # xmm3 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm1 # xmm1 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdi,%rsi), %xmm1, %xmm2 # xmm2 = xmm1[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm4, %xmm1 # xmm1 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],zero
jmp 0x14ffc43
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm0[1,3]
vmovss 0x18(%rdx,%rax), %xmm0
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ecc0e(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ecc0a(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm4 # xmm4 = xmm2[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm5
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm1, %xmm2 # xmm2 = xmm1[0,1,2],xmm2[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm1 # xmm1 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm5, %xmm1, %xmm3 # xmm3 = xmm1[0,1,2],xmm5[0]
jmp 0x14fff15
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x10(%rdi,%rsi), %xmm0
vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vmovss 0xc(%rdi,%rsi), %xmm3
vmovlhps %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,2],xmm0[1,3]
vmovss 0x18(%rdi,%rsi), %xmm1
vmovsd 0x1c(%rdi,%rsi), %xmm4
vmovlhps %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,2],xmm4[1,3]
vmovss 0x24(%rdi,%rsi), %xmm4
vmovss 0x28(%rdi,%rsi), %xmm5
vmovss 0x2c(%rdi,%rsi), %xmm6
vmovss 0x30(%rdi,%rsi), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ecb29(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ecb25(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm4
vmulss %xmm7, %xmm8, %xmm5
vinsertps $0x10, 0x4(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm5[0]
vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0,1,2],xmm4[0]
incl %ecx
imulq $0x38, %rcx, %rsi
leaq (%rdx,%rsi), %rcx
movl 0x20(%rdx,%rsi), %edx
cmpl $0x9243, %edx # imm = 0x9243
jg 0x14ffcd5
cmpl $0x9134, %edx # imm = 0x9134
je 0x14ffd11
cmpl $0x9234, %edx # imm = 0x9234
jne 0x14ffecc
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm4
vmovss (%rdx,%rax), %xmm5
vshufps $0x4c, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,3],xmm4[0,1]
vshufps $0x78, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm5
vmovss 0xc(%rdx,%rax), %xmm6
vshufps $0x4c, %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm7 # xmm7 = xmm5[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm5
vmovss 0x18(%rdx,%rax), %xmm6
vshufps $0x4c, %xmm5, %xmm6, %xmm5 # xmm5 = xmm6[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm5
vmovss 0x24(%rdx,%rax), %xmm8
vshufps $0x4c, %xmm5, %xmm8, %xmm5 # xmm5 = xmm8[0,3],xmm5[0,1]
vshufps $0x78, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,2,3,1]
jmp 0x14ffecc
cmpl $0xb001, %edx # imm = 0xB001
je 0x14ffd75
cmpl $0x9244, %edx # imm = 0x9244
jne 0x14ffecc
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm4
vmovaps 0x10(%rdx,%rax), %xmm7
vmovaps 0x20(%rdx,%rax), %xmm6
vmovaps 0x30(%rdx,%rax), %xmm5
jmp 0x14ffecc
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm4
vmovss 0x4(%rdx,%rax), %xmm5
vmovss 0x8(%rdx,%rax), %xmm6
vmovss 0xc(%rdx,%rax), %xmm8
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm5, %xmm7 # xmm7 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm6, %xmm5 # xmm5 = xmm6[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm5, %xmm6 # xmm6 = xmm5[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm8, %xmm5 # xmm5 = xmm8[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm5, %xmm5 # xmm5 = xmm5[0,1],mem[0],zero
jmp 0x14ffecc
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm4
vinsertps $0x20, 0x8(%rdx,%rax), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],xmm4[3]
vmovaps %xmm4, (%rsp)
vmovsd 0x34(%rdx,%rax), %xmm4
vmovss (%rdx,%rax), %xmm5
vmovss 0xc(%rdx,%rax), %xmm6
vmovaps %xmm6, 0x30(%rsp)
vmovlhps %xmm4, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm5, %xmm4 # xmm4 = xmm5[0,2],xmm4[1,3]
vmovaps %xmm4, 0x20(%rsp)
vmovss 0x18(%rdx,%rax), %xmm5
vmovsd 0x1c(%rdx,%rax), %xmm8
vmovlhps %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0],xmm8[0]
vshufps $0xd8, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,2],xmm8[1,3]
vmovss 0x24(%rdx,%rax), %xmm8
vmovss 0x28(%rdx,%rax), %xmm9
vmovss 0x2c(%rdx,%rax), %xmm10
vmovaps %xmm15, 0x10(%rsp)
vmovaps %xmm11, %xmm15
vmovss 0x30(%rdx,%rax), %xmm11
vmovaps %xmm0, %xmm7
vmovaps %xmm12, %xmm6
vmulss %xmm9, %xmm9, %xmm12
vmovaps %xmm3, %xmm4
vmovaps %xmm1, %xmm3
vmovaps %xmm13, %xmm1
vmulss %xmm8, %xmm8, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vmulss %xmm10, %xmm10, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vmulss %xmm11, %xmm11, %xmm13
vaddss %xmm12, %xmm13, %xmm12
vrsqrtss %xmm12, %xmm12, %xmm13
vmovaps %xmm2, %xmm0
vmovaps %xmm14, %xmm2
vmulss 0x9ec8dc(%rip), %xmm13, %xmm14 # 0x1eec718
vmulss 0x9ec8d8(%rip), %xmm12, %xmm12 # 0x1eec71c
vmulss %xmm12, %xmm13, %xmm12
vmulss %xmm13, %xmm13, %xmm13
vmulss %xmm12, %xmm13, %xmm12
vmovaps %xmm1, %xmm13
vmovaps %xmm3, %xmm1
vmovaps %xmm4, %xmm3
vaddss %xmm12, %xmm14, %xmm12
vmovaps %xmm2, %xmm14
vmovaps %xmm0, %xmm2
vmulss %xmm12, %xmm8, %xmm8
vinsertps $0x30, %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[0,1,2],xmm8[0]
vmulss %xmm12, %xmm9, %xmm8
vmovaps 0x20(%rsp), %xmm0
vinsertps $0x30, %xmm8, %xmm0, %xmm4 # xmm4 = xmm0[0,1,2],xmm8[0]
vmulss %xmm12, %xmm10, %xmm8
vmulss %xmm12, %xmm11, %xmm9
vmovaps %xmm6, %xmm12
vmovaps %xmm7, %xmm0
vmovaps %xmm15, %xmm11
vmovaps 0x10(%rsp), %xmm15
vmovaps 0x30(%rsp), %xmm6
vinsertps $0x10, 0x4(%rdx,%rax), %xmm6, %xmm7 # xmm7 = xmm6[0],mem[0],xmm6[2,3]
vmovaps (%rsp), %xmm6
vinsertps $0x30, %xmm9, %xmm6, %xmm6 # xmm6 = xmm6[0,1,2],xmm9[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm7, %xmm7 # xmm7 = xmm7[0,1],mem[0],xmm7[3]
vinsertps $0x30, %xmm8, %xmm7, %xmm7 # xmm7 = xmm7[0,1,2],xmm8[0]
vmovss 0x9ec840(%rip), %xmm8 # 0x1eec714
vsubss %xmm14, %xmm8, %xmm8
vshufps $0x0, %xmm14, %xmm14, %xmm9 # xmm9 = xmm14[0,0,0,0]
vmulps %xmm4, %xmm9, %xmm4
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vmulps %xmm0, %xmm8, %xmm0
vaddps %xmm4, %xmm0, %xmm4
vmulps %xmm7, %xmm9, %xmm0
vmulps %xmm3, %xmm8, %xmm3
vaddps %xmm0, %xmm3, %xmm3
vmulps %xmm6, %xmm9, %xmm0
vmulps %xmm2, %xmm8, %xmm2
vaddps %xmm0, %xmm2, %xmm2
vmulps %xmm5, %xmm9, %xmm0
vmulps %xmm1, %xmm8, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vshufps $0xc9, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,2,0,3]
vmulps %xmm5, %xmm2, %xmm6
vmulps %xmm1, %xmm3, %xmm7
vsubps %xmm6, %xmm7, %xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[1,2,0,3]
vmulps %xmm4, %xmm1, %xmm1
vmulps %xmm2, %xmm8, %xmm2
vsubps %xmm1, %xmm2, %xmm1
vmulps %xmm3, %xmm8, %xmm2
vmulps %xmm5, %xmm4, %xmm3
vsubps %xmm2, %xmm3, %xmm2
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vunpcklps %xmm3, %xmm7, %xmm3 # xmm3 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
vunpcklps %xmm2, %xmm6, %xmm2 # xmm2 = xmm6[0],xmm2[0],xmm6[1],xmm2[1]
vinsertps $0x4a, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1],zero,xmm1[2],zero
vxorps %xmm6, %xmm6, %xmm6
vmovss %xmm1, %xmm6, %xmm1 # xmm1 = xmm1[0],xmm6[1,2,3]
vunpcklps %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
vunpcklps %xmm5, %xmm3, %xmm2 # xmm2 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
vunpckhps %xmm5, %xmm3, %xmm3 # xmm3 = xmm3[2],xmm5[2],xmm3[3],xmm5[3]
vdpps $0x7f, %xmm7, %xmm4, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vdivps %xmm4, %xmm2, %xmm6
vdivps %xmm4, %xmm3, %xmm7
vdivps %xmm4, %xmm1, %xmm8
vshufps $0x0, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,0,0,0]
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmulps %xmm0, %xmm8, %xmm0
vmulps %xmm7, %xmm2, %xmm2
vaddps %xmm0, %xmm2, %xmm0
vmulps %xmm6, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm9
vbroadcastss 0xa20f09(%rip), %xmm0 # 0x1f20ec0
vxorps %xmm0, %xmm9, %xmm10
cmpl $0x1, 0x18(%rbx)
jne 0x150001d
vdpps $0x7f, %xmm7, %xmm6, %xmm0
vbroadcastss 0xa20ef4(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm1
vxorps %xmm0, %xmm0, %xmm0
vucomiss 0x9ec730(%rip), %xmm1 # 0x1eec710
ja 0x150000e
vdpps $0x7f, %xmm8, %xmm6, %xmm1
vandps %xmm2, %xmm1, %xmm1
vucomiss 0x9ec71c(%rip), %xmm1 # 0x1eec710
ja 0x150000e
vdpps $0x7f, %xmm8, %xmm7, %xmm1
vandps %xmm2, %xmm1, %xmm1
vucomiss 0x9ec708(%rip), %xmm1 # 0x1eec710
jbe 0x15001d4
xorl %ebp, %ebp
movzbl %bpl, %ecx
movl $0x2, %eax
subl %ecx, %eax
jmp 0x1500026
movl $0x2, %eax
vxorps %xmm0, %xmm0, %xmm0
movq 0x28(%rbx), %rcx
movl 0x88(%rcx), %edx
vmovsd (%r15), %xmm1
vshufps $0xe1, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[1,0,2,3]
vmovlps %xmm1, 0x80(%rcx,%rdx,4)
shlq $0x6, %rdx
vmovups %xmm6, (%rcx,%rdx)
vmovups %xmm7, 0x10(%rcx,%rdx)
vmovups %xmm8, 0x20(%rcx,%rdx)
vmovups %xmm10, 0x30(%rcx,%rdx)
vmovups %xmm15, 0x40(%rcx,%rdx)
vmovups %xmm11, 0x50(%rcx,%rdx)
vmovups %xmm12, 0x60(%rcx,%rdx)
vmovups %xmm13, 0x70(%rcx,%rdx)
incl 0x88(%rcx)
vmovss 0xc(%r14), %xmm1
vmovss %xmm1, 0x1ac(%rsp)
vbroadcastss (%r14), %xmm1
vbroadcastss 0x4(%r14), %xmm2
vbroadcastss 0x8(%r14), %xmm3
vmulps %xmm3, %xmm8, %xmm3
vsubps %xmm9, %xmm3, %xmm3
vmulps %xmm2, %xmm7, %xmm2
vaddps %xmm3, %xmm2, %xmm2
vmulps %xmm1, %xmm6, %xmm1
vaddps %xmm2, %xmm1, %xmm1
vmovlps %xmm1, 0x1a0(%rsp)
vextractps $0x2, %xmm1, 0x1a8(%rsp)
vmulss 0x10(%r14), %xmm0, %xmm1
vmovss %xmm1, 0x1b0(%rsp)
movq 0x10(%rbx), %rcx
movq 0x38(%rbx), %rdx
movq %r12, 0x1c0(%rsp)
movq $0x0, 0x1c8(%rsp)
movq %rcx, 0x1d0(%rsp)
movl %eax, 0x1d8(%rsp)
vmovdqa 0x20(%rbx), %xmm2
vmovdqa %xmm2, 0x1e0(%rsp)
vmovss %xmm0, 0x1f0(%rsp)
movq %rdx, 0x1f8(%rsp)
movq $-0x1, 0x200(%rsp)
vbroadcastss 0x10(%rcx), %xmm1
vmovaps %xmm1, 0x210(%rsp)
vmovss 0x10(%rcx), %xmm1
cmpl $0x2, %eax
jne 0x150016c
vucomiss 0x9eb8d1(%rip), %xmm1 # 0x1eeba20
jae 0x1500165
vpextrq $0x1, %xmm2, %rsi
movl 0x88(%rsi), %edi
testl %edi, %edi
jne 0x150062a
vshufps $0x0, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[0,0,0,0]
jmp 0x1500175
vmulss %xmm1, %xmm0, %xmm0
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
leaq 0x1c0(%rsp), %rdx
vmovaps %xmm0, 0x50(%rdx)
leaq 0x58(%r12), %rdi
leaq 0x1a0(%rsp), %rsi
callq *0x88(%r12)
movq 0x28(%rbx), %rcx
movl 0x88(%rcx), %edx
decl %edx
movl %edx, 0x88(%rcx)
movl $0xffffffff, %esi # imm = 0xFFFFFFFF
movl %esi, 0x80(%rcx,%rdx,4)
movl 0x88(%rcx), %edx
movl %esi, 0x84(%rcx,%rdx,4)
addq $0x238, %rsp # imm = 0x238
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
vdpps $0x7f, %xmm6, %xmm6, %xmm1
vdpps $0x7f, %xmm7, %xmm7, %xmm3
vsubss %xmm3, %xmm1, %xmm4
vandps %xmm2, %xmm4, %xmm4
xorl %ebp, %ebp
vucomiss 0x9ec51e(%rip), %xmm4 # 0x1eec710
ja 0x1500010
vdpps $0x7f, %xmm8, %xmm8, %xmm4
vsubss %xmm4, %xmm1, %xmm5
vandps %xmm2, %xmm5, %xmm5
vucomiss 0x9ec502(%rip), %xmm5 # 0x1eec710
ja 0x1500010
vsubss %xmm4, %xmm3, %xmm3
vandps %xmm2, %xmm3, %xmm2
vucomiss 0x9ec4ec(%rip), %xmm2 # 0x1eec710
setbe %bpl
ja 0x1500010
movb $0x1, %bpl
vucomiss %xmm0, %xmm1
jb 0x1500240
vsqrtss %xmm1, %xmm1, %xmm0
jmp 0x1500010
vmovaps %xmm1, %xmm0
vmovaps %xmm15, 0x10(%rsp)
vmovaps %xmm11, 0x50(%rsp)
vmovaps %xmm12, 0x70(%rsp)
vmovaps %xmm13, 0x40(%rsp)
vmovaps %xmm6, (%rsp)
vmovaps %xmm7, 0x30(%rsp)
vmovaps %xmm8, 0x20(%rsp)
vmovaps %xmm9, 0x60(%rsp)
vmovaps %xmm10, 0x90(%rsp)
callq 0x6aa20
vmovaps 0x90(%rsp), %xmm10
vmovaps 0x60(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm7
vmovaps (%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm15
jmp 0x1500010
cmpl $0x9243, %edx # imm = 0x9243
jg 0x150052a
cmpl $0x9134, %edx # imm = 0x9134
je 0x1500648
cmpl $0x9234, %edx # imm = 0x9234
jne 0x15008c5
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm0
vmovss 0xc(%rdx,%rax), %xmm2
vshufps $0x4c, %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm0
vmovss 0x18(%rdx,%rax), %xmm2
vshufps $0x4c, %xmm0, %xmm2, %xmm0 # xmm0 = xmm2[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm0
vmovss 0x24(%rdx,%rax), %xmm4
vshufps $0x4c, %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
jmp 0x15008c5
cmpl $0x9243, %edi # imm = 0x9243
vmovaps %xmm15, 0x10(%rsp)
jg 0x1500566
cmpl $0x9134, %edi # imm = 0x9134
je 0x15006ac
cmpl $0x9234, %edi # imm = 0x9234
jne 0x1500b86
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x4(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm11 # xmm11 = xmm0[0,2,3,1]
vmovsd 0x10(%rdi,%rsi), %xmm0
vmovss 0xc(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm15 # xmm15 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdi,%rsi), %xmm0
vmovss 0x18(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovaps %xmm0, (%rsp)
vmovsd 0x28(%rdi,%rsi), %xmm0
vmovss 0x24(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm13 # xmm13 = xmm0[0,2,3,1]
jmp 0x1500b86
cmpl $0x9243, %edx # imm = 0x9243
vmovaps %xmm15, 0x10(%rsp)
vmovaps %xmm11, 0x50(%rsp)
vmovaps %xmm12, 0x70(%rsp)
vmovaps %xmm13, 0x40(%rsp)
jg 0x15005aa
cmpl $0x9134, %edx # imm = 0x9134
je 0x1500718
cmpl $0x9234, %edx # imm = 0x9234
jne 0x1500d31
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm0
vmovss 0xc(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm0
vmovss 0x18(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm0
vmovss 0x24(%rdx,%rax), %xmm4
vshufps $0x4c, %xmm0, %xmm4, %xmm0 # xmm0 = xmm4[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
jmp 0x1500d31
cmpl $0x9243, %edi # imm = 0x9243
vmovaps %xmm15, 0x10(%rsp)
vmovaps %xmm11, 0x50(%rsp)
vmovaps %xmm12, 0x70(%rsp)
vmovaps %xmm13, 0x40(%rsp)
vmovaps %xmm14, 0x60(%rsp)
jg 0x15005e6
cmpl $0x9134, %edi # imm = 0x9134
je 0x150077c
cmpl $0x9234, %edi # imm = 0x9234
jne 0x1500ff0
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x4(%rdi,%rsi), %xmm0
vmovss (%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm11 # xmm11 = xmm0[0,2,3,1]
vmovsd 0x10(%rdi,%rsi), %xmm0
vmovss 0xc(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm15 # xmm15 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdi,%rsi), %xmm0
vmovss 0x18(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,2,3,1]
vmovaps %xmm0, (%rsp)
vmovsd 0x28(%rdi,%rsi), %xmm0
vmovss 0x24(%rdi,%rsi), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm12 # xmm12 = xmm0[0,2,3,1]
jmp 0x1500ff0
cmpl $0xb001, %edx # imm = 0xB001
je 0x15007e8
cmpl $0x9244, %edx # imm = 0x9244
jne 0x15008c5
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm1
vmovaps 0x10(%rdx,%rax), %xmm3
vmovaps 0x20(%rdx,%rax), %xmm2
vmovaps 0x30(%rdx,%rax), %xmm0
jmp 0x15008c5
cmpl $0xb001, %edi # imm = 0xB001
je 0x1500aa1
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1500b86
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovaps (%rdi,%rsi), %xmm11
vmovaps 0x10(%rdi,%rsi), %xmm15
vmovaps 0x20(%rdi,%rsi), %xmm0
vmovaps %xmm0, (%rsp)
vmovaps 0x30(%rdi,%rsi), %xmm13
jmp 0x1500b86
cmpl $0xb001, %edx # imm = 0xB001
je 0x1500c54
cmpl $0x9244, %edx # imm = 0x9244
jne 0x1500d31
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm2
vmovaps 0x10(%rdx,%rax), %xmm3
vmovaps 0x20(%rdx,%rax), %xmm1
vmovaps 0x30(%rdx,%rax), %xmm0
jmp 0x1500d31
cmpl $0xb001, %edi # imm = 0xB001
je 0x1500f0b
cmpl $0x9244, %edi # imm = 0x9244
jne 0x1500ff0
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovaps (%rdi,%rsi), %xmm11
vmovaps 0x10(%rdi,%rsi), %xmm15
vmovaps 0x20(%rdi,%rsi), %xmm0
vmovaps %xmm0, (%rsp)
vmovaps 0x30(%rdi,%rsi), %xmm12
jmp 0x1500ff0
leaq 0x220(%rsp), %r14
vmovaps %xmm1, %xmm0
movq %r14, %rdx
callq 0x40a9ef
vmovaps (%r14), %xmm0
jmp 0x1500175
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm2
vmovss 0x8(%rdx,%rax), %xmm4
vmovss 0xc(%rdx,%rax), %xmm5
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm3 # xmm3 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm5, %xmm0 # xmm0 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
jmp 0x15008c5
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovss (%rdi,%rsi), %xmm0
vmovss 0x4(%rdi,%rsi), %xmm1
vmovss 0x8(%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm11 # xmm11 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdi,%rsi), %xmm0, %xmm15 # xmm15 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps %xmm0, (%rsp)
vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm0, %xmm13 # xmm13 = xmm0[0,1],mem[0],zero
jmp 0x1500b86
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm4
vmovss 0xc(%rdx,%rax), %xmm5
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm3 # xmm3 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm4, %xmm0 # xmm0 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm5, %xmm0 # xmm0 = xmm5[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
jmp 0x1500d31
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovss (%rdi,%rsi), %xmm0
vmovss 0x4(%rdi,%rsi), %xmm1
vmovss 0x8(%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vinsertps $0x1c, 0x10(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdi,%rsi), %xmm0, %xmm11 # xmm11 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdi,%rsi), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdi,%rsi), %xmm0, %xmm15 # xmm15 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdi,%rsi), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps %xmm0, (%rsp)
vinsertps $0x1c, 0x1c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdi,%rsi), %xmm0, %xmm12 # xmm12 = xmm0[0,1],mem[0],zero
jmp 0x1500ff0
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm1, %xmm1 # xmm1 = xmm1[0,2],xmm0[1,3]
vmovss 0x18(%rdx,%rax), %xmm0
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ebea7(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ebea3(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm4
vmulss %xmm7, %xmm8, %xmm5
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm5, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm5[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0,1,2],xmm4[0]
vmovaps %xmm0, 0x10(%rsp)
vshufps $0xff, %xmm0, %xmm0, %xmm6 # xmm6 = xmm0[3,3,3,3]
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vshufps $0xff, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[3,3,3,3]
vshufps $0xff, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[3,3,3,3]
vmulss %xmm9, %xmm9, %xmm10
vmulss %xmm6, %xmm6, %xmm11
vaddss %xmm10, %xmm11, %xmm5
vbroadcastss 0xa205ca(%rip), %xmm12 # 0x1f20ec0
vxorps %xmm7, %xmm12, %xmm8
vmulss %xmm7, %xmm8, %xmm8
vaddss %xmm5, %xmm8, %xmm5
vxorps %xmm4, %xmm12, %xmm12
vmulss %xmm4, %xmm12, %xmm12
vaddss %xmm5, %xmm12, %xmm13
vmulss %xmm6, %xmm4, %xmm5
vmulss %xmm7, %xmm9, %xmm14
vaddss %xmm5, %xmm14, %xmm15
vsubss %xmm5, %xmm14, %xmm5
vmulss %xmm4, %xmm9, %xmm14
vsubss %xmm10, %xmm11, %xmm10
vmulss %xmm7, %xmm7, %xmm11
vaddss %xmm10, %xmm11, %xmm11
vaddss %xmm11, %xmm12, %xmm11
vmulss %xmm6, %xmm7, %xmm12
vmulss %xmm6, %xmm9, %xmm9
vmovaps %xmm1, %xmm0
vsubss %xmm12, %xmm14, %xmm1
vmulss %xmm4, %xmm7, %xmm7
vaddss %xmm12, %xmm14, %xmm6
vaddss %xmm7, %xmm9, %xmm12
vsubss %xmm9, %xmm7, %xmm7
vaddss %xmm15, %xmm15, %xmm9
vaddss %xmm1, %xmm1, %xmm1
vaddss %xmm10, %xmm8, %xmm8
vmulss %xmm4, %xmm4, %xmm4
vaddss %xmm4, %xmm8, %xmm8
vshufps $0x0, %xmm13, %xmm13, %xmm4 # xmm4 = xmm13[0,0,0,0]
vshufps $0x0, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmovaps 0x9ebd79(%rip), %xmm10 # 0x1eec700
vmulps %xmm1, %xmm10, %xmm1
vmovsd 0x9ebd5d(%rip), %xmm13 # 0x1eec6f0
vmulps %xmm13, %xmm9, %xmm9
vaddps %xmm1, %xmm9, %xmm1
vmovss 0x9ebd70(%rip), %xmm9 # 0x1eec714
vmulps %xmm4, %xmm9, %xmm4
vaddps %xmm1, %xmm4, %xmm4
vaddss %xmm12, %xmm12, %xmm1
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm1, %xmm10, %xmm1
vmulps %xmm13, %xmm11, %xmm11
vaddps %xmm1, %xmm11, %xmm1
vaddss %xmm5, %xmm5, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm9, %xmm5
vaddps %xmm1, %xmm5, %xmm5
vshufps $0x0, %xmm8, %xmm8, %xmm1 # xmm1 = xmm8[0,0,0,0]
vmulps %xmm1, %xmm10, %xmm1
vaddss %xmm7, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm7
vaddps %xmm1, %xmm7, %xmm1
vxorps %xmm7, %xmm7, %xmm7
vaddss %xmm6, %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm9, %xmm6
vshufps $0xe9, %xmm7, %xmm0, %xmm8 # xmm8 = xmm0[1,2],xmm7[2,3]
vblendps $0x4, %xmm3, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm3[2],xmm8[3]
vaddps %xmm1, %xmm6, %xmm1
vaddps %xmm7, %xmm8, %xmm6
vshufps $0x0, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[0,0,0,0]
vmulps %xmm7, %xmm1, %xmm9
vmulps %xmm7, %xmm5, %xmm7
vaddps %xmm7, %xmm9, %xmm7
vmulps %xmm4, %xmm8, %xmm8
vaddps %xmm7, %xmm8, %xmm15
vshufps $0x0, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm3, %xmm9, %xmm3
vmulps %xmm4, %xmm7, %xmm7
vaddps %xmm3, %xmm7, %xmm11
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[2,2,2,2]
vmulps %xmm1, %xmm7, %xmm7
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm7, %xmm3, %xmm3
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm4, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm12
vmovaps 0x10(%rsp), %xmm0
vshufps $0xaa, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[2,2,2,2]
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm4, %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
vaddps %xmm0, %xmm6, %xmm13
jmp 0x14ff789
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x10(%rdi,%rsi), %xmm0
vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdi,%rsi), %xmm1
vmovss (%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rdi,%rsi), %xmm2
vmovsd 0x1c(%rdi,%rsi), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdi,%rsi), %xmm4
vmovss 0x28(%rdi,%rsi), %xmm5
vmovss 0x2c(%rdi,%rsi), %xmm6
vmovss 0x30(%rdi,%rsi), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ebbeb(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ebbe7(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm13 # xmm13 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm11 # xmm11 = xmm1[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm2[0]
vmovaps %xmm0, (%rsp)
vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm15 # xmm15 = xmm0[0,1,2],xmm1[0]
incl %ecx
imulq $0x38, %rcx, %rsi
leaq (%rdx,%rsi), %rcx
movl 0x20(%rdx,%rsi), %edx
cmpl $0x9243, %edx # imm = 0x9243
jg 0x1500c18
cmpl $0x9134, %edx # imm = 0x9134
je 0x15010be
cmpl $0x9234, %edx # imm = 0x9234
jne 0x15011ff
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm0
vmovss 0xc(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm0
vmovss 0x18(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm0
vmovss 0x24(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm9 # xmm9 = xmm0[0,2,3,1]
jmp 0x15011ff
cmpl $0xb001, %edx # imm = 0xB001
je 0x1501122
cmpl $0x9244, %edx # imm = 0x9244
jne 0x15011ff
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm4
vmovaps 0x10(%rdx,%rax), %xmm5
vmovaps 0x20(%rdx,%rax), %xmm2
vmovaps 0x30(%rdx,%rax), %xmm9
jmp 0x15011ff
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm0[0]
vshufps $0xd8, %xmm0, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm0[1,3]
vmovss 0x18(%rdx,%rax), %xmm0
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9eba3b(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9eba37(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm4[0]
vmulss %xmm6, %xmm8, %xmm4
vmulss %xmm7, %xmm8, %xmm5
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm5, %xmm1, %xmm1 # xmm1 = xmm1[0,1,2],xmm5[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm4, %xmm3, %xmm3 # xmm3 = xmm3[0,1,2],xmm4[0]
vshufps $0xff, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[3,3,3,3]
vshufps $0xff, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[3,3,3,3]
vshufps $0xff, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[3,3,3,3]
vshufps $0xff, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[3,3,3,3]
vmulss %xmm9, %xmm9, %xmm10
vmulss %xmm5, %xmm5, %xmm11
vaddss %xmm10, %xmm11, %xmm6
vbroadcastss 0xa20164(%rip), %xmm12 # 0x1f20ec0
vxorps %xmm7, %xmm12, %xmm8
vmulss %xmm7, %xmm8, %xmm8
vaddss %xmm6, %xmm8, %xmm6
vxorps %xmm4, %xmm12, %xmm12
vmulss %xmm4, %xmm12, %xmm12
vaddss %xmm6, %xmm12, %xmm13
vmulss %xmm4, %xmm5, %xmm6
vmulss %xmm7, %xmm9, %xmm14
vaddss %xmm6, %xmm14, %xmm15
vsubss %xmm6, %xmm14, %xmm6
vmovss %xmm6, (%rsp)
vmulss %xmm4, %xmm9, %xmm14
vsubss %xmm10, %xmm11, %xmm10
vmulss %xmm7, %xmm7, %xmm11
vaddss %xmm10, %xmm11, %xmm11
vaddss %xmm11, %xmm12, %xmm6
vmulss %xmm7, %xmm5, %xmm12
vmulss %xmm5, %xmm9, %xmm5
vmovaps %xmm0, %xmm11
vmovaps %xmm2, %xmm0
vsubss %xmm12, %xmm14, %xmm2
vmulss %xmm4, %xmm7, %xmm9
vaddss %xmm12, %xmm14, %xmm7
vaddss %xmm5, %xmm9, %xmm12
vsubss %xmm5, %xmm9, %xmm9
vaddss %xmm15, %xmm15, %xmm5
vaddss %xmm2, %xmm2, %xmm2
vaddss %xmm10, %xmm8, %xmm8
vmulss %xmm4, %xmm4, %xmm4
vaddss %xmm4, %xmm8, %xmm4
vshufps $0x0, %xmm13, %xmm13, %xmm8 # xmm8 = xmm13[0,0,0,0]
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmovaps 0x9eb90c(%rip), %xmm10 # 0x1eec700
vmulps %xmm2, %xmm10, %xmm2
vmovsd 0x9eb8f0(%rip), %xmm13 # 0x1eec6f0
vmulps %xmm5, %xmm13, %xmm5
vaddps %xmm2, %xmm5, %xmm2
vmovss 0x9eb904(%rip), %xmm14 # 0x1eec714
vmulps %xmm14, %xmm8, %xmm5
vaddps %xmm5, %xmm2, %xmm5
vaddss %xmm12, %xmm12, %xmm2
vshufps $0x0, %xmm6, %xmm6, %xmm8 # xmm8 = xmm6[0,0,0,0]
vshufps $0x0, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[0,0,0,0]
vmulps %xmm2, %xmm10, %xmm2
vmulps %xmm13, %xmm8, %xmm8
vaddps %xmm2, %xmm8, %xmm2
vmovss (%rsp), %xmm6
vaddss %xmm6, %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm14, %xmm6
vaddps %xmm2, %xmm6, %xmm6
vshufps $0x0, %xmm4, %xmm4, %xmm2 # xmm2 = xmm4[0,0,0,0]
vmulps %xmm2, %xmm10, %xmm2
vaddss %xmm9, %xmm9, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm2, %xmm4, %xmm2
vxorps %xmm4, %xmm4, %xmm4
vaddss %xmm7, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm14, %xmm7
vshufps $0xe9, %xmm4, %xmm0, %xmm8 # xmm8 = xmm0[1,2],xmm4[2,3]
vblendps $0x4, %xmm3, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm3[2],xmm8[3]
vaddps %xmm2, %xmm7, %xmm7
vaddps %xmm4, %xmm8, %xmm8
vshufps $0x0, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,0,0,0]
vmulps %xmm4, %xmm7, %xmm9
vmulps %xmm4, %xmm6, %xmm4
vaddps %xmm4, %xmm9, %xmm4
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm4, %xmm2, %xmm4
vshufps $0x0, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[0,0,0,0]
vshufps $0x55, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,1,1,1]
vmulps %xmm6, %xmm3, %xmm3
vaddps %xmm3, %xmm9, %xmm3
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm3, %xmm2, %xmm3
vshufps $0x55, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm9, %xmm2
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm2
vshufps $0xaa, %xmm11, %xmm11, %xmm1 # xmm1 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm1, %xmm1
vshufps $0x55, %xmm11, %xmm11, %xmm7 # xmm7 = xmm11[1,1,1,1]
vmulps %xmm6, %xmm7, %xmm6
vaddps %xmm1, %xmm6, %xmm1
vshufps $0x0, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[0,0,0,0]
vmulps %xmm5, %xmm0, %xmm0
vaddps %xmm1, %xmm0, %xmm0
jmp 0x1502152
movq (%rsi), %rdi
movq 0x10(%rsi), %rsi
imulq %rax, %rsi
vmovsd 0x10(%rdi,%rsi), %xmm0
vinsertps $0x20, 0x8(%rdi,%rsi), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdi,%rsi), %xmm1
vmovss (%rdi,%rsi), %xmm2
vmovss 0xc(%rdi,%rsi), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rdi,%rsi), %xmm2
vmovsd 0x1c(%rdi,%rsi), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdi,%rsi), %xmm4
vmovss 0x28(%rdi,%rsi), %xmm5
vmovss 0x2c(%rdi,%rsi), %xmm6
vmovss 0x30(%rdi,%rsi), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9eb781(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9eb77d(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm12 # xmm12 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm11 # xmm11 = xmm1[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdi,%rsi), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm2[0]
vmovaps %xmm0, (%rsp)
vinsertps $0x20, 0x3c(%rdi,%rsi), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm15 # xmm15 = xmm0[0,1,2],xmm1[0]
incl %ecx
imulq $0x38, %rcx, %rsi
leaq (%rdx,%rsi), %rcx
movl 0x20(%rdx,%rsi), %edx
cmpl $0x9243, %edx # imm = 0x9243
jg 0x1501082
cmpl $0x9134, %edx # imm = 0x9134
je 0x15018fd
cmpl $0x9234, %edx # imm = 0x9234
jne 0x1501a3e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x4(%rdx,%rax), %xmm0
vmovss (%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[0,2,3,1]
vmovsd 0x10(%rdx,%rax), %xmm0
vmovss 0xc(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[0,2,3,1]
vmovsd 0x1c(%rdx,%rax), %xmm0
vmovss 0x18(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[0,2,3,1]
vmovsd 0x28(%rdx,%rax), %xmm0
vmovss 0x24(%rdx,%rax), %xmm1
vshufps $0x4c, %xmm0, %xmm1, %xmm0 # xmm0 = xmm1[0,3],xmm0[0,1]
vshufps $0x78, %xmm0, %xmm0, %xmm9 # xmm9 = xmm0[0,2,3,1]
jmp 0x1501a3e
cmpl $0xb001, %edx # imm = 0xB001
je 0x1501961
cmpl $0x9244, %edx # imm = 0x9244
jne 0x1501a3e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovaps (%rdx,%rax), %xmm4
vmovaps 0x10(%rdx,%rax), %xmm5
vmovaps 0x20(%rdx,%rax), %xmm2
vmovaps 0x30(%rdx,%rax), %xmm9
jmp 0x1501a3e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm4 # xmm4 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm5 # xmm5 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm9 # xmm9 = xmm0[0,1],mem[0],zero
jmp 0x15011ff
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm1
vmovss (%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rdx,%rax), %xmm2
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9eb56d(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9eb569(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm9 # xmm9 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm2 # xmm2 = xmm0[0,1,2],xmm2[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0,1,2],xmm1[0]
vshufps $0xff, %xmm13, %xmm13, %xmm8 # xmm8 = xmm13[3,3,3,3]
vmovaps %xmm11, %xmm12
vshufps $0xff, %xmm11, %xmm11, %xmm7 # xmm7 = xmm11[3,3,3,3]
vmovaps %xmm15, %xmm14
vshufps $0xff, %xmm15, %xmm15, %xmm6 # xmm6 = xmm15[3,3,3,3]
vpermilps $0xff, (%rsp), %xmm3 # xmm3 = mem[3,3,3,3]
vshufps $0xff, %xmm9, %xmm9, %xmm15 # xmm15 = xmm9[3,3,3,3]
vshufps $0xff, %xmm4, %xmm4, %xmm10 # xmm10 = xmm4[3,3,3,3]
vshufps $0xff, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[3,3,3,3]
vmovaps %xmm2, 0xf0(%rsp)
vshufps $0xff, %xmm2, %xmm2, %xmm0 # xmm0 = xmm2[3,3,3,3]
vmulss %xmm7, %xmm10, %xmm1
vmovaps %xmm8, 0x170(%rsp)
vmulss %xmm15, %xmm8, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmovaps %xmm11, 0xe0(%rsp)
vmulss %xmm6, %xmm11, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmulss %xmm0, %xmm3, %xmm2
vaddss %xmm1, %xmm2, %xmm11
vbroadcastss 0xa1fc48(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm11, %xmm8
vucomiss %xmm11, %xmm8
vxorps %xmm1, %xmm0, %xmm2
jbe 0x150128f
vxorps %xmm1, %xmm15, %xmm15
vxorps %xmm1, %xmm10, %xmm10
ja 0x1501295
vmovaps %xmm0, %xmm2
vmovaps %xmm2, 0x140(%rsp)
vmovaps %xmm15, 0x150(%rsp)
vmovaps %xmm10, 0x160(%rsp)
vmovaps %xmm7, 0x20(%rsp)
vmovaps %xmm6, 0x180(%rsp)
vmovaps %xmm3, 0x30(%rsp)
vmovaps %xmm9, 0x100(%rsp)
vmovaps %xmm5, 0x190(%rsp)
vmovaps %xmm4, 0x90(%rsp)
vmovaps %xmm13, 0x60(%rsp)
vmovaps %xmm14, 0x40(%rsp)
vmovaps %xmm12, 0x70(%rsp)
vbroadcastss 0xa1fbc9(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm11, %xmm4
vmulss 0x9ef679(%rip), %xmm4, %xmm0 # 0x1ef0980
vaddss 0x9ef675(%rip), %xmm0, %xmm0 # 0x1ef0984
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9ef66d(%rip), %xmm0, %xmm0 # 0x1ef0988
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9ef665(%rip), %xmm0, %xmm0 # 0x1ef098c
vmovaps %xmm8, 0xd0(%rsp)
vmaxss %xmm11, %xmm8, %xmm8
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9ef64f(%rip), %xmm0, %xmm0 # 0x1ef0990
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9ef647(%rip), %xmm0, %xmm1 # 0x1ef0994
vmovss 0x9eb3bf(%rip), %xmm0 # 0x1eec714
vsubss %xmm4, %xmm0, %xmm0
vxorps %xmm3, %xmm3, %xmm3
vucomiss %xmm3, %xmm0
vmovss %xmm8, 0x50(%rsp)
jb 0x150136f
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x15013b4
vmovaps %xmm11, 0xc0(%rsp)
vmovaps %xmm4, 0xb0(%rsp)
vmovss %xmm1, 0xa0(%rsp)
callq 0x6aa20
vmovss 0xa0(%rsp), %xmm1
vxorps %xmm3, %xmm3, %xmm3
vmovaps 0xb0(%rsp), %xmm4
vmovaps 0xc0(%rsp), %xmm11
vmovss 0x50(%rsp), %xmm8
vmulss %xmm1, %xmm0, %xmm0
vmovss 0x9ef5d8(%rip), %xmm1 # 0x1ef0998
vsubss %xmm0, %xmm1, %xmm0
vmaxss %xmm0, %xmm3, %xmm0
vbroadcastss 0xa1faef(%rip), %xmm13 # 0x1f20ec0
vxorps %xmm0, %xmm13, %xmm2
vcmpltss %xmm3, %xmm8, %xmm3
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vsubss %xmm0, %xmm1, %xmm0
vmovss 0x9eb328(%rip), %xmm9 # 0x1eec714
vcmpltss %xmm4, %xmm9, %xmm2
vbroadcastss 0x9ef5a2(%rip), %xmm3 # 0x1ef099c
vblendvps %xmm2, %xmm3, %xmm0, %xmm0
vmovaps 0x10(%rsp), %xmm7
vmulss %xmm0, %xmm7, %xmm0
vmulss 0x9ef58e(%rip), %xmm0, %xmm2 # 0x1ef09a0
vroundss $0x9, %xmm2, %xmm2, %xmm2
vcvttss2si %xmm2, %eax
vmulss %xmm1, %xmm2, %xmm1
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm1
vmulss 0x9ef574(%rip), %xmm1, %xmm2 # 0x1ef09a4
vaddss 0x9ef570(%rip), %xmm2, %xmm2 # 0x1ef09a8
vmulss 0x9ef56c(%rip), %xmm1, %xmm3 # 0x1ef09ac
vaddss 0x9ef568(%rip), %xmm3, %xmm5 # 0x1ef09b0
vmovaps 0xe0(%rsp), %xmm10
vxorps %xmm13, %xmm10, %xmm3
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x9ef552(%rip), %xmm2, %xmm2 # 0x1ef09b4
vcmpltss 0xd0(%rsp), %xmm11, %xmm4
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x9ef540(%rip), %xmm5, %xmm5 # 0x1ef09b8
movl %eax, %ecx
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x9ef536(%rip), %xmm2, %xmm2 # 0x1ef09bc
andl $0x3, %ecx
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x9ef52b(%rip), %xmm5, %xmm5 # 0x1ef09c0
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x9ef523(%rip), %xmm2, %xmm6 # 0x1ef09c4
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x9eb26f(%rip), %xmm5, %xmm5 # 0x1eec71c
vmulss %xmm6, %xmm1, %xmm6
vaddss %xmm6, %xmm9, %xmm6
vmulss %xmm5, %xmm1, %xmm1
vaddss %xmm1, %xmm9, %xmm5
vmulss %xmm6, %xmm0, %xmm0
testb $0x1, %al
vmovaps 0x180(%rsp), %xmm1
je 0x15014d4
vmovaps %xmm5, %xmm12
jmp 0x15014dc
vmovaps %xmm0, %xmm12
vmovaps %xmm5, %xmm0
vblendvps %xmm4, %xmm3, %xmm10, %xmm9
leal -0x1(%rcx), %eax
cmpl $0x2, %ecx
jb 0x15014ef
vxorps %xmm13, %xmm12, %xmm12
cmpl $0x2, %eax
vmovaps %xmm12, 0xe0(%rsp)
jae 0x1501501
vxorps %xmm0, %xmm13, %xmm0
vmovaps 0x170(%rsp), %xmm11
vmulss %xmm11, %xmm8, %xmm4
vmovaps 0x150(%rsp), %xmm13
vsubss %xmm13, %xmm4, %xmm4
vmovaps 0x20(%rsp), %xmm3
vmulss %xmm3, %xmm8, %xmm5
vmovaps 0x160(%rsp), %xmm2
vsubss %xmm2, %xmm5, %xmm5
vmulss %xmm1, %xmm8, %xmm6
vsubss %xmm9, %xmm6, %xmm10
vmovaps 0x30(%rsp), %xmm15
vmulss %xmm15, %xmm8, %xmm6
vmovaps 0x140(%rsp), %xmm8
vsubss %xmm8, %xmm6, %xmm14
vmovss %xmm5, 0xd0(%rsp)
vmulss %xmm5, %xmm5, %xmm6
vmovaps %xmm9, %xmm12
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vmovss %xmm10, 0xc0(%rsp)
vmulss %xmm10, %xmm10, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vmovss %xmm14, 0xb0(%rsp)
vmulss %xmm14, %xmm14, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vrsqrtss %xmm6, %xmm6, %xmm9
vmovss 0x9eb17c(%rip), %xmm5 # 0x1eec71c
vmulss %xmm5, %xmm6, %xmm6
vmulss %xmm6, %xmm9, %xmm6
vmulss %xmm9, %xmm9, %xmm10
vmulss %xmm6, %xmm10, %xmm6
vmulss 0x9eb15f(%rip), %xmm9, %xmm9 # 0x1eec718
vaddss %xmm6, %xmm9, %xmm9
vmulss %xmm4, %xmm9, %xmm4
vmovaps 0xe0(%rsp), %xmm5
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm0, %xmm11, %xmm6
vsubss %xmm4, %xmm6, %xmm4
vmovss 0x9eb136(%rip), %xmm6 # 0x1eec714
vsubss %xmm7, %xmm6, %xmm6
vmulss %xmm7, %xmm13, %xmm10
vmulss %xmm6, %xmm11, %xmm11
vaddss %xmm10, %xmm11, %xmm13
vmulss %xmm2, %xmm7, %xmm10
vmulss %xmm3, %xmm6, %xmm11
vaddss %xmm10, %xmm11, %xmm10
vmulss %xmm7, %xmm12, %xmm3
vmulss %xmm1, %xmm6, %xmm11
vaddss %xmm3, %xmm11, %xmm3
vmulss %xmm7, %xmm8, %xmm11
vmulss %xmm6, %xmm15, %xmm14
vaddss %xmm11, %xmm14, %xmm11
vmulss %xmm10, %xmm10, %xmm14
vmovaps %xmm1, %xmm8
vmulss %xmm13, %xmm13, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm3, %xmm3, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm11, %xmm11, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss 0x9eb0d9(%rip), %xmm14, %xmm2 # 0x1eec71c
vrsqrtss %xmm14, %xmm14, %xmm14
vmulss 0x9eb0c8(%rip), %xmm14, %xmm12 # 0x1eec718
vmulss %xmm2, %xmm14, %xmm2
vmulss %xmm14, %xmm14, %xmm14
vmulss %xmm2, %xmm14, %xmm2
vaddss %xmm2, %xmm12, %xmm2
vmovss 0x9ef35f(%rip), %xmm12 # 0x1ef09c8
vmovss 0x50(%rsp), %xmm1
vucomiss %xmm12, %xmm1
vcmpltss %xmm1, %xmm12, %xmm12
vmulss %xmm2, %xmm13, %xmm13
vblendvps %xmm12, %xmm13, %xmm4, %xmm4
ja 0x15016ca
vmulss 0xd0(%rsp), %xmm9, %xmm2
vmulss 0xc0(%rsp), %xmm9, %xmm3
vmulss 0xb0(%rsp), %xmm9, %xmm9
vmulss %xmm5, %xmm2, %xmm2
vmulss 0x20(%rsp), %xmm0, %xmm7
vsubss %xmm2, %xmm7, %xmm7
vmulss %xmm5, %xmm3, %xmm2
vmulss %xmm0, %xmm8, %xmm3
vsubss %xmm2, %xmm3, %xmm8
vmulss %xmm5, %xmm9, %xmm1
vmulss 0x30(%rsp), %xmm0, %xmm0
vsubss %xmm1, %xmm0, %xmm5
jmp 0x15016d6
vmulss %xmm2, %xmm10, %xmm7
vmulss %xmm2, %xmm3, %xmm8
vmulss %xmm2, %xmm11, %xmm5
vmovaps 0x40(%rsp), %xmm3
vmovaps 0x60(%rsp), %xmm10
vmovaps (%rsp), %xmm9
vpermilps $0x0, 0x10(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
vmulps 0x90(%rsp), %xmm0, %xmm1
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps 0x70(%rsp), %xmm6, %xmm2
vaddps %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x10(%rsp)
vmulps 0x190(%rsp), %xmm0, %xmm1
vmulps %xmm3, %xmm6, %xmm2
vaddps %xmm1, %xmm2, %xmm2
vmulps 0xf0(%rsp), %xmm0, %xmm1
vmulps %xmm6, %xmm9, %xmm9
vaddps %xmm1, %xmm9, %xmm1
vmulps 0x100(%rsp), %xmm0, %xmm0
vmulps %xmm6, %xmm10, %xmm6
vaddps %xmm0, %xmm6, %xmm3
vmulss %xmm7, %xmm7, %xmm10
vmulss %xmm4, %xmm4, %xmm11
vaddss %xmm10, %xmm11, %xmm6
vbroadcastss 0xa1f76a(%rip), %xmm0 # 0x1f20ec0
vxorps %xmm0, %xmm8, %xmm9
vmulss %xmm8, %xmm9, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vxorps %xmm0, %xmm5, %xmm12
vmulss %xmm5, %xmm12, %xmm12
vaddss %xmm6, %xmm12, %xmm13
vmulss %xmm5, %xmm4, %xmm6
vmulss %xmm7, %xmm8, %xmm14
vaddss %xmm6, %xmm14, %xmm15
vsubss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm7, %xmm14
vsubss %xmm10, %xmm11, %xmm10
vmulss %xmm8, %xmm8, %xmm11
vaddss %xmm10, %xmm11, %xmm11
vaddss %xmm11, %xmm12, %xmm11
vmulss %xmm4, %xmm8, %xmm12
vmulss %xmm7, %xmm4, %xmm4
vsubss %xmm12, %xmm14, %xmm0
vmulss %xmm5, %xmm8, %xmm8
vaddss %xmm12, %xmm14, %xmm7
vaddss %xmm4, %xmm8, %xmm12
vsubss %xmm4, %xmm8, %xmm8
vaddss %xmm15, %xmm15, %xmm4
vaddss %xmm0, %xmm0, %xmm0
vaddss %xmm10, %xmm9, %xmm9
vmulss %xmm5, %xmm5, %xmm5
vaddss %xmm5, %xmm9, %xmm9
vshufps $0x0, %xmm13, %xmm13, %xmm5 # xmm5 = xmm13[0,0,0,0]
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps 0x9eaf1d(%rip), %xmm10 # 0x1eec700
vmulps %xmm0, %xmm10, %xmm0
vmovsd 0x9eaf01(%rip), %xmm13 # 0x1eec6f0
vmulps %xmm4, %xmm13, %xmm4
vaddps %xmm0, %xmm4, %xmm0
vmovss 0x9eaf15(%rip), %xmm14 # 0x1eec714
vmulps %xmm5, %xmm14, %xmm4
vaddps %xmm0, %xmm4, %xmm4
vaddss %xmm12, %xmm12, %xmm0
vshufps $0x0, %xmm11, %xmm11, %xmm5 # xmm5 = xmm11[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vmulps %xmm5, %xmm13, %xmm5
vaddps %xmm5, %xmm0, %xmm0
vaddss %xmm6, %xmm6, %xmm5
vshufps $0x0, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[0,0,0,0]
vmulps %xmm5, %xmm14, %xmm5
vaddps %xmm0, %xmm5, %xmm5
vshufps $0x0, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vaddss %xmm8, %xmm8, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm13, %xmm6
vaddps %xmm0, %xmm6, %xmm0
vxorps %xmm6, %xmm6, %xmm6
vaddss %xmm7, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm14, %xmm7
vmovaps 0x10(%rsp), %xmm9
vshufps $0xe9, %xmm6, %xmm9, %xmm8 # xmm8 = xmm9[1,2],xmm6[2,3]
vblendps $0x4, %xmm2, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm2[2],xmm8[3]
vaddps %xmm0, %xmm7, %xmm0
vaddps %xmm6, %xmm8, %xmm7
vshufps $0x0, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[0,0,0,0]
vmulps %xmm6, %xmm0, %xmm9
vmulps %xmm6, %xmm5, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vmulps %xmm4, %xmm8, %xmm8
vaddps %xmm6, %xmm8, %xmm15
vshufps $0x0, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm9, %xmm2
vmulps %xmm4, %xmm6, %xmm6
vaddps %xmm2, %xmm6, %xmm11
vshufps $0x55, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[2,2,2,2]
vmulps %xmm0, %xmm6, %xmm6
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm6, %xmm2, %xmm2
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm12
vshufps $0xaa, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[2,2,2,2]
vmulps %xmm0, %xmm1, %xmm0
vshufps $0x55, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[1,1,1,1]
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm3, %xmm3, %xmm1 # xmm1 = xmm3[0,0,0,0]
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm7, %xmm13
jmp 0x14ff789
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovss (%rdx,%rax), %xmm0
vmovss 0x4(%rdx,%rax), %xmm1
vmovss 0x8(%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vinsertps $0x1c, 0x10(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rdx,%rax), %xmm0, %xmm4 # xmm4 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x14(%rdx,%rax), %xmm1, %xmm0 # xmm0 = xmm1[0],mem[0],zero,zero
vinsertps $0x28, 0x24(%rdx,%rax), %xmm0, %xmm5 # xmm5 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x18(%rdx,%rax), %xmm2, %xmm0 # xmm0 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rdx,%rax), %xmm0, %xmm2 # xmm2 = xmm0[0,1],mem[0],zero
vinsertps $0x1c, 0x1c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0],mem[0],zero,zero
vinsertps $0x28, 0x2c(%rdx,%rax), %xmm0, %xmm9 # xmm9 = xmm0[0,1],mem[0],zero
jmp 0x1501a3e
movq (%rcx), %rdx
imulq 0x10(%rcx), %rax
vmovsd 0x10(%rdx,%rax), %xmm0
vinsertps $0x20, 0x8(%rdx,%rax), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],xmm0[3]
vmovsd 0x34(%rdx,%rax), %xmm1
vmovss (%rdx,%rax), %xmm2
vmovss 0xc(%rdx,%rax), %xmm3
vmovlhps %xmm1, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm1[0]
vshufps $0xd8, %xmm1, %xmm2, %xmm1 # xmm1 = xmm2[0,2],xmm1[1,3]
vmovss 0x18(%rdx,%rax), %xmm2
vmovsd 0x1c(%rdx,%rax), %xmm4
vmovlhps %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0],xmm4[0]
vshufps $0xd8, %xmm4, %xmm2, %xmm2 # xmm2 = xmm2[0,2],xmm4[1,3]
vmovss 0x24(%rdx,%rax), %xmm4
vmovss 0x28(%rdx,%rax), %xmm5
vmovss 0x2c(%rdx,%rax), %xmm6
vmovss 0x30(%rdx,%rax), %xmm7
vmulss %xmm5, %xmm5, %xmm8
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm6, %xmm6, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vmulss %xmm7, %xmm7, %xmm9
vaddss %xmm8, %xmm9, %xmm8
vrsqrtss %xmm8, %xmm8, %xmm9
vmulss 0x9ead2e(%rip), %xmm9, %xmm10 # 0x1eec718
vmulss 0x9ead2a(%rip), %xmm8, %xmm8 # 0x1eec71c
vmulss %xmm8, %xmm9, %xmm8
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm8, %xmm9, %xmm8
vaddss %xmm8, %xmm10, %xmm8
vmulss %xmm4, %xmm8, %xmm4
vinsertps $0x30, %xmm4, %xmm2, %xmm9 # xmm9 = xmm2[0,1,2],xmm4[0]
vmulss %xmm5, %xmm8, %xmm2
vinsertps $0x30, %xmm2, %xmm1, %xmm4 # xmm4 = xmm1[0,1,2],xmm2[0]
vmulss %xmm6, %xmm8, %xmm1
vmulss %xmm7, %xmm8, %xmm2
vinsertps $0x10, 0x4(%rdx,%rax), %xmm3, %xmm3 # xmm3 = xmm3[0],mem[0],xmm3[2,3]
vinsertps $0x30, %xmm2, %xmm0, %xmm2 # xmm2 = xmm0[0,1,2],xmm2[0]
vinsertps $0x20, 0x3c(%rdx,%rax), %xmm3, %xmm0 # xmm0 = xmm3[0,1],mem[0],xmm3[3]
vinsertps $0x30, %xmm1, %xmm0, %xmm5 # xmm5 = xmm0[0,1,2],xmm1[0]
vmovaps %xmm12, %xmm13
vshufps $0xff, %xmm12, %xmm12, %xmm8 # xmm8 = xmm12[3,3,3,3]
vmovaps %xmm11, %xmm14
vshufps $0xff, %xmm11, %xmm11, %xmm7 # xmm7 = xmm11[3,3,3,3]
vmovaps %xmm15, %xmm12
vshufps $0xff, %xmm15, %xmm15, %xmm6 # xmm6 = xmm15[3,3,3,3]
vpermilps $0xff, (%rsp), %xmm3 # xmm3 = mem[3,3,3,3]
vshufps $0xff, %xmm9, %xmm9, %xmm15 # xmm15 = xmm9[3,3,3,3]
vshufps $0xff, %xmm4, %xmm4, %xmm10 # xmm10 = xmm4[3,3,3,3]
vshufps $0xff, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[3,3,3,3]
vmovaps %xmm2, 0x160(%rsp)
vshufps $0xff, %xmm2, %xmm2, %xmm0 # xmm0 = xmm2[3,3,3,3]
vmulss %xmm7, %xmm10, %xmm1
vmovaps %xmm8, 0xe0(%rsp)
vmulss %xmm15, %xmm8, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmovaps %xmm11, 0xa0(%rsp)
vmulss %xmm6, %xmm11, %xmm2
vaddss %xmm1, %xmm2, %xmm1
vmulss %xmm0, %xmm3, %xmm2
vaddss %xmm1, %xmm2, %xmm11
vbroadcastss 0xa1f404(%rip), %xmm1 # 0x1f20ec0
vxorps %xmm1, %xmm11, %xmm8
vucomiss %xmm11, %xmm8
vxorps %xmm1, %xmm0, %xmm2
jbe 0x1501ad3
vxorps %xmm1, %xmm15, %xmm15
vxorps %xmm1, %xmm10, %xmm10
ja 0x1501ad9
vmovaps %xmm0, %xmm2
vmovaps %xmm2, 0xb0(%rsp)
vmovaps %xmm15, 0xc0(%rsp)
vmovaps %xmm10, 0xd0(%rsp)
vmovaps %xmm7, 0x20(%rsp)
vmovaps %xmm6, 0x140(%rsp)
vmovaps %xmm3, 0x30(%rsp)
vmovaps %xmm5, 0x150(%rsp)
vmovaps %xmm9, 0x170(%rsp)
vmovaps %xmm4, 0x180(%rsp)
vmovaps %xmm12, 0xf0(%rsp)
vmovaps %xmm13, 0x100(%rsp)
vmovaps %xmm14, 0x190(%rsp)
vbroadcastss 0xa1f37c(%rip), %xmm0 # 0x1f20ec4
vandps %xmm0, %xmm11, %xmm4
vmulss 0x9eee2c(%rip), %xmm4, %xmm0 # 0x1ef0980
vaddss 0x9eee28(%rip), %xmm0, %xmm0 # 0x1ef0984
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9eee20(%rip), %xmm0, %xmm0 # 0x1ef0988
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9eee18(%rip), %xmm0, %xmm0 # 0x1ef098c
vmovaps %xmm8, 0x130(%rsp)
vmaxss %xmm11, %xmm8, %xmm8
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9eee02(%rip), %xmm0, %xmm0 # 0x1ef0990
vmulss %xmm0, %xmm4, %xmm0
vaddss 0x9eedfa(%rip), %xmm0, %xmm1 # 0x1ef0994
vmovss 0x9eab72(%rip), %xmm0 # 0x1eec714
vsubss %xmm4, %xmm0, %xmm0
vxorps %xmm3, %xmm3, %xmm3
vucomiss %xmm3, %xmm0
vmovss %xmm8, 0x90(%rsp)
jb 0x1501bbf
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1501c07
vmovaps %xmm11, 0x120(%rsp)
vmovaps %xmm4, 0x110(%rsp)
vmovss %xmm1, 0x8c(%rsp)
callq 0x6aa20
vmovss 0x8c(%rsp), %xmm1
vxorps %xmm3, %xmm3, %xmm3
vmovaps 0x110(%rsp), %xmm4
vmovaps 0x120(%rsp), %xmm11
vmovss 0x90(%rsp), %xmm8
vmulss %xmm1, %xmm0, %xmm0
vmovss 0x9eed85(%rip), %xmm1 # 0x1ef0998
vsubss %xmm0, %xmm1, %xmm0
vmaxss %xmm0, %xmm3, %xmm0
vbroadcastss 0xa1f29c(%rip), %xmm13 # 0x1f20ec0
vxorps %xmm0, %xmm13, %xmm2
vcmpltss %xmm3, %xmm8, %xmm3
vblendvps %xmm3, %xmm2, %xmm0, %xmm0
vsubss %xmm0, %xmm1, %xmm0
vmovss 0x9eaad5(%rip), %xmm9 # 0x1eec714
vcmpltss %xmm4, %xmm9, %xmm2
vbroadcastss 0x9eed4f(%rip), %xmm3 # 0x1ef099c
vblendvps %xmm2, %xmm3, %xmm0, %xmm0
vmovaps 0x60(%rsp), %xmm7
vmulss %xmm0, %xmm7, %xmm0
vmulss 0x9eed3b(%rip), %xmm0, %xmm2 # 0x1ef09a0
vroundss $0x9, %xmm2, %xmm2, %xmm2
vcvttss2si %xmm2, %eax
vmulss %xmm1, %xmm2, %xmm1
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm1
vmulss 0x9eed21(%rip), %xmm1, %xmm2 # 0x1ef09a4
vaddss 0x9eed1d(%rip), %xmm2, %xmm2 # 0x1ef09a8
vmulss 0x9eed19(%rip), %xmm1, %xmm3 # 0x1ef09ac
vaddss 0x9eed15(%rip), %xmm3, %xmm5 # 0x1ef09b0
vmovaps 0xa0(%rsp), %xmm10
vxorps %xmm13, %xmm10, %xmm3
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x9eecff(%rip), %xmm2, %xmm2 # 0x1ef09b4
vcmpltss 0x130(%rsp), %xmm11, %xmm4
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x9eeced(%rip), %xmm5, %xmm5 # 0x1ef09b8
movl %eax, %ecx
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x9eece3(%rip), %xmm2, %xmm2 # 0x1ef09bc
andl $0x3, %ecx
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x9eecd8(%rip), %xmm5, %xmm5 # 0x1ef09c0
vmulss %xmm2, %xmm1, %xmm2
vaddss 0x9eecd0(%rip), %xmm2, %xmm6 # 0x1ef09c4
vmulss %xmm5, %xmm1, %xmm5
vaddss 0x9eaa1c(%rip), %xmm5, %xmm5 # 0x1eec71c
vmulss %xmm6, %xmm1, %xmm6
vaddss %xmm6, %xmm9, %xmm6
vmulss %xmm5, %xmm1, %xmm1
vaddss %xmm1, %xmm9, %xmm5
vmulss %xmm6, %xmm0, %xmm0
testb $0x1, %al
vmovaps 0x140(%rsp), %xmm1
je 0x1501d27
vmovaps %xmm5, %xmm12
jmp 0x1501d2f
vmovaps %xmm0, %xmm12
vmovaps %xmm5, %xmm0
vblendvps %xmm4, %xmm3, %xmm10, %xmm9
leal -0x1(%rcx), %eax
cmpl $0x2, %ecx
jb 0x1501d42
vxorps %xmm13, %xmm12, %xmm12
cmpl $0x2, %eax
vmovaps %xmm12, 0xa0(%rsp)
jae 0x1501d54
vxorps %xmm0, %xmm13, %xmm0
vmovaps 0xe0(%rsp), %xmm11
vmulss %xmm11, %xmm8, %xmm4
vmovaps 0xc0(%rsp), %xmm13
vsubss %xmm13, %xmm4, %xmm4
vmovaps 0x20(%rsp), %xmm3
vmulss %xmm3, %xmm8, %xmm5
vmovaps 0xd0(%rsp), %xmm2
vsubss %xmm2, %xmm5, %xmm5
vmulss %xmm1, %xmm8, %xmm6
vsubss %xmm9, %xmm6, %xmm10
vmovaps 0x30(%rsp), %xmm15
vmulss %xmm15, %xmm8, %xmm6
vmovaps 0xb0(%rsp), %xmm8
vsubss %xmm8, %xmm6, %xmm14
vmovss %xmm5, 0x130(%rsp)
vmulss %xmm5, %xmm5, %xmm6
vmovaps %xmm9, %xmm12
vmulss %xmm4, %xmm4, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vmovss %xmm10, 0x120(%rsp)
vmulss %xmm10, %xmm10, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vmovss %xmm14, 0x110(%rsp)
vmulss %xmm14, %xmm14, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vrsqrtss %xmm6, %xmm6, %xmm9
vmovss 0x9ea929(%rip), %xmm5 # 0x1eec71c
vmulss %xmm5, %xmm6, %xmm6
vmulss %xmm6, %xmm9, %xmm6
vmulss %xmm9, %xmm9, %xmm10
vmulss %xmm6, %xmm10, %xmm6
vmulss 0x9ea90c(%rip), %xmm9, %xmm9 # 0x1eec718
vaddss %xmm6, %xmm9, %xmm9
vmulss %xmm4, %xmm9, %xmm4
vmovaps 0xa0(%rsp), %xmm5
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm0, %xmm11, %xmm6
vsubss %xmm4, %xmm6, %xmm4
vmovss 0x9ea8e3(%rip), %xmm6 # 0x1eec714
vsubss %xmm7, %xmm6, %xmm6
vmulss %xmm7, %xmm13, %xmm10
vmulss %xmm6, %xmm11, %xmm11
vaddss %xmm10, %xmm11, %xmm13
vmulss %xmm2, %xmm7, %xmm10
vmulss %xmm3, %xmm6, %xmm11
vaddss %xmm10, %xmm11, %xmm10
vmulss %xmm7, %xmm12, %xmm3
vmulss %xmm1, %xmm6, %xmm11
vaddss %xmm3, %xmm11, %xmm3
vmulss %xmm7, %xmm8, %xmm11
vmulss %xmm6, %xmm15, %xmm14
vaddss %xmm11, %xmm14, %xmm11
vmulss %xmm10, %xmm10, %xmm14
vmovaps %xmm1, %xmm8
vmulss %xmm13, %xmm13, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm3, %xmm3, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss %xmm11, %xmm11, %xmm15
vaddss %xmm14, %xmm15, %xmm14
vmulss 0x9ea886(%rip), %xmm14, %xmm2 # 0x1eec71c
vrsqrtss %xmm14, %xmm14, %xmm14
vmulss 0x9ea875(%rip), %xmm14, %xmm12 # 0x1eec718
vmulss %xmm2, %xmm14, %xmm2
vmulss %xmm14, %xmm14, %xmm14
vmulss %xmm2, %xmm14, %xmm2
vaddss %xmm2, %xmm12, %xmm2
vmovss 0x9eeb0c(%rip), %xmm12 # 0x1ef09c8
vmovss 0x90(%rsp), %xmm1
vucomiss %xmm12, %xmm1
vcmpltss %xmm1, %xmm12, %xmm12
vmulss %xmm2, %xmm13, %xmm13
vblendvps %xmm12, %xmm13, %xmm4, %xmm4
ja 0x1501f20
vmulss 0x130(%rsp), %xmm9, %xmm2
vmulss 0x120(%rsp), %xmm9, %xmm3
vmulss 0x110(%rsp), %xmm9, %xmm9
vmulss %xmm5, %xmm2, %xmm2
vmulss 0x20(%rsp), %xmm0, %xmm7
vsubss %xmm2, %xmm7, %xmm7
vmulss %xmm5, %xmm3, %xmm2
vmulss %xmm0, %xmm8, %xmm3
vsubss %xmm2, %xmm3, %xmm8
vmulss %xmm5, %xmm9, %xmm1
vmulss 0x30(%rsp), %xmm0, %xmm0
vsubss %xmm1, %xmm0, %xmm5
jmp 0x1501f2c
vmulss %xmm2, %xmm10, %xmm7
vmulss %xmm2, %xmm3, %xmm8
vmulss %xmm2, %xmm11, %xmm5
vmovaps 0x100(%rsp), %xmm10
vmovaps (%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm11
vpermilps $0x0, 0x60(%rsp), %xmm0 # xmm0 = mem[0,0,0,0]
vmulps 0x180(%rsp), %xmm0, %xmm1
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps 0x190(%rsp), %xmm6, %xmm2
vaddps %xmm1, %xmm2, %xmm3
vmulps 0x150(%rsp), %xmm0, %xmm1
vmulps %xmm6, %xmm11, %xmm2
vaddps %xmm1, %xmm2, %xmm2
vmulps 0x160(%rsp), %xmm0, %xmm1
vmulps %xmm6, %xmm9, %xmm9
vaddps %xmm1, %xmm9, %xmm1
vmulps 0x170(%rsp), %xmm0, %xmm0
vmulps %xmm6, %xmm10, %xmm6
vaddps %xmm0, %xmm6, %xmm0
vmovaps %xmm0, (%rsp)
vmulss %xmm7, %xmm7, %xmm10
vmulss %xmm4, %xmm4, %xmm11
vaddss %xmm10, %xmm11, %xmm6
vbroadcastss 0xa1ef0c(%rip), %xmm0 # 0x1f20ec0
vxorps %xmm0, %xmm8, %xmm9
vmulss %xmm8, %xmm9, %xmm9
vaddss %xmm6, %xmm9, %xmm6
vxorps %xmm0, %xmm5, %xmm12
vmulss %xmm5, %xmm12, %xmm12
vaddss %xmm6, %xmm12, %xmm13
vmulss %xmm5, %xmm4, %xmm6
vmulss %xmm7, %xmm8, %xmm14
vaddss %xmm6, %xmm14, %xmm15
vsubss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm7, %xmm14
vsubss %xmm10, %xmm11, %xmm10
vmulss %xmm8, %xmm8, %xmm11
vaddss %xmm10, %xmm11, %xmm11
vaddss %xmm11, %xmm12, %xmm11
vmulss %xmm4, %xmm8, %xmm12
vmulss %xmm7, %xmm4, %xmm7
vsubss %xmm12, %xmm14, %xmm0
vmulss %xmm5, %xmm8, %xmm8
vaddss %xmm12, %xmm14, %xmm4
vaddss %xmm7, %xmm8, %xmm12
vsubss %xmm7, %xmm8, %xmm7
vaddss %xmm15, %xmm15, %xmm8
vaddss %xmm0, %xmm0, %xmm0
vaddss %xmm10, %xmm9, %xmm9
vmulss %xmm5, %xmm5, %xmm5
vaddss %xmm5, %xmm9, %xmm9
vshufps $0x0, %xmm13, %xmm13, %xmm5 # xmm5 = xmm13[0,0,0,0]
vshufps $0x0, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmovaps 0x9ea6be(%rip), %xmm10 # 0x1eec700
vmulps %xmm0, %xmm10, %xmm0
vmovsd 0x9ea6a2(%rip), %xmm13 # 0x1eec6f0
vmulps %xmm13, %xmm8, %xmm8
vaddps %xmm0, %xmm8, %xmm0
vmovss 0x9ea6b5(%rip), %xmm8 # 0x1eec714
vmulps %xmm5, %xmm8, %xmm5
vaddps %xmm0, %xmm5, %xmm5
vaddss %xmm12, %xmm12, %xmm0
vshufps $0x0, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[0,0,0,0]
vshufps $0x0, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vmulps %xmm13, %xmm11, %xmm11
vaddps %xmm0, %xmm11, %xmm0
vaddss %xmm6, %xmm6, %xmm6
vshufps $0x0, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[0,0,0,0]
vmulps %xmm6, %xmm8, %xmm6
vaddps %xmm0, %xmm6, %xmm6
vshufps $0x0, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,0,0,0]
vmulps %xmm0, %xmm10, %xmm0
vaddss %xmm7, %xmm7, %xmm7
vshufps $0x0, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[0,0,0,0]
vmulps %xmm7, %xmm13, %xmm7
vaddps %xmm0, %xmm7, %xmm0
vxorps %xmm7, %xmm7, %xmm7
vaddss %xmm4, %xmm4, %xmm4
vshufps $0x0, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[0,0,0,0]
vmulps %xmm4, %xmm8, %xmm4
vshufps $0xe9, %xmm7, %xmm3, %xmm8 # xmm8 = xmm3[1,2],xmm7[2,3]
vblendps $0x4, %xmm2, %xmm8, %xmm8 # xmm8 = xmm8[0,1],xmm2[2],xmm8[3]
vaddps %xmm0, %xmm4, %xmm0
vaddps %xmm7, %xmm8, %xmm8
vshufps $0x0, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[0,0,0,0]
vmulps %xmm7, %xmm0, %xmm9
vmulps %xmm7, %xmm6, %xmm4
vaddps %xmm4, %xmm9, %xmm4
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm4, %xmm3, %xmm4
vshufps $0x0, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[0,0,0,0]
vshufps $0x55, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1,1,1]
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm2, %xmm9, %xmm2
vmulps %xmm5, %xmm3, %xmm3
vaddps %xmm2, %xmm3, %xmm3
vshufps $0x55, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[2,2,2,2]
vmulps %xmm0, %xmm7, %xmm7
vmulps %xmm6, %xmm2, %xmm2
vaddps %xmm7, %xmm2, %xmm2
vshufps $0x0, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[0,0,0,0]
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm2, %xmm1, %xmm2
vmovaps (%rsp), %xmm7
vshufps $0xaa, %xmm7, %xmm7, %xmm1 # xmm1 = xmm7[2,2,2,2]
vmulps %xmm0, %xmm1, %xmm0
vshufps $0x55, %xmm7, %xmm7, %xmm1 # xmm1 = xmm7[1,1,1,1]
vmulps %xmm6, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vshufps $0x0, %xmm7, %xmm7, %xmm1 # xmm1 = xmm7[0,0,0,0]
vmulps %xmm5, %xmm1, %xmm1
vaddps %xmm0, %xmm1, %xmm0
vaddps %xmm0, %xmm8, %xmm0
vmovaps 0x10(%rsp), %xmm15
vmovaps 0x50(%rsp), %xmm11
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x40(%rsp), %xmm13
jmp 0x14fff15
nop
|
/embree[P]embree/kernels/geometry/instance_array_intersector.cpp
|
embree::avx::BVHNRefitter<4>::refit_toplevel(embree::NodeRefPtr<4>&, unsigned long&, embree::BBox<embree::Vec3fa> const*, unsigned long)
|
BBox3fa BVHNRefitter<N>::refit_toplevel(NodeRef& ref,
size_t &subtrees,
const BBox3fa *const subTreeBounds,
const size_t depth)
{
if (depth >= MAX_SUB_TREE_EXTRACTION_DEPTH)
{
assert(subtrees < MAX_NUM_SUB_TREES);
assert(subTrees[subtrees] == ref);
return subTreeBounds[subtrees++];
}
if (ref.isAABBNode())
{
AABBNode* node = ref.getAABBNode();
BBox3fa bounds[N];
for (size_t i=0; i<N; i++)
{
NodeRef& child = node->child(i);
if (unlikely(child == BVH::emptyNode))
bounds[i] = BBox3fa(empty);
else
bounds[i] = refit_toplevel(child,subtrees,subTreeBounds,depth+1);
}
BBox3vf<N> boundsT = transpose<N>(bounds);
/* set new bounds */
node->lower_x = boundsT.lower.x;
node->lower_y = boundsT.lower.y;
node->lower_z = boundsT.lower.z;
node->upper_x = boundsT.upper.x;
node->upper_y = boundsT.upper.y;
node->upper_z = boundsT.upper.z;
return merge<N>(bounds);
}
else
return leafBounds.leafBounds(ref);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0xb8, %rsp
movq %r8, %r14
movq %rcx, %r15
movq %rdi, %rbp
cmpq $0x4, %r9
jb 0x1502e04
movq (%r15), %rax
leaq 0x1(%rax), %rcx
movq %rcx, (%r15)
shlq $0x5, %rax
vmovaps (%r14,%rax), %xmm0
vmovaps %xmm0, (%rbp)
vmovaps 0x10(%r14,%rax), %xmm0
vmovaps %xmm0, 0x10(%rbp)
jmp 0x1502f4a
movq %rsi, %r13
movq (%rdx), %rbx
testb $0xf, %bl
je 0x1502e20
movq 0x8(%r13), %rsi
movq (%rsi), %rax
movq %rbp, %rdi
callq *(%rax)
jmp 0x1502f4a
movq %r9, %r12
movq %rbp, 0x8(%rsp)
incq %r12
xorl %ebp, %ebp
cmpq $0x8, (%rbx,%rbp)
je 0x1502e6f
leaq (%rbx,%rbp), %rdx
leaq 0x90(%rsp), %rdi
movq %r13, %rsi
movq %r15, %rcx
movq %r14, %r8
movq %r12, %r9
vzeroupper
callq 0x1502dba
vmovups 0x90(%rsp), %ymm0
vmovups %ymm0, 0x10(%rsp,%rbp,4)
addq $0x8, %rbp
cmpq $0x20, %rbp
jne 0x1502e2d
jmp 0x1502e8f
vbroadcastss 0x9e8ba8(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, 0x10(%rsp,%rbp,4)
vbroadcastss 0x9e9cfd(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, 0x20(%rsp,%rbp,4)
jmp 0x1502e63
vmovaps 0x50(%rsp), %xmm0
vmovaps 0x10(%rsp), %xmm1
vmovaps 0x20(%rsp), %xmm2
vmovaps 0x30(%rsp), %xmm3
vmovaps 0x40(%rsp), %xmm4
vunpcklps %xmm0, %xmm1, %xmm5 # xmm5 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
vunpckhps %xmm0, %xmm1, %xmm6 # xmm6 = xmm1[2],xmm0[2],xmm1[3],xmm0[3]
vmovaps 0x70(%rsp), %xmm7
vunpcklps %xmm7, %xmm3, %xmm8 # xmm8 = xmm3[0],xmm7[0],xmm3[1],xmm7[1]
vunpckhps %xmm7, %xmm3, %xmm9 # xmm9 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
vunpcklps %xmm9, %xmm6, %xmm6 # xmm6 = xmm6[0],xmm9[0],xmm6[1],xmm9[1]
vunpcklps %xmm8, %xmm5, %xmm9 # xmm9 = xmm5[0],xmm8[0],xmm5[1],xmm8[1]
vunpckhps %xmm8, %xmm5, %xmm5 # xmm5 = xmm5[2],xmm8[2],xmm5[3],xmm8[3]
vmovaps 0x60(%rsp), %xmm8
vunpcklps %xmm8, %xmm2, %xmm10 # xmm10 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
vunpckhps %xmm8, %xmm2, %xmm11 # xmm11 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
vmovaps 0x80(%rsp), %xmm12
vunpcklps %xmm12, %xmm4, %xmm13 # xmm13 = xmm4[0],xmm12[0],xmm4[1],xmm12[1]
vunpckhps %xmm12, %xmm4, %xmm14 # xmm14 = xmm4[2],xmm12[2],xmm4[3],xmm12[3]
vunpcklps %xmm14, %xmm11, %xmm11 # xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1]
vunpcklps %xmm13, %xmm10, %xmm14 # xmm14 = xmm10[0],xmm13[0],xmm10[1],xmm13[1]
vunpckhps %xmm13, %xmm10, %xmm10 # xmm10 = xmm10[2],xmm13[2],xmm10[3],xmm13[3]
vmovaps %xmm9, 0x20(%rbx)
vmovaps %xmm5, 0x40(%rbx)
vmovaps %xmm6, 0x60(%rbx)
vmovaps %xmm14, 0x30(%rbx)
vmovaps %xmm10, 0x50(%rbx)
vmovaps %xmm11, 0x70(%rbx)
vminps %xmm3, %xmm1, %xmm1
vminps %xmm7, %xmm0, %xmm0
vminps %xmm0, %xmm1, %xmm0
vmaxps %xmm4, %xmm2, %xmm1
vmaxps %xmm12, %xmm8, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
movq 0x8(%rsp), %rbp
vmovaps %xmm0, (%rbp)
vmovaps %xmm1, 0x10(%rbp)
movq %rbp, %rax
addq $0xb8, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitter<8>::recurse_bottom(embree::NodeRefPtr<8>&)
|
BBox3fa BVHNRefitter<N>::recurse_bottom(NodeRef& ref)
{
/* this is a leaf node */
if (unlikely(ref.isLeaf()))
return leafBounds.leafBounds(ref);
/* recurse if this is an internal node */
AABBNode* node = ref.getAABBNode();
/* enable exclusive prefetch for >= AVX platforms */
#if defined(__AVX__)
BVH::prefetchW(ref);
#endif
BBox3fa bounds[N];
for (size_t i=0; i<N; i++)
if (unlikely(node->child(i) == BVH::emptyNode))
{
bounds[i] = BBox3fa(empty);
}
else
bounds[i] = recurse_bottom(node->child(i));
/* AOS to SOA transform */
BBox3vf<N> boundsT = transpose<N>(bounds);
/* set new bounds */
node->lower_x = boundsT.lower.x;
node->lower_y = boundsT.lower.y;
node->lower_z = boundsT.lower.z;
node->upper_x = boundsT.upper.x;
node->upper_y = boundsT.upper.y;
node->upper_z = boundsT.upper.z;
return merge<N>(bounds);
}
|
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x140, %rsp # imm = 0x140
movq %rsi, %r14
movq %rdi, %rbx
movq (%rdx), %r12
testb $0x8, %r12b
jne 0x1503353
prefetcht0 (%r12)
prefetcht0 0x40(%r12)
prefetcht0 0x80(%r12)
prefetcht0 0xc0(%r12)
xorl %r13d, %r13d
leaq 0x120(%rsp), %r15
cmpq $0x8, (%r12,%r13)
je 0x15031a9
leaq (%r12,%r13), %rdx
movq %r15, %rdi
movq %r14, %rsi
vzeroupper
callq 0x150312a
vmovups 0x120(%rsp), %ymm0
vmovups %ymm0, (%rsp,%r13,4)
addq $0x8, %r13
cmpq $0x40, %r13
jne 0x1503175
jmp 0x15031ca
vbroadcastss 0x9e886e(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, (%rsp,%r13,4)
vbroadcastss 0x9e99c3(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, 0x10(%rsp,%r13,4)
jmp 0x150319d
vmovaps 0x80(%rsp), %xmm0
vperm2f128 $0x2, (%rsp), %ymm0, %ymm4 # ymm4 = mem[0,1],ymm0[0,1]
vmovaps 0xa0(%rsp), %xmm1
vperm2f128 $0x2, 0x20(%rsp), %ymm1, %ymm5 # ymm5 = mem[0,1],ymm1[0,1]
vmovaps 0xc0(%rsp), %xmm2
vperm2f128 $0x2, 0x40(%rsp), %ymm2, %ymm6 # ymm6 = mem[0,1],ymm2[0,1]
vmovaps 0xe0(%rsp), %xmm3
vperm2f128 $0x2, 0x60(%rsp), %ymm3, %ymm7 # ymm7 = mem[0,1],ymm3[0,1]
vunpcklps %ymm6, %ymm4, %ymm8 # ymm8 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[4],ymm6[4],ymm4[5],ymm6[5]
vunpckhps %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
vunpcklps %ymm7, %ymm5, %ymm9 # ymm9 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
vunpckhps %ymm7, %ymm5, %ymm5 # ymm5 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
vunpcklps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vmovups %ymm4, 0x100(%rsp)
vunpcklps %ymm9, %ymm8, %ymm7 # ymm7 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
vmovaps 0x90(%rsp), %xmm4
vperm2f128 $0x2, 0x10(%rsp), %ymm4, %ymm10 # ymm10 = mem[0,1],ymm4[0,1]
vunpckhps %ymm9, %ymm8, %ymm9 # ymm9 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
vmovaps 0xb0(%rsp), %xmm5
vperm2f128 $0x2, 0x30(%rsp), %ymm5, %ymm11 # ymm11 = mem[0,1],ymm5[0,1]
vmovaps 0xd0(%rsp), %xmm8
vperm2f128 $0x2, 0x50(%rsp), %ymm8, %ymm12 # ymm12 = mem[0,1],ymm8[0,1]
vmovaps 0xf0(%rsp), %xmm13
vperm2f128 $0x2, 0x70(%rsp), %ymm13, %ymm14 # ymm14 = mem[0,1],ymm13[0,1]
vunpcklps %ymm12, %ymm10, %ymm15 # ymm15 = ymm10[0],ymm12[0],ymm10[1],ymm12[1],ymm10[4],ymm12[4],ymm10[5],ymm12[5]
vunpckhps %ymm12, %ymm10, %ymm10 # ymm10 = ymm10[2],ymm12[2],ymm10[3],ymm12[3],ymm10[6],ymm12[6],ymm10[7],ymm12[7]
vunpcklps %ymm14, %ymm11, %ymm12 # ymm12 = ymm11[0],ymm14[0],ymm11[1],ymm14[1],ymm11[4],ymm14[4],ymm11[5],ymm14[5]
vunpckhps %ymm14, %ymm11, %ymm11 # ymm11 = ymm11[2],ymm14[2],ymm11[3],ymm14[3],ymm11[6],ymm14[6],ymm11[7],ymm14[7]
vmovaps (%rsp), %xmm14
vunpcklps %ymm11, %ymm10, %ymm10 # ymm10 = ymm10[0],ymm11[0],ymm10[1],ymm11[1],ymm10[4],ymm11[4],ymm10[5],ymm11[5]
vunpcklps %ymm12, %ymm15, %ymm11 # ymm11 = ymm15[0],ymm12[0],ymm15[1],ymm12[1],ymm15[4],ymm12[4],ymm15[5],ymm12[5]
vunpckhps %ymm12, %ymm15, %ymm12 # ymm12 = ymm15[2],ymm12[2],ymm15[3],ymm12[3],ymm15[6],ymm12[6],ymm15[7],ymm12[7]
vminps 0x20(%rsp), %xmm14, %xmm14
vmovaps 0x40(%rsp), %xmm15
vminps 0x60(%rsp), %xmm15, %xmm15
vminps %xmm15, %xmm14, %xmm14
vmovaps 0x10(%rsp), %xmm15
vmaxps 0x30(%rsp), %xmm15, %xmm15
vmovaps 0x50(%rsp), %xmm6
vmaxps 0x70(%rsp), %xmm6, %xmm6
vmaxps %xmm6, %xmm15, %xmm6
vmovaps %ymm7, 0x40(%r12)
vmovaps %ymm9, 0x80(%r12)
vmovups 0x100(%rsp), %ymm7
vmovaps %ymm7, 0xc0(%r12)
vmovaps %ymm11, 0x60(%r12)
vmovaps %ymm12, 0xa0(%r12)
vmovaps %ymm10, 0xe0(%r12)
vminps %xmm1, %xmm0, %xmm0
vminps %xmm3, %xmm2, %xmm1
vminps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm14, %xmm0
vmaxps %xmm5, %xmm4, %xmm1
vmaxps %xmm13, %xmm8, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmaxps %xmm1, %xmm6, %xmm1
vmovaps %xmm0, (%rbx)
vmovaps %xmm1, 0x10(%rbx)
movq %rbx, %rax
addq $0x140, %rsp # imm = 0x140
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
vzeroupper
retq
movq 0x8(%r14), %rsi
movq (%rsi), %rax
movq %rbx, %rdi
callq *(%rax)
jmp 0x150333c
nop
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitter<8>::refit_toplevel(embree::NodeRefPtr<8>&, unsigned long&, embree::BBox<embree::Vec3fa> const*, unsigned long)
|
BBox3fa BVHNRefitter<N>::refit_toplevel(NodeRef& ref,
size_t &subtrees,
const BBox3fa *const subTreeBounds,
const size_t depth)
{
if (depth >= MAX_SUB_TREE_EXTRACTION_DEPTH)
{
assert(subtrees < MAX_NUM_SUB_TREES);
assert(subTrees[subtrees] == ref);
return subTreeBounds[subtrees++];
}
if (ref.isAABBNode())
{
AABBNode* node = ref.getAABBNode();
BBox3fa bounds[N];
for (size_t i=0; i<N; i++)
{
NodeRef& child = node->child(i);
if (unlikely(child == BVH::emptyNode))
bounds[i] = BBox3fa(empty);
else
bounds[i] = refit_toplevel(child,subtrees,subTreeBounds,depth+1);
}
BBox3vf<N> boundsT = transpose<N>(bounds);
/* set new bounds */
node->lower_x = boundsT.lower.x;
node->lower_y = boundsT.lower.y;
node->lower_z = boundsT.lower.z;
node->upper_x = boundsT.upper.x;
node->upper_y = boundsT.upper.y;
node->upper_z = boundsT.upper.z;
return merge<N>(bounds);
}
else
return leafBounds.leafBounds(ref);
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x158, %rsp # imm = 0x158
movq %r8, %r14
movq %rcx, %r15
movq %rdi, %rbp
cmpq $0x3, %r9
jb 0x1503414
movq (%r15), %rax
leaq 0x1(%rax), %rcx
movq %rcx, (%r15)
shlq $0x5, %rax
vmovaps (%r14,%rax), %xmm0
vmovaps %xmm0, (%rbp)
vmovaps 0x10(%r14,%rax), %xmm0
vmovaps %xmm0, 0x10(%rbp)
jmp 0x1503613
movq %rsi, %r13
movq (%rdx), %rbx
testb $0xf, %bl
je 0x1503430
movq 0x8(%r13), %rsi
movq (%rsi), %rax
movq %rbp, %rdi
callq *(%rax)
jmp 0x1503613
movq %r9, %r12
movq %rbp, 0x8(%rsp)
incq %r12
xorl %ebp, %ebp
cmpq $0x8, (%rbx,%rbp)
je 0x150347f
leaq (%rbx,%rbp), %rdx
leaq 0x130(%rsp), %rdi
movq %r13, %rsi
movq %r15, %rcx
movq %r14, %r8
movq %r12, %r9
vzeroupper
callq 0x15033ca
vmovups 0x130(%rsp), %ymm0
vmovups %ymm0, 0x10(%rsp,%rbp,4)
addq $0x8, %rbp
cmpq $0x40, %rbp
jne 0x150343d
jmp 0x150349f
vbroadcastss 0x9e8598(%rip), %xmm0 # 0x1eeba20
vmovaps %xmm0, 0x10(%rsp,%rbp,4)
vbroadcastss 0x9e96ed(%rip), %xmm0 # 0x1eecb84
vmovaps %xmm0, 0x20(%rsp,%rbp,4)
jmp 0x1503473
vmovaps 0x90(%rsp), %xmm0
vperm2f128 $0x2, 0x10(%rsp), %ymm0, %ymm4 # ymm4 = mem[0,1],ymm0[0,1]
vmovaps 0xb0(%rsp), %xmm1
vperm2f128 $0x2, 0x30(%rsp), %ymm1, %ymm5 # ymm5 = mem[0,1],ymm1[0,1]
vmovaps 0xd0(%rsp), %xmm2
vperm2f128 $0x2, 0x50(%rsp), %ymm2, %ymm6 # ymm6 = mem[0,1],ymm2[0,1]
vmovaps 0xf0(%rsp), %xmm3
vperm2f128 $0x2, 0x70(%rsp), %ymm3, %ymm7 # ymm7 = mem[0,1],ymm3[0,1]
vunpcklps %ymm6, %ymm4, %ymm8 # ymm8 = ymm4[0],ymm6[0],ymm4[1],ymm6[1],ymm4[4],ymm6[4],ymm4[5],ymm6[5]
vunpckhps %ymm6, %ymm4, %ymm4 # ymm4 = ymm4[2],ymm6[2],ymm4[3],ymm6[3],ymm4[6],ymm6[6],ymm4[7],ymm6[7]
vunpcklps %ymm7, %ymm5, %ymm9 # ymm9 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
vunpckhps %ymm7, %ymm5, %ymm5 # ymm5 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
vunpcklps %ymm5, %ymm4, %ymm4 # ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[4],ymm5[4],ymm4[5],ymm5[5]
vmovups %ymm4, 0x110(%rsp)
vmovaps 0xa0(%rsp), %xmm4
vperm2f128 $0x2, 0x20(%rsp), %ymm4, %ymm10 # ymm10 = mem[0,1],ymm4[0,1]
vunpcklps %ymm9, %ymm8, %ymm11 # ymm11 = ymm8[0],ymm9[0],ymm8[1],ymm9[1],ymm8[4],ymm9[4],ymm8[5],ymm9[5]
vmovaps 0xc0(%rsp), %xmm5
vperm2f128 $0x2, 0x40(%rsp), %ymm5, %ymm12 # ymm12 = mem[0,1],ymm5[0,1]
vunpckhps %ymm9, %ymm8, %ymm8 # ymm8 = ymm8[2],ymm9[2],ymm8[3],ymm9[3],ymm8[6],ymm9[6],ymm8[7],ymm9[7]
vmovaps 0xe0(%rsp), %xmm7
vperm2f128 $0x2, 0x60(%rsp), %ymm7, %ymm9 # ymm9 = mem[0,1],ymm7[0,1]
vmovaps 0x100(%rsp), %xmm13
vperm2f128 $0x2, 0x80(%rsp), %ymm13, %ymm14 # ymm14 = mem[0,1],ymm13[0,1]
vunpcklps %ymm9, %ymm10, %ymm15 # ymm15 = ymm10[0],ymm9[0],ymm10[1],ymm9[1],ymm10[4],ymm9[4],ymm10[5],ymm9[5]
vunpckhps %ymm9, %ymm10, %ymm9 # ymm9 = ymm10[2],ymm9[2],ymm10[3],ymm9[3],ymm10[6],ymm9[6],ymm10[7],ymm9[7]
vunpcklps %ymm14, %ymm12, %ymm10 # ymm10 = ymm12[0],ymm14[0],ymm12[1],ymm14[1],ymm12[4],ymm14[4],ymm12[5],ymm14[5]
vunpckhps %ymm14, %ymm12, %ymm12 # ymm12 = ymm12[2],ymm14[2],ymm12[3],ymm14[3],ymm12[6],ymm14[6],ymm12[7],ymm14[7]
vmovaps 0x10(%rsp), %xmm14
vunpcklps %ymm12, %ymm9, %ymm9 # ymm9 = ymm9[0],ymm12[0],ymm9[1],ymm12[1],ymm9[4],ymm12[4],ymm9[5],ymm12[5]
vunpcklps %ymm10, %ymm15, %ymm12 # ymm12 = ymm15[0],ymm10[0],ymm15[1],ymm10[1],ymm15[4],ymm10[4],ymm15[5],ymm10[5]
vunpckhps %ymm10, %ymm15, %ymm10 # ymm10 = ymm15[2],ymm10[2],ymm15[3],ymm10[3],ymm15[6],ymm10[6],ymm15[7],ymm10[7]
vmovaps 0x50(%rsp), %xmm15
vminps 0x30(%rsp), %xmm14, %xmm14
vminps 0x70(%rsp), %xmm15, %xmm15
vminps %xmm15, %xmm14, %xmm14
vmovaps 0x20(%rsp), %xmm15
vmaxps 0x40(%rsp), %xmm15, %xmm15
vmovaps 0x60(%rsp), %xmm6
vmaxps 0x80(%rsp), %xmm6, %xmm6
vmaxps %xmm6, %xmm15, %xmm6
vmovaps %ymm11, 0x40(%rbx)
vmovaps %ymm8, 0x80(%rbx)
vmovups 0x110(%rsp), %ymm8
vmovaps %ymm8, 0xc0(%rbx)
vmovaps %ymm12, 0x60(%rbx)
vmovaps %ymm10, 0xa0(%rbx)
vmovaps %ymm9, 0xe0(%rbx)
vminps %xmm1, %xmm0, %xmm0
vminps %xmm3, %xmm2, %xmm1
vminps %xmm1, %xmm0, %xmm0
vminps %xmm0, %xmm14, %xmm0
vmaxps %xmm5, %xmm4, %xmm1
vmaxps %xmm13, %xmm7, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vmaxps %xmm1, %xmm6, %xmm1
movq 0x8(%rsp), %rbp
vmovaps %xmm0, (%rbp)
vmovaps %xmm1, 0x10(%rbp)
movq %rbp, %rax
addq $0x158, %rsp # imm = 0x158
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitT<4, embree::TriangleMesh, embree::TriangleMi<4>>::BVHNRefitT(embree::BVHN<4>*, embree::Builder*, embree::TriangleMesh*, unsigned long)
|
BVHNRefitT<N,Mesh,Primitive>::BVHNRefitT (BVH* bvh, Builder* builder, Mesh* mesh, size_t mode)
: bvh(bvh), builder(builder), refitter(new BVHNRefitter<N>(bvh,*(typename BVHNRefitter<N>::LeafBoundsInterface*)this)), mesh(mesh), topologyVersion(0) {}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %r14
movq %rsi, %r15
movq %rdi, %rbx
movq $0x0, 0x8(%rdi)
leaq 0xc199f4(%rip), %rax # 0x211d178
movq %rax, (%rdi)
leaq 0xc19a3a(%rip), %rax # 0x211d1c8
movq %rax, 0x10(%rdi)
movq %rsi, 0x18(%rdi)
movq %rdx, 0x20(%rdi)
movl $0x818, %edi # imm = 0x818
callq 0x6a170
movq %rax, %r12
leaq 0x10(%rbx), %rdx
movq %rax, %rdi
movq %r15, %rsi
callq 0x1502a1a
movq %r12, 0x28(%rbx)
movq %r14, 0x30(%rbx)
movl $0x0, 0x38(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movq %rax, %r14
movq %r12, %rdi
callq 0x6a4f0
jmp 0x15037e1
movq %rax, %r14
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x15037f0
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x20(%rbx)
movq %r14, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitT<4, embree::QuadMesh, embree::QuadMv<4>>::BVHNRefitT(embree::BVHN<4>*, embree::Builder*, embree::QuadMesh*, unsigned long)
|
BVHNRefitT<N,Mesh,Primitive>::BVHNRefitT (BVH* bvh, Builder* builder, Mesh* mesh, size_t mode)
: bvh(bvh), builder(builder), refitter(new BVHNRefitter<N>(bvh,*(typename BVHNRefitter<N>::LeafBoundsInterface*)this)), mesh(mesh), topologyVersion(0) {}
|
pushq %r15
pushq %r14
pushq %r12
pushq %rbx
pushq %rax
movq %rcx, %r14
movq %rsi, %r15
movq %rdi, %rbx
movq $0x0, 0x8(%rdi)
leaq 0xc19a04(%rip), %rax # 0x211d3f8
movq %rax, (%rdi)
leaq 0xc19a4a(%rip), %rax # 0x211d448
movq %rax, 0x10(%rdi)
movq %rsi, 0x18(%rdi)
movq %rdx, 0x20(%rdi)
movl $0x818, %edi # imm = 0x818
callq 0x6a170
movq %rax, %r12
leaq 0x10(%rbx), %rdx
movq %rax, %rdi
movq %r15, %rsi
callq 0x1502a1a
movq %r12, 0x28(%rbx)
movq %r14, 0x30(%rbx)
movl $0x0, 0x38(%rbx)
addq $0x8, %rsp
popq %rbx
popq %r12
popq %r14
popq %r15
retq
movq %rax, %r14
movq %r12, %rdi
callq 0x6a4f0
jmp 0x1503a51
movq %rax, %r14
movq 0x20(%rbx), %rdi
testq %rdi, %rdi
je 0x1503a60
movq (%rdi), %rax
callq *0x8(%rax)
movq $0x0, 0x20(%rbx)
movq %r14, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitT<4, embree::TriangleMesh, embree::TriangleMv<4>>::build()
|
void BVHNRefitT<N,Mesh,Primitive>::build()
{
if (mesh->topologyChanged(topologyVersion)) {
topologyVersion = mesh->getTopologyVersion();
builder->build();
}
else
refitter->refit();
}
|
movq 0x30(%rdi), %rax
movl 0x7c(%rax), %eax
cmpl 0x38(%rdi), %eax
jbe 0x1504e3b
movl %eax, 0x38(%rdi)
movq 0x20(%rdi), %rdi
movq (%rdi), %rax
jmpq *0x20(%rax)
movq 0x28(%rdi), %rdi
jmp 0x1502a2a
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitT<8, embree::TriangleMesh, embree::TriangleMi<4>>::build()
|
void BVHNRefitT<N,Mesh,Primitive>::build()
{
if (mesh->topologyChanged(topologyVersion)) {
topologyVersion = mesh->getTopologyVersion();
builder->build();
}
else
refitter->refit();
}
|
movq 0x30(%rdi), %rax
movl 0x7c(%rax), %eax
cmpl 0x38(%rdi), %eax
jbe 0x1505163
movl %eax, 0x38(%rdi)
movq 0x20(%rdi), %rdi
movq (%rdi), %rax
jmpq *0x20(%rax)
movq 0x28(%rdi), %rdi
jmp 0x1502f72
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNRefitT<8, embree::UserGeometry, embree::Object>::build()
|
void BVHNRefitT<N,Mesh,Primitive>::build()
{
if (mesh->topologyChanged(topologyVersion)) {
topologyVersion = mesh->getTopologyVersion();
builder->build();
}
else
refitter->refit();
}
|
movq 0x30(%rdi), %rax
movl 0x20(%rax), %eax
cmpl 0x38(%rdi), %eax
jne 0x1505487
movq 0x28(%rdi), %rdi
jmp 0x1502f72
movl %eax, 0x38(%rdi)
movq 0x20(%rdi), %rdi
movq (%rdi), %rax
jmpq *0x20(%rax)
|
/embree[P]embree/kernels/bvh/bvh_refit.cpp
|
embree::avx::BVHNBuilderVirtual<8>::BVHNBuilderV::build(embree::FastAllocator*, embree::BuildProgressMonitor&, embree::PrimRef*, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::avx::GeneralBVHBuilder::Settings)
|
typename BVHN<N>::NodeRef BVHNBuilderVirtual<N>::BVHNBuilderV::build(FastAllocator* allocator, BuildProgressMonitor& progressFunc, PrimRef* prims, const PrimInfo& pinfo, GeneralBVHBuilder::Settings settings)
{
auto createLeafFunc = [&] (const PrimRef* prims, const range<size_t>& set, const Allocator& alloc) -> NodeRef {
return createLeaf(prims,set,alloc);
};
settings.branchingFactor = N;
settings.maxDepth = BVH::maxBuildDepthLeaf;
return BVHBuilderBinnedSAH::build<NodeRef>
(FastAllocator::Create(allocator),typename BVH::AABBNode::Create2(),typename BVH::AABBNode::Set3(allocator,prims),createLeafFunc,progressFunc,prims,pinfo,settings);
}
|
pushq %rbx
subq $0x60, %rsp
movq %rcx, %r9
movq %rdx, %rax
movq %rsp, %r10
movq %rdi, (%r10)
leaq 0x70(%rsp), %r11
movq $0x8, (%r11)
movq $0x28, 0x8(%r11)
leaq 0x8(%rsp), %rdi
movq %rcx, (%rdi)
movq 0x48(%r8), %rbx
subq 0x40(%r8), %rbx
movq %rsi, %rcx
vmovaps (%r8), %xmm0
leaq 0x10(%rsp), %rdx
vmovaps %xmm0, (%rdx)
vmovaps 0x10(%r8), %xmm0
vmovaps %xmm0, 0x10(%rdx)
vmovaps 0x20(%r8), %xmm0
vmovaps %xmm0, 0x20(%rdx)
vmovaps 0x30(%r8), %xmm0
vmovaps %xmm0, 0x30(%rdx)
movq $0x0, 0x40(%rdx)
movq %rbx, 0x48(%rdx)
subq $0x8, %rsp
movq %r9, %rsi
movq %rcx, %r8
pushq %r11
pushq %rax
pushq %r10
callq 0x150e2e6
addq $0x80, %rsp
popq %rbx
retq
|
/embree[P]embree/kernels/bvh/bvh_builder.cpp
|
embree::avx::GeneralBVHBuilder::BuilderT<embree::avx::GeneralBVHBuilder::BuildRecordT<embree::avx::PrimInfoRange, embree::avx::BinSplit<32ul>>, embree::avx::HeuristicArrayBinningSAH<embree::PrimRef, 32ul>, embree::avx::PrimInfoRange, embree::PrimRef, embree::NodeRefPtr<4>, embree::FastAllocator::CachedAllocator, embree::FastAllocator::Create, embree::AABBNode_t<embree::NodeRefPtr<4>, 4>::Create2, embree::AABBNode_t<embree::NodeRefPtr<4>, 4>::Set3, embree::avx::BVHNBuilderVirtual<4>::BVHNBuilderV::build(embree::FastAllocator*, embree::BuildProgressMonitor&, embree::PrimRef*, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::avx::GeneralBVHBuilder::Settings)::'lambda'(embree::PrimRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&), embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafFunc<embree::PrimRef, embree::avx::PrimInfoRange>, embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafSplitFunc<embree::PrimRef, embree::avx::PrimInfoRange>, embree::BuildProgressMonitor>::BuilderT(embree::PrimRef*, embree::avx::HeuristicArrayBinningSAH<embree::PrimRef, 32ul>&, embree::FastAllocator::Create const&, embree::AABBNode_t<embree::NodeRefPtr<4>, 4>::Create2 const&, embree::AABBNode_t<embree::NodeRefPtr<4>, 4>::Set3 const&, embree::avx::BVHNBuilderVirtual<4>::BVHNBuilderV::build(embree::FastAllocator*, embree::BuildProgressMonitor&, embree::PrimRef*, embree::PrimInfoT<embree::BBox<embree::Vec3fa>> const&, embree::avx::GeneralBVHBuilder::Settings)::'lambda'(embree::PrimRef const*, embree::range<unsigned long> const&, embree::FastAllocator::CachedAllocator const&) const&, embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafFunc<embree::PrimRef, embree::avx::PrimInfoRange> const&, embree::avx::GeneralBVHBuilder::DefaultCanCreateLeafSplitFunc<embree::PrimRef, embree::avx::PrimInfoRange> const&, embree::BuildProgressMonitor const&, embree::avx::GeneralBVHBuilder::Settings const&)
|
BuilderT (PrimRef* prims,
Heuristic& heuristic,
const CreateAllocFunc& createAlloc,
const CreateNodeFunc& createNode,
const UpdateNodeFunc& updateNode,
const CreateLeafFunc& createLeaf,
const CanCreateLeafFunc& canCreateLeaf,
const CanCreateLeafSplitFunc& canCreateLeafSplit,
const ProgressMonitor& progressMonitor,
const Settings& settings) :
cfg(settings),
prims(prims),
heuristic(heuristic),
createAlloc(createAlloc),
createNode(createNode),
updateNode(updateNode),
createLeaf(createLeaf),
canCreateLeaf(canCreateLeaf),
canCreateLeafSplit(canCreateLeafSplit),
progressMonitor(progressMonitor)
{
if (cfg.branchingFactor > MAX_BRANCHING_FACTOR)
throw_RTCError(RTC_ERROR_UNKNOWN,"bvh_builder: branching factor too large");
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %rbx
subq $0x28, %rsp
vmovups 0x50(%rsp), %ymm0
movq 0x70(%rsp), %rax
vmovups (%rax), %ymm1
vmovups 0x20(%rax), %ymm2
vmovups %ymm1, (%rdi)
vmovups %ymm2, 0x20(%rdi)
movq %rsi, 0x40(%rdi)
movq %rdx, 0x48(%rdi)
movq %rcx, 0x50(%rdi)
movq %r8, 0x58(%rdi)
movq %r9, 0x60(%rdi)
vmovups %ymm0, 0x68(%rdi)
cmpq $0x11, (%rdi)
jae 0x1505ca0
addq $0x28, %rsp
popq %rbx
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
movl $0x30, %edi
vzeroupper
callq 0x6a3b0
movq %rax, %rbx
leaq 0x18(%rsp), %r15
movq %r15, -0x10(%r15)
leaq 0x9e6f8c(%rip), %rsi # 0x1eecc4c
leaq 0x9e6fac(%rip), %rdx # 0x1eecc73
leaq 0x8(%rsp), %rdi
callq 0x8d7230
leaq 0xbfbbd8(%rip), %rax # 0x21018b0
movq %rax, (%rbx)
movl $0x1, 0x8(%rbx)
leaq 0x10(%rbx), %rdi
movq %rbx, %rax
addq $0x20, %rax
movq %rax, 0x10(%rbx)
movq 0x8(%rsp), %rsi
movq 0x10(%rsp), %rdx
addq %rsi, %rdx
callq 0x8d7100
leaq 0xbfbb46(%rip), %rsi # 0x2101850
leaq -0xc2eef5(%rip), %rdx # 0x8d6e1c
movq %rbx, %rdi
callq 0x6a5d0
movq %rax, %r14
xorl %ebp, %ebp
jmp 0x1505d2e
movq %rax, %r14
movq %rbx, %rdi
callq 0x6a0e0
movb $0x1, %bpl
movq 0x8(%rsp), %rdi
cmpq %r15, %rdi
je 0x1505d45
callq 0x6a4f0
jmp 0x1505d45
movq %rax, %r14
movb $0x1, %bpl
testb %bpl, %bpl
je 0x1505d52
movq %rbx, %rdi
callq 0x6a8a0
movq %r14, %rdi
callq 0x6a600
|
/embree[P]embree/kernels/bvh/../builders/bvh_builder_sah.h
|
void embree::avx512::CurveNvIntersectorK<8, 16>::intersect_t<embree::avx512::SweepCurve1IntersectorK<embree::BezierCurveT, 16>, embree::avx512::Intersect1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayHitK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xc40, %rsp # imm = 0xC40
movq %rcx, %r15
movq %rdx, %r10
movq %rsi, %r12
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r10,4), %xmm1
vmovss 0x100(%rsi,%r10,4), %xmm2
vinsertps $0x10, 0x40(%rsi,%r10,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%rsi,%r10,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%rsi,%r10,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%rsi,%r10,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x301034(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x30f802(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x30f777(%rip), %ymm6 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm6, %ymm2, %ymm5
vbroadcastss 0x2df889(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm6, %ymm1, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm6, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2daf77(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc0(%r12,%r10,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x30e683(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
movq %r10, 0x20(%rsp)
vminps 0x200(%r12,%r10,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x30e656(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x349058(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x520(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c14037
leaq (%r8,%rax), %r9
addq $0x6, %r9
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r10d
addq $0x10, %r9
leaq 0x880(%rsp), %rax
addq $0x1c0, %rax # imm = 0x1C0
movq %rax, 0x1b0(%rsp)
movl $0x1, %eax
movq 0x20(%rsp), %rcx
shlxl %ecx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x7c0(%rsp)
movq %r15, 0x30(%rsp)
movq %r8, 0x1c0(%rsp)
movq %r9, 0x1b8(%rsp)
tzcntq %r10, %rax
blsrq %r10, %r10
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x40(%rsp)
shll $0x6, %eax
movq %r10, %rcx
vmovups (%r9,%rax), %xmm0
subq $0x1, %rcx
jb 0x1c11999
andq %r10, %rcx
tzcntq %r10, %rdx
shll $0x6, %edx
prefetcht0 (%r9,%rdx)
prefetcht0 0x40(%r9,%rdx)
testq %rcx, %rcx
je 0x1c11999
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r9,%rcx)
prefetcht1 0x40(%r9,%rcx)
vmovups 0x10(%r9,%rax), %xmm1
vmovups 0x20(%r9,%rax), %xmm2
vmovups 0x30(%r9,%rax), %xmm3
movq 0x20(%rsp), %rax
vmovss (%r12,%rax,4), %xmm4
vinsertps $0x1c, 0x40(%r12,%rax,4), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r12,%rax,4), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vbroadcastss 0x100(%r12,%rax,4), %ymm26
vbroadcastss 0x140(%r12,%rax,4), %ymm30
vunpcklps %xmm30, %xmm26, %xmm5 # xmm5 = xmm26[0],xmm30[0],xmm26[1],xmm30[1]
vbroadcastss 0x180(%r12,%rax,4), %ymm21
vinsertps $0x28, %xmm21, %xmm5, %xmm9 # xmm9 = xmm5[0,1],xmm21[0],zero
vaddps %xmm1, %xmm0, %xmm5
vaddps %xmm2, %xmm5, %xmm5
vaddps %xmm3, %xmm5, %xmm5
vmulps 0x30b8f5(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm4, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0xc0(%r12,%rax,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
movl 0x2(%r8), %eax
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x2df5b7(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x260(%rsp)
vmovaps %ymm6, 0x380(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm6) + xmm4
vblendps $0x8, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[3]
vsubps %xmm4, %xmm0, %xmm6
vsubps %xmm4, %xmm2, %xmm7
vsubps %xmm4, %xmm1, %xmm8
vsubps %xmm4, %xmm3, %xmm3
vbroadcastss %xmm6, %ymm0
vmovaps %ymm0, 0x760(%rsp)
vbroadcastss 0x300c72(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm1
vmovaps %ymm1, 0x740(%rsp)
vbroadcastss 0x30f433(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm2
vmovaps %ymm2, 0x720(%rsp)
vbroadcastss 0x30f418(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x340(%rsp)
vpermps %ymm6, %ymm2, %ymm4
vmovaps %ymm4, 0x700(%rsp)
vbroadcastss %xmm8, %ymm4
vmovaps %ymm4, 0x6e0(%rsp)
vpermps %ymm8, %ymm0, %ymm4
vmovaps %ymm4, 0x6c0(%rsp)
vpermps %ymm8, %ymm1, %ymm4
vmovaps %ymm4, 0x6a0(%rsp)
vmovaps %ymm8, 0x300(%rsp)
vpermps %ymm8, %ymm2, %ymm4
vmovaps %ymm4, 0x680(%rsp)
vbroadcastss %xmm7, %ymm4
vmovaps %ymm4, 0x660(%rsp)
vpermps %ymm7, %ymm0, %ymm4
vmovaps %ymm4, 0x640(%rsp)
vpermps %ymm7, %ymm1, %ymm4
vmovaps %ymm4, 0x620(%rsp)
vmovaps %ymm7, 0x320(%rsp)
vpermps %ymm7, %ymm2, %ymm4
vmovaps %ymm4, 0x600(%rsp)
vbroadcastss %xmm3, %ymm4
vmovaps %ymm4, 0x5e0(%rsp)
vpermps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vpermps %ymm3, %ymm1, %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps %ymm3, 0x2e0(%rsp)
vpermps %ymm3, %ymm2, %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmulss %xmm21, %xmm21, %xmm0
vfmadd231ps %ymm30, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm30) + ymm0
vfmadd231ps %ymm26, %ymm26, %ymm0 # ymm0 = (ymm26 * ymm26) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x560(%rsp)
vandps 0x30f300(%rip){1to8}, %ymm0, %ymm0 # 0x1f20ec4
vmovaps %ymm0, 0x4a0(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x280(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x540(%rsp)
movq %rax, 0x98(%rsp)
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x840(%rsp)
movl $0x1, %ebx
xorl %r11d, %r11d
movl 0x40(%rsp), %eax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x800(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x11c(%rsp)
vmovaps %xmm11, 0x250(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x118(%rsp)
vmovsd 0x2daaa5(%rip), %xmm2 # 0x1eec6f0
vmovaps 0x30f2cd(%rip), %ymm3 # 0x1f20f20
vmovaps %ymm26, 0x140(%rsp)
vmovaps %ymm30, 0x120(%rsp)
vmovaps %ymm21, 0x1e0(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vsubss %xmm2, %xmm0, %xmm1
vmulss 0x30f255(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm2, 0x1a0(%rsp)
vbroadcastss %xmm2, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x60(%rsp)
vmovaps %ymm0, 0x220(%rsp)
vfmadd231ps %ymm3, %ymm0, %ymm5 # ymm5 = (ymm0 * ymm3) + ymm5
vbroadcastss 0x2daa69(%rip), %ymm0 # 0x1eec714
vsubps %ymm5, %ymm0, %ymm7
vmovaps 0x6e0(%rsp), %ymm12
vmulps %ymm5, %ymm12, %ymm1
vmovaps 0x6c0(%rsp), %ymm13
vmulps %ymm5, %ymm13, %ymm2
vmovaps 0x6a0(%rsp), %ymm14
vmulps %ymm5, %ymm14, %ymm3
vmovaps 0x680(%rsp), %ymm15
vmulps %ymm5, %ymm15, %ymm4
vfmadd231ps 0x760(%rsp), %ymm7, %ymm1 # ymm1 = (ymm7 * mem) + ymm1
vfmadd231ps 0x740(%rsp), %ymm7, %ymm2 # ymm2 = (ymm7 * mem) + ymm2
vfmadd231ps 0x720(%rsp), %ymm7, %ymm3 # ymm3 = (ymm7 * mem) + ymm3
vfmadd231ps 0x700(%rsp), %ymm7, %ymm4 # ymm4 = (ymm7 * mem) + ymm4
vmovaps 0x660(%rsp), %ymm16
vmulps %ymm5, %ymm16, %ymm8
vmovaps 0x640(%rsp), %ymm17
vmulps %ymm5, %ymm17, %ymm9
vmovaps 0x620(%rsp), %ymm18
vmulps %ymm5, %ymm18, %ymm10
vmovaps 0x600(%rsp), %ymm19
vmulps %ymm5, %ymm19, %ymm11
vfmadd231ps %ymm12, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm12) + ymm8
vfmadd231ps %ymm13, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm13) + ymm9
vfmadd231ps %ymm14, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm14) + ymm10
vfmadd231ps %ymm15, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm15) + ymm11
vmulps 0x5e0(%rsp), %ymm5, %ymm12
vmulps 0x5c0(%rsp), %ymm5, %ymm13
vmulps 0x5a0(%rsp), %ymm5, %ymm14
vmulps 0x580(%rsp), %ymm5, %ymm15
vfmadd231ps %ymm16, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm16) + ymm12
vfmadd231ps %ymm17, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm17) + ymm13
vfmadd231ps %ymm18, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm19) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm5, %ymm12, %ymm3
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x2df1ba(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x34debb(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm3, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm3, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x2de98c(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2de986(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0x200(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2da740(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2da721(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm27
vsubps %ymm3, %ymm31, %ymm28
vmovaps %ymm29, 0xa0(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmulps %ymm29, %ymm21, %ymm22
vfmadd231ps %ymm28, %ymm30, %ymm22 # ymm22 = (ymm30 * ymm28) + ymm22
vfmadd231ps %ymm27, %ymm26, %ymm22 # ymm22 = (ymm26 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm21, %ymm17
vfmadd231ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm30) + ymm17
vfmadd231ps %ymm26, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm26) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm21
vmovaps 0x560(%rsp), %ymm15
vsubps %ymm21, %ymm15, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm16
vmovaps %ymm16, 0xc0(%rsp)
vsubps %ymm1, %ymm16, %ymm1
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2daad2(%rip){1to8}, %ymm15, %ymm26 # 0x1eecb8c
vmulps %ymm1, %ymm26, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vxorps %xmm16, %xmm16, %xmm16
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c121c6
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x30edb7(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x460(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x440(%rsp)
vbroadcastss 0x2d98c2(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x2daa17(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x30ed48(%rip), %ymm0 # 0x1f20ec4
vmovaps %ymm21, %ymm24
vandps %ymm0, %ymm21, %ymm23
vmovaps 0x4a0(%rsp), %ymm21
vmaxps %ymm23, %ymm21, %ymm23
vmulps 0x2dfd14(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c13f85
vbroadcastss 0x2da556(%rip), %ymm0 # 0x1eec714
vmovaps %ymm24, %ymm21
jmp 0x1c121da
vbroadcastss 0x2d9850(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2da9aa(%rip), %ymm31 # 0x1eecb84
andb $0x7f, %al
je 0x1c125aa
vmovaps %ymm21, 0x3a0(%rsp)
movq 0x20(%rsp), %rcx
vmovaps %ymm0, %ymm16
vmovss 0x200(%r12,%rcx,4), %xmm0
vsubss 0x280(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x540(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x1e0(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x120(%rsp), %ymm24
vfmadd231ps %ymm12, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm12) + ymm13
vmovaps 0x140(%rsp), %ymm31
vfmadd231ps %ymm11, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm11) + ymm13
vbroadcastss 0x30ec5c(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2ded70(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x30ec37(%rip), %ymm30 # 0x1f20ec0
vxorps %ymm30, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm30, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2da8bc(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2d9739(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm24, %ymm8 # ymm8 = -(ymm24 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm30, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm30, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x360(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c125cf
vmovaps 0x460(%rsp), %ymm0
vmaxps 0x200(%rsp), %ymm23, %ymm1
vminps %ymm16, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x440(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x30eb60(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x30c0cb(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm13
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x460(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x440(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0xc0(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm26, %ymm0
vsubps %ymm0, %ymm25, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
je 0x1c125ec
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x30ea38(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x140(%rsp), %ymm26
vmulps %ymm12, %ymm26, %ymm0
vmovaps 0x120(%rsp), %ymm30
vmulps %ymm12, %ymm30, %ymm1
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0xa0(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm26, %ymm10
vmulps %ymm13, %ymm30, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2d94d7(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x2da62c(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vbroadcastss 0x30e95d(%rip), %ymm7 # 0x1f20ec4
vandps 0x3a0(%rsp), %ymm7, %ymm6
vmovaps 0x4a0(%rsp), %ymm12
vmaxps %ymm6, %ymm12, %ymm6
vmulps 0x2df92d(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm7, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c13fce
vbroadcastss 0x30e91f(%rip), %xmm11 # 0x1f20ec4
jmp 0x1c1262c
vmovaps 0x30e96e(%rip), %ymm3 # 0x1f20f20
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm21
jmp 0x1c13e76
vmovaps 0x30e949(%rip), %ymm3 # 0x1f20f20
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
jmp 0x1c13e76
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2d9416(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x2da571(%rip), %ymm2 # 0x1eecb84
vbroadcastss 0x30e8a8(%rip), %xmm11 # 0x1f20ec4
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm3, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm26, %ymm5 # ymm5 = (ymm26 * ymm10) + ymm5
vmovaps 0x360(%rsp), %ymm7
vmovaps %ymm7, 0x780(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x7a0(%rsp)
vbroadcastss 0x30e85e(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm5, %ymm4
vmaxps %ymm2, %ymm7, %ymm5
vmovaps %ymm5, 0x4c0(%rsp)
vmovaps %ymm8, 0x4e0(%rsp)
vbroadcastss 0x30e84b(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0x114(%rsp)
vcmpleps %ymm3, %ymm7, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x400(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %edx
andb %al, %dl
movl %edx, %eax
orb %cl, %al
je 0x1c13e3c
movl %edx, 0x3c(%rsp)
movq %r11, 0x1d0(%rsp)
movq %r10, 0x1d8(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm9, %ymm21, %ymm2
vfmadd213ps %ymm2, %ymm30, %ymm1 # ymm1 = (ymm30 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm26, %ymm0 # ymm0 = (ymm26 * ymm0) + ymm1
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0x10c(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x30e7c5(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x30e7b7(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %rbx, 0x1c8(%rsp)
vpbroadcastd %ebx, %ymm1
vmovdqa %ymm0, 0x500(%rsp)
vmovdqa %ymm1, 0x480(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %ebx
movl %ecx, 0x110(%rsp)
andb %cl, %bl
je 0x1c1321b
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x320(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm0, %xmm11, %xmm0
vandps %xmm1, %xmm11, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2df6f8(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x50(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x270(%rsp)
vmovaps 0x360(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x420(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2d9228(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x460(%rsp,%rcx), %xmm8
vmovss 0x780(%rsp,%rcx), %xmm9
vmovaps 0x250(%rsp), %xmm0
vucomiss 0x2d91c4(%rip), %xmm0 # 0x1eeba24
vmovss 0x118(%rsp), %xmm0
jae 0x1c128ac
vmovaps 0x250(%rsp), %xmm0
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2df5f1(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x54(%rsp)
movl $0x4, %r14d
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2d9e2a(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x300(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x2d0(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x340(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x320(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x2e0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x2de68a(%rip){1to4}, %xmm1, %xmm10 # 0x1ef0fec
vmovaps %xmm4, 0x3a0(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2d909e(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
jb 0x1c129af
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c129c1
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm10, %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2d9d3b(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x2d9d37(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x30e4cb(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x2b0(%rsp)
vfnmadd213ss 0x2de5f1(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x58(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x2c(%rsp)
vmovaps %xmm0, 0x2c0(%rsp)
jb 0x1c12a2a
vsqrtss %xmm0, %xmm0, %xmm16
jmp 0x1c12a74
vmovaps %xmm3, 0xf0(%rsp)
vmovss %xmm4, 0x28(%rsp)
vmovss %xmm5, 0xe0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xe0(%rsp), %xmm5
vmovss 0x28(%rsp), %xmm4
vmovaps 0xf0(%rsp), %xmm3
vmovss 0x2c(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm16
vmovaps 0x220(%rsp), %xmm12
vmovaps 0x200(%rsp), %xmm19
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm6
vmulps %xmm6, %xmm10, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vdpps $0x7f, %xmm0, %xmm12, %xmm15
vaddss 0x2d9c5f(%rip), %xmm7, %xmm14 # 0x1eec714
vmulps %xmm15, %xmm15, %xmm0
vsubps %xmm0, %xmm19, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm18
vmulss 0x2d9c40(%rip), %xmm18, %xmm17 # 0x1eec718
vmulss 0x2d9c3a(%rip), %xmm0, %xmm20 # 0x1eec71c
vucomiss 0x2d8f3a(%rip), %xmm0 # 0x1eeba24
jb 0x1c12af5
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c12b8a
vmovss %xmm14, 0x28(%rsp)
vmovaps %xmm15, 0xe0(%rsp)
vmovss %xmm16, 0x4c(%rsp)
vmovss %xmm17, 0x48(%rsp)
vmovaps %xmm18, 0x2a0(%rsp)
vmovss %xmm20, 0x44(%rsp)
vmovaps %xmm6, 0x290(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x290(%rsp), %xmm6
vmovss 0x44(%rsp), %xmm20
vmovaps 0x2a0(%rsp), %xmm18
vmovss 0x48(%rsp), %xmm17
vmovss 0x4c(%rsp), %xmm16
vmovaps 0xe0(%rsp), %xmm15
vmovss 0x2c(%rsp), %xmm7
vmovss 0x28(%rsp), %xmm14
vmovaps 0x200(%rsp), %xmm19
vmovaps 0x220(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x30e331(%rip), %xmm11 # 0x1f20ec4
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm9
vmovss 0x2dde17(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x2d0(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x2de415(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x2e0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x320(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x300(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x340(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x2c0(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm10, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmovss 0x58(%rsp), %xmm2
vmulss 0x2b0(%rsp), %xmm2, %xmm2
vmulss 0x54(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x30e244(%rip){1to4}, %xmm10, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm6, %xmm4
vmovaps 0xf0(%rsp), %xmm13
vdpps $0x7f, %xmm13, %xmm2, %xmm5
vmovss 0x50(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm16, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm12, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm14) + xmm7
vmovaps 0x260(%rsp), %xmm7
vdpps $0x7f, %xmm13, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm12, %xmm5
vmulss %xmm18, %xmm20, %xmm2
vmulss %xmm18, %xmm18, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm12, %xmm6
vaddss %xmm2, %xmm17, %xmm7
vfnmadd231ss %xmm4, %xmm15, %xmm5 # xmm5 = -(xmm15 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm15, %xmm6 # xmm6 = -(xmm15 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm15, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm15, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm8, %xmm8
vsubss %xmm4, %xmm9, %xmm9
vandps %xmm11, %xmm15, %xmm3
vucomiss %xmm3, %xmm14
jbe 0x1c12f06
vaddss %xmm1, %xmm14, %xmm1
vmovaps 0x270(%rsp), %xmm3
vfmadd231ss 0x2df14a(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm2, %xmm11, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c12f06
vaddss 0x280(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x5c(%rsp), %xmm9
jb 0x1c12f09
movq 0x20(%rsp), %rax
vmovss 0x200(%r12,%rax,4), %xmm4
vucomiss %xmm9, %xmm4
jb 0x1c12f09
vucomiss 0x2d8c72(%rip), %xmm8 # 0x1eeba24
jb 0x1c12f09
vmovss 0x2d9954(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c12f09
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm19, %xmm2, %xmm1 # xmm1 = xmm19[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2d9935(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2d992f(%rip), %xmm19, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq 0x20(%rsp), %rax
movl 0x240(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c12f26
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vfmadd213ps %xmm10, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm10
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm10, %xmm10, %xmm3 # xmm3 = xmm10[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c12f2b
cmpq $0x0, 0x40(%r15)
jne 0x1c12f2b
movq 0x20(%rsp), %rcx
vmovss %xmm9, 0x200(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x300(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x340(%r12,%rcx,4)
vmovss %xmm0, 0x380(%r12,%rcx,4)
vmovss %xmm8, 0x3c0(%r12,%rcx,4)
movl $0x0, 0x400(%r12,%rcx,4)
movl 0x40(%rsp), %eax
movl %eax, 0x440(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x480(%r12,%rcx,4)
movq 0x30(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%rcx,4)
jmp 0x1c12f09
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c131f5
testb %al, %al
je 0x1c128cf
jmp 0x1c131f5
movq %rcx, %r15
jmp 0x1c12f09
movq 0x30(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm8, %zmm1
vbroadcastss 0x2ff7c0(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x30df88(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x880(%rsp)
vmovaps %zmm3, 0x8c0(%rsp)
vmovaps %zmm0, 0x900(%rsp)
vmovaps %zmm1, 0x940(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovdqa64 0x840(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa00(%rsp)
movq 0x1b0(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rdx)
vmovdqa %ymm0, 0x40(%rdx)
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
movq 0x20(%rsp), %rax
vmovss %xmm9, 0x200(%r12,%rax,4)
vmovaps 0x7c0(%rsp), %zmm0
vmovaps %zmm0, 0x3c0(%rsp)
leaq 0x3c0(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x180(%rsp)
movq %r12, 0x188(%rsp)
leaq 0x880(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x10, 0x198(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c130b9
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vbroadcastss 0x30de0b(%rip), %xmm11 # 0x1f20ec4
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c131dc
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c13135
testb $0x2, (%rcx)
jne 0x1c130ef
testb $0x40, 0x3e(%r15)
je 0x1c13135
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vbroadcastss 0x30dd8f(%rip), %xmm11 # 0x1f20ec4
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c131dc
movq 0x188(%rsp), %rax
movq 0x190(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c131eb
movq 0x20(%rsp), %rax
vmovss %xmm4, 0x200(%r12,%rax,4)
movq 0x30(%rsp), %r15
jmp 0x1c12f09
movq 0x20(%rsp), %rax
vmovaps 0x420(%rsp), %ymm0
vcmpleps 0x200(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c127eb
vmovaps 0x400(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
movq 0x20(%rsp), %rax
vcmpleps 0x200(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd 0x114(%rsp), %k1
kmovd 0x10c(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x3c(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x30dc73(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x30dc65(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x400(%rsp)
vpcmpled 0x480(%rsp), %ymm0, %k0
kmovd %k0, %ebx
movl %ecx, 0x3c(%rsp)
andb %cl, %bl
je 0x1c13d4c
vmovaps 0x4c0(%rsp), %ymm5
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x320(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm0, %xmm11, %xmm0
vandps %xmm1, %xmm11, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2debb5(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x50(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x270(%rsp)
vmovaps %ymm5, 0x360(%rsp)
vaddps 0x380(%rsp), %ymm5, %ymm0
vmovaps %ymm0, 0x420(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2d86e5(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x440(%rsp,%rcx), %xmm9
vmovss 0x4e0(%rsp,%rcx), %xmm10
vmovaps 0x250(%rsp), %xmm0
vucomiss 0x2d8681(%rip), %xmm0 # 0x1eeba24
vmovss 0x11c(%rsp), %xmm0
jae 0x1c133ef
vmovaps 0x250(%rsp), %xmm0
vmovaps %xmm9, 0xc0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm9
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2deaae(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x54(%rsp)
movl $0x4, %r14d
vbroadcastss %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2d92e7(%rip), %xmm1 # 0x1eec714
vsubss %xmm9, %xmm1, %xmm3
vbroadcastss %xmm9, %xmm1
vmovaps 0x300(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x2d0(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x340(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x320(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x2e0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x2ddb47(%rip){1to4}, %xmm1, %xmm11 # 0x1ef0fec
vmovaps %xmm4, 0x3a0(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2d855b(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm11, 0x60(%rsp)
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm9, 0xc0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
jb 0x1c134f2
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c13504
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm11
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm11, %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2d91f8(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x2d91f4(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x30d988(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x2b0(%rsp)
vfnmadd213ss 0x2ddaae(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x58(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x2c(%rsp)
vmovaps %xmm0, 0x2c0(%rsp)
jb 0x1c1356d
vsqrtss %xmm0, %xmm0, %xmm16
jmp 0x1c135b7
vmovaps %xmm3, 0xf0(%rsp)
vmovss %xmm4, 0x28(%rsp)
vmovss %xmm5, 0xe0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xe0(%rsp), %xmm5
vmovss 0x28(%rsp), %xmm4
vmovaps 0xf0(%rsp), %xmm3
vmovss 0x2c(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm11
vmovaps %xmm0, %xmm16
vmovaps 0x220(%rsp), %xmm12
vmovaps 0x200(%rsp), %xmm19
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm6
vmulps %xmm6, %xmm11, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vdpps $0x7f, %xmm0, %xmm12, %xmm15
vaddss 0x2d911c(%rip), %xmm7, %xmm14 # 0x1eec714
vmulps %xmm15, %xmm15, %xmm0
vsubps %xmm0, %xmm19, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm18
vmulss 0x2d90fd(%rip), %xmm18, %xmm17 # 0x1eec718
vmulss 0x2d90f7(%rip), %xmm0, %xmm20 # 0x1eec71c
vucomiss 0x2d83f7(%rip), %xmm0 # 0x1eeba24
jb 0x1c13638
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c136cd
vmovss %xmm14, 0x28(%rsp)
vmovaps %xmm15, 0xe0(%rsp)
vmovss %xmm16, 0x4c(%rsp)
vmovss %xmm17, 0x48(%rsp)
vmovaps %xmm18, 0x2a0(%rsp)
vmovss %xmm20, 0x44(%rsp)
vmovaps %xmm6, 0x290(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x290(%rsp), %xmm6
vmovss 0x44(%rsp), %xmm20
vmovaps 0x2a0(%rsp), %xmm18
vmovss 0x48(%rsp), %xmm17
vmovss 0x4c(%rsp), %xmm16
vmovaps 0xe0(%rsp), %xmm15
vmovss 0x2c(%rsp), %xmm7
vmovss 0x28(%rsp), %xmm14
vmovaps 0x200(%rsp), %xmm19
vmovaps 0x220(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm11
vbroadcastss 0x30d7ee(%rip), %xmm8 # 0x1f20ec4
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovss 0x2dd2d4(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x2d0(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm1) + xmm5
vmovss 0x2dd8d2(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm9, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x2e0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x320(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm9, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm9
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x300(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x340(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x2c0(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm11, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmovss 0x58(%rsp), %xmm2
vmulss 0x2b0(%rsp), %xmm2, %xmm2
vmulss 0x54(%rsp), %xmm10, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x30d701(%rip){1to4}, %xmm11, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm6, %xmm4
vmovaps 0xf0(%rsp), %xmm13
vdpps $0x7f, %xmm13, %xmm2, %xmm5
vmovss 0x50(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm16, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm12, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm14) + xmm7
vmovaps 0x260(%rsp), %xmm7
vdpps $0x7f, %xmm13, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm12, %xmm5
vmulss %xmm18, %xmm20, %xmm2
vmulss %xmm18, %xmm18, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm12, %xmm6
vaddss %xmm2, %xmm17, %xmm7
vfnmadd231ss %xmm4, %xmm15, %xmm5 # xmm5 = -(xmm15 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm15, %xmm6 # xmm6 = -(xmm15 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm15, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm15, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm9, %xmm9
vsubss %xmm4, %xmm10, %xmm10
vandps %xmm8, %xmm15, %xmm3
vucomiss %xmm3, %xmm14
jbe 0x1c13a49
vaddss %xmm1, %xmm14, %xmm1
vmovaps 0x270(%rsp), %xmm3
vfmadd231ss 0x2de607(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm2, %xmm8, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c13a49
vaddss 0x280(%rsp), %xmm10, %xmm10
movb $0x1, %r13b
vucomiss 0x5c(%rsp), %xmm10
jb 0x1c13a4c
movq 0x20(%rsp), %rax
vmovss 0x200(%r12,%rax,4), %xmm4
vucomiss %xmm10, %xmm4
jb 0x1c13a4c
vucomiss 0x2d812f(%rip), %xmm9 # 0x1eeba24
jb 0x1c13a4c
vmovss 0x2d8e11(%rip), %xmm1 # 0x1eec714
vucomiss %xmm9, %xmm1
jb 0x1c13a4c
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm19, %xmm2, %xmm1 # xmm1 = xmm19[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2d8df2(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2d8dec(%rip), %xmm19, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq 0x20(%rsp), %rax
movl 0x240(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c13a69
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm12, %xmm1
vfmadd213ps %xmm11, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm11
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm3 # xmm3 = xmm11[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c13a6e
cmpq $0x0, 0x40(%r15)
jne 0x1c13a6e
movq 0x20(%rsp), %rcx
vmovss %xmm10, 0x200(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x300(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x340(%r12,%rcx,4)
vmovss %xmm0, 0x380(%r12,%rcx,4)
vmovss %xmm9, 0x3c0(%r12,%rcx,4)
movl $0x0, 0x400(%r12,%rcx,4)
movl 0x40(%rsp), %eax
movl %eax, 0x440(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x480(%r12,%rcx,4)
movq 0x30(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%rcx,4)
jmp 0x1c13a4c
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c13d26
testb %al, %al
je 0x1c13412
jmp 0x1c13d26
movq %rcx, %r15
jmp 0x1c13a4c
movq 0x30(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm9, %zmm1
vbroadcastss 0x2fec7d(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x30d445(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x880(%rsp)
vmovaps %zmm3, 0x8c0(%rsp)
vmovaps %zmm0, 0x900(%rsp)
vmovaps %zmm1, 0x940(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovdqa64 0x840(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa00(%rsp)
movq 0x1b0(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rdx)
vmovdqa %ymm0, 0x40(%rdx)
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
movq 0x20(%rsp), %rax
vmovss %xmm10, 0x200(%r12,%rax,4)
vmovaps 0x7c0(%rsp), %zmm0
vmovaps %zmm0, 0x3c0(%rsp)
leaq 0x3c0(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x180(%rsp)
movq %r12, 0x188(%rsp)
leaq 0x880(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x10, 0x198(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm9, 0xc0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c13bf3
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c13d0d
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c13c66
testb $0x2, (%rcx)
jne 0x1c13c29
testb $0x40, 0x3e(%r15)
je 0x1c13c66
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c13d0d
movq 0x188(%rsp), %rax
movq 0x190(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c13d1c
movq 0x20(%rsp), %rax
vmovss %xmm4, 0x200(%r12,%rax,4)
movq 0x30(%rsp), %r15
jmp 0x1c13a4c
movq 0x20(%rsp), %rax
vmovaps 0x420(%rsp), %ymm0
vcmpleps 0x200(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c1332e
vmovdqa 0x480(%rsp), %ymm1
vpcmpltd 0x400(%rsp), %ymm1, %k1
vmovaps 0x780(%rsp), %ymm0
vpcmpltd 0x500(%rsp), %ymm1, %k2
vmovaps 0x380(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
movq 0x20(%rsp), %rax
vbroadcastss 0x200(%r12,%rax,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0x110(%rsp), %ecx
andb %al, %cl
vmovaps 0x4c0(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x3c(%rsp), %edx
andb %al, %dl
orb %cl, %dl
je 0x1c13e46
movq 0x1d0(%rsp), %r11
movl %r11d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %dl, 0xae0(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0xb00(%rsp,%rax)
vmovaps 0x1a0(%rsp), %xmm2
vmovlps %xmm2, 0xb20(%rsp,%rax)
movq 0x1c8(%rsp), %rbx
leal 0x1(%rbx), %ecx
movl %ecx, 0xb28(%rsp,%rax)
incl %r11d
movq 0x1c0(%rsp), %r8
movq 0x1b8(%rsp), %r9
movq 0x1d8(%rsp), %r10
vmovaps 0x30d0e6(%rip), %ymm3 # 0x1f20f20
jmp 0x1c13e7f
vmovaps 0x30d0dc(%rip), %ymm3 # 0x1f20f20
jmp 0x1c13e76
movq 0x1c0(%rsp), %r8
movq 0x1b8(%rsp), %r9
movq 0x1d8(%rsp), %r10
vmovaps 0x30d0ba(%rip), %ymm3 # 0x1f20f20
movq 0x1d0(%rsp), %r11
movq 0x1c8(%rsp), %rbx
vmovaps 0x1a0(%rsp), %xmm2
movl %r11d, %eax
testl %eax, %eax
je 0x1c14010
leal -0x1(%rax), %r11d
leaq (%r11,%r11,2), %rcx
shlq $0x5, %rcx
vmovaps 0xb00(%rsp,%rcx), %ymm0
movzbl 0xae0(%rsp,%rcx), %esi
vaddps 0x380(%rsp), %ymm0, %ymm1
movq 0x20(%rsp), %rdx
vcmpleps 0x200(%r12,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %edx
andl %esi, %edx
je 0x1c13f75
kmovd %edx, %k1
vbroadcastss 0x2d7b46(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %dl, %sil
je 0x1c13f12
movzbl %sil, %edi
jmp 0x1c13f15
movzbl %dl, %edi
leaq (%rsp,%rcx), %rsi
addq $0xae0, %rsi # imm = 0xAE0
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %ebx
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %edx, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c13f4f
movl %eax, %r11d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps %ymm3, %ymm1, %ymm0 # ymm0 = (ymm0 * ymm3) + ymm1
vmovaps %ymm0, 0x880(%rsp)
vmovsd 0x880(%rsp,%rcx,4), %xmm2
movl %r11d, %eax
testb %dl, %dl
je 0x1c13e82
jmp 0x1c11c6b
vcmpleps %ymm16, %ymm1, %k2
vbroadcastss 0x2d8bef(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x2d7a81(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm1, %ymm16, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm16, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %edx
orb %cl, %dl
andb %al, %dl
movl %edx, %eax
jmp 0x1c121b5
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2d8ba6(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2d7a39(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c1259c
movq 0x20(%rsp), %rax
vmovaps 0x520(%rsp), %ymm0
vcmpleps 0x200(%r12,%rax,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r10d
jne 0x1c11946
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 16>::occluded_t<embree::avx512::SweepCurve1IntersectorK<embree::BezierCurveT, 16>, embree::avx512::Occluded1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xc80, %rsp # imm = 0xC80
movq %r8, %r9
movzbl 0x1(%r8), %r10d
leaq (%r10,%r10,4), %r11
leaq (%r11,%r11,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%rdx,4), %xmm1
vmovss 0x100(%rsi,%rdx,4), %xmm2
vinsertps $0x10, 0x40(%rsi,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%rsi,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%rsi,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%rsi,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%r10,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%r10,%r10,2), %rbx
vpmovsxbd 0x6(%r8,%rbx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%r10,%r11,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rbx,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %r10, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%r10,%r10,8), %rdi
leal (%rdi,%rdi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %r10, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %r11d
vpmovsxbd 0x6(%r9,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2fe5b3(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x30cd7c(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x30ccea(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x2dcdff(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2d84e9(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%r10,8), %r8
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rdi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%r10,%r10), %rdi
addq %r10, %r11
shlq $0x3, %rbx
subq %r10, %rbx
movl %r10d, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %rdi, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%r11), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rbx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc0(%rsi,%rdx,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x30bbf9(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
movq %rsi, 0x8(%rsp)
vminps 0x200(%rsi,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x30bbcc(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %r10d, %ymm1
vpcmpgtd 0x3465c8(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x740(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r10b
je 0x1c16d1e
movq %rcx, 0x30(%rsp)
leaq (%r9,%rax), %r11
addq $0x6, %r11
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %r11
leaq 0x8c0(%rsp), %rax
addq $0x1c0, %rax # imm = 0x1C0
movq %rax, 0x2a0(%rsp)
movl $0x1, %eax
shlxl %edx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x800(%rsp)
movq %rdx, 0x10(%rsp)
movq %r9, 0x2b0(%rsp)
movq %r11, 0x2a8(%rsp)
tzcntq %r14, %rcx
blsrq %r14, %r14
movl 0x6(%r9,%rcx,4), %eax
shll $0x6, %ecx
movq %r14, %rdi
vmovups (%r11,%rcx), %xmm0
subq $0x1, %rdi
jb 0x1c14429
andq %r14, %rdi
tzcntq %r14, %rsi
shll $0x6, %esi
prefetcht0 (%r11,%rsi)
prefetcht0 0x40(%r11,%rsi)
testq %rdi, %rdi
je 0x1c14429
tzcntq %rdi, %rsi
shll $0x6, %esi
prefetcht1 (%r11,%rsi)
prefetcht1 0x40(%r11,%rsi)
vmovups 0x10(%r11,%rcx), %xmm1
vmovups 0x20(%r11,%rcx), %xmm2
vmovups 0x30(%r11,%rcx), %xmm3
movq 0x8(%rsp), %rcx
vmovss (%rcx,%rdx,4), %xmm4
vinsertps $0x1c, 0x40(%rcx,%rdx,4), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%rcx,%rdx,4), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
movl 0x2(%r9), %esi
vbroadcastss 0x100(%rcx,%rdx,4), %ymm30
vbroadcastss 0x140(%rcx,%rdx,4), %ymm21
vunpcklps %xmm21, %xmm30, %xmm5 # xmm5 = xmm30[0],xmm21[0],xmm30[1],xmm21[1]
vbroadcastss 0x180(%rcx,%rdx,4), %ymm15
vinsertps $0x28, %xmm15, %xmm5, %xmm9 # xmm9 = xmm5[0,1],xmm15[0],zero
vaddps %xmm1, %xmm0, %xmm5
vaddps %xmm2, %xmm5, %xmm5
vaddps %xmm3, %xmm5, %xmm5
vmulps 0x308e61(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm4, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0xc0(%rcx,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x2dcb28(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x370(%rsp)
vmovaps %ymm6, 0x5a0(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm6) + xmm4
vblendps $0x8, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[3]
vsubps %xmm4, %xmm0, %xmm6
vsubps %xmm4, %xmm2, %xmm7
vsubps %xmm4, %xmm1, %xmm8
vsubps %xmm4, %xmm3, %xmm3
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2fe1ec(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm12
vbroadcastss 0x30c9b6(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm13
vbroadcastss 0x30c9a4(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x500(%rsp)
vpermps %ymm6, %ymm2, %ymm24
vbroadcastss %xmm8, %ymm4
vpermps %ymm8, %ymm0, %ymm6
vpermps %ymm8, %ymm1, %ymm17
vmovaps %ymm8, 0x4c0(%rsp)
vpermps %ymm8, %ymm2, %ymm18
vbroadcastss %xmm7, %ymm19
vpermps %ymm7, %ymm0, %ymm20
vpermps %ymm7, %ymm1, %ymm22
vmovaps %ymm7, 0x4e0(%rsp)
vpermps %ymm7, %ymm2, %ymm23
vbroadcastss %xmm3, %ymm27
vpermps %ymm3, %ymm0, %ymm25
vpermps %ymm3, %ymm1, %ymm29
vmovaps %ymm3, 0x4a0(%rsp)
vpermps %ymm3, %ymm2, %ymm26
vmovaps %ymm4, %ymm2
vmovaps %ymm6, %ymm4
vmovaps %ymm15, 0x580(%rsp)
vmulss %xmm15, %xmm15, %xmm0
vfmadd231ps %ymm21, %ymm21, %ymm0 # ymm0 = (ymm21 * ymm21) + ymm0
vfmadd231ps %ymm30, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm30) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x7a0(%rsp)
vandps %ymm28, %ymm0, %ymm0
vmovaps %ymm0, 0x6c0(%rsp)
vmovss %xmm10, 0x3c(%rsp)
vmovaps %xmm5, 0x430(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x780(%rsp)
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x880(%rsp)
movl $0x1, %r15d
xorl %r13d, %r13d
xorl %ebx, %ebx
movq %rsi, 0x2c8(%rsp)
vpbroadcastd %esi, %zmm0
vmovdqa64 %zmm0, 0x840(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xdc(%rsp)
vmovaps %xmm11, 0x350(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xd8(%rsp)
vmovsd 0x2d8090(%rip), %xmm5 # 0x1eec6f0
vbroadcastss 0x2d80ab(%rip), %ymm3 # 0x1eec714
vmovaps %ymm30, 0x1e0(%rsp)
vmovaps %ymm21, 0x1c0(%rsp)
vmovaps %ymm9, 0x200(%rsp)
vmovaps %ymm12, 0x1a0(%rsp)
vmovaps %ymm13, 0x260(%rsp)
vmovaps %ymm24, 0x180(%rsp)
vmovaps %ymm2, 0x480(%rsp)
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm17, 0x160(%rsp)
vmovaps %ymm18, 0x140(%rsp)
vmovaps %ymm19, 0x3a0(%rsp)
vmovaps %ymm20, 0x380(%rsp)
vmovaps %ymm22, 0x120(%rsp)
vmovaps %ymm23, 0x100(%rsp)
vmovaps %ymm27, 0x760(%rsp)
vmovaps %ymm25, 0xe0(%rsp)
vmovaps %ymm29, 0x240(%rsp)
vmovaps %ymm26, 0x220(%rsp)
vmovshdup %xmm5, %xmm0 # xmm0 = xmm5[1,1,3,3]
vsubss %xmm5, %xmm0, %xmm1
vmulss 0x30c7c2(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm5, 0x410(%rsp)
vbroadcastss %xmm5, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x40(%rsp)
vmovaps %ymm0, 0x320(%rsp)
vfmadd231ps 0x30c7e7(%rip), %ymm0, %ymm5 # ymm5 = (ymm0 * mem) + ymm5
vsubps %ymm5, %ymm3, %ymm7
vmulps %ymm5, %ymm2, %ymm1
vmovaps %ymm2, %ymm15
vmulps %ymm5, %ymm4, %ymm2
vmovaps %ymm3, %ymm0
vmulps %ymm5, %ymm17, %ymm3
vmovaps %ymm4, %ymm16
vmulps %ymm5, %ymm18, %ymm4
vfmadd231ps %ymm9, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm9) + ymm1
vfmadd231ps %ymm12, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm12) + ymm2
vfmadd231ps %ymm13, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm13) + ymm3
vfmadd231ps %ymm24, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm24) + ymm4
vmulps %ymm5, %ymm19, %ymm8
vmulps %ymm5, %ymm20, %ymm9
vmulps %ymm5, %ymm22, %ymm10
vmulps %ymm5, %ymm23, %ymm11
vfmadd231ps %ymm15, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm15) + ymm8
vfmadd231ps %ymm16, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm16) + ymm9
vfmadd231ps %ymm17, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm17) + ymm10
vfmadd231ps %ymm18, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm18) + ymm11
vmulps %ymm5, %ymm27, %ymm12
vmulps %ymm5, %ymm25, %ymm13
vmulps %ymm5, %ymm29, %ymm14
vmulps %ymm5, %ymm26, %ymm15
vfmadd231ps %ymm19, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm19) + ymm12
vfmadd231ps %ymm20, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm20) + ymm13
vfmadd231ps %ymm22, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm22) + ymm14
vfmadd231ps %ymm23, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm23) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm12, %ymm5, %ymm26
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm26 # ymm26 = (ymm7 * ymm17) + ymm26
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x2dc778(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x34b479(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm26, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm26, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x2dbf46(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2dbf40(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0x80(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2d7cfa(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2d7cdb(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm3
vmovaps %ymm26, 0x560(%rsp)
vsubps %ymm26, %ymm31, %ymm28
vmovaps %ymm29, 0x3e0(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmovaps 0x580(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vfmadd231ps %ymm3, %ymm30, %ymm22 # ymm22 = (ymm30 * ymm3) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm3, %ymm3, %ymm24 # ymm24 = (ymm3 * ymm3) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm30, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm30) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm3, %ymm18 # ymm18 = (ymm3 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm26
vmovaps 0x7a0(%rsp), %ymm27
vsubps %ymm26, %ymm27, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm24
vsubps %ymm1, %ymm24, %ymm1
vmulps %ymm22, %ymm22, %ymm16
vmulps 0x2d8085(%rip){1to8}, %ymm15, %ymm25 # 0x1eecb8c
vmulps %ymm1, %ymm25, %ymm30
vmovaps %ymm16, 0x60(%rsp)
vsubps %ymm30, %ymm16, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c14c46
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x30c368(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x680(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x660(%rsp)
vbroadcastss 0x2d6e73(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x2d7fc8(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x30c2f9(%rip), %ymm0 # 0x1f20ec4
vandps %ymm0, %ymm26, %ymm23
vmovaps 0x6c0(%rsp), %ymm21
vmaxps %ymm23, %ymm21, %ymm23
vmulps 0x2dd2cb(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
je 0x1c14c5a
vcmpleps 0x30c2fb(%rip), %ymm1, %k2 # 0x1f20f00
vbroadcastss 0x2d7f76(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x2d6e08(%rip), %ymm23 # 0x1eeba20
vblendmps %ymm1, %ymm23, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm23, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %al, %sil
movl %esi, %eax
jmp 0x1c14c5a
vbroadcastss 0x2d6dd0(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2d7f2a(%rip), %ymm31 # 0x1eecb84
andb $0x7f, %al
je 0x1c15082
vmovaps %ymm24, 0x540(%rsp)
vmovaps %ymm26, 0x3c0(%rsp)
movq 0x8(%rsp), %rcx
vmovss 0x200(%rcx,%rdx,4), %xmm0
vsubss 0x430(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x780(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm3) + ymm28
vmovaps 0x580(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x1c0(%rsp), %ymm26
vfmadd231ps %ymm12, %ymm26, %ymm13 # ymm13 = (ymm26 * ymm12) + ymm13
vmovaps 0x1e0(%rsp), %ymm16
vfmadd231ps %ymm11, %ymm16, %ymm13 # ymm13 = (ymm16 * ymm11) + ymm13
vbroadcastss 0x30c1db(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2dc2ef(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x30c1b6(%rip), %ymm31 # 0x1f20ec0
vxorps %ymm31, %ymm3, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm31, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vbroadcastss 0x2d79e8(%rip), %ymm30 # 0x1eec714
vfnmadd213ps %ymm30, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm30
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2d7e31(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2d6cae(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm26, %ymm8 # ymm8 = -(ymm26 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm31, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm31, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm30, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm30
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x520(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c15097
vmovaps 0x680(%rsp), %ymm0
vmaxps 0x80(%rsp), %ymm23, %ymm1
vminps %ymm30, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x660(%rsp), %ymm9
vminps %ymm30, %ymm9, %ymm9
vmovaps 0x30c0d5(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x309640(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x320(%rsp), %ymm13
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x680(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x660(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0x540(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm25, %ymm0
vmovaps 0x60(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
vmovaps 0x240(%rsp), %ymm29
vmovaps 0x220(%rsp), %ymm16
je 0x1c1512f
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm30, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm30
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x30bf99(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x1e0(%rsp), %ymm30
vmulps %ymm12, %ymm30, %ymm0
vmovaps 0x1c0(%rsp), %ymm21
vmulps %ymm12, %ymm21, %ymm1
vmovaps 0x580(%rsp), %ymm26
vmulps %ymm12, %ymm26, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vmovaps 0x560(%rsp), %ymm3
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0x3e0(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm30, %ymm10
vmulps %ymm13, %ymm21, %ymm17
vmulps %ymm13, %ymm26, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2d6a27(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x2d7b7c(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vandps 0x3c0(%rsp), %ymm28, %ymm6
vmovaps 0x6c0(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x2dce87(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c16cac
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x180(%rsp), %ymm24
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm23
vmovaps 0xe0(%rsp), %ymm25
jmp 0x1c151a7
vbroadcastss 0x30be38(%rip), %ymm28 # 0x1f20ec4
vbroadcastss 0x2d767f(%rip), %ymm3 # 0x1eec714
jmp 0x1c1509d
vmovaps %ymm30, %ymm3
vmovaps 0x1e0(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x260(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm24
vmovaps 0x480(%rsp), %ymm2
vmovaps 0x300(%rsp), %ymm4
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x3a0(%rsp), %ymm19
vmovaps 0x380(%rsp), %ymm20
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm23
vmovaps 0xe0(%rsp), %ymm25
vmovaps 0x240(%rsp), %ymm29
vmovaps 0x220(%rsp), %ymm16
jmp 0x1c16b7a
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2d68d3(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x2d7a2e(%rip), %ymm2 # 0x1eecb84
vmovaps 0x1e0(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x180(%rsp), %ymm24
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm23
vmovaps 0xe0(%rsp), %ymm25
vmovaps 0x580(%rsp), %ymm26
vmulps %ymm5, %ymm26, %ymm5
vfmadd231ps %ymm3, %ymm21, %ymm5 # ymm5 = (ymm21 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm10) + ymm5
vmovaps 0x520(%rsp), %ymm6
vmovaps %ymm6, 0x7c0(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x7e0(%rsp)
vandps %ymm28, %ymm5, %ymm4
vmaxps %ymm2, %ymm6, %ymm5
vmovaps %ymm5, 0x6e0(%rsp)
vmovaps %ymm8, 0x700(%rsp)
vbroadcastss 0x30bcd7(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0xd4(%rsp)
vcmpleps %ymm3, %ymm6, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x620(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %esi
andb %al, %sil
movl %esi, 0x1c(%rsp)
movl %esi, %eax
orb %cl, %al
vmovaps 0x260(%rsp), %ymm13
vmovaps 0x3a0(%rsp), %ymm19
vmovaps 0x380(%rsp), %ymm20
je 0x1c15de2
movq %r14, 0x2c0(%rsp)
movb %r10b, 0x7(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm9, %ymm26, %ymm2
vfmadd213ps %ymm2, %ymm21, %ymm1 # ymm1 = (ymm21 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm0) + ymm1
vandps %ymm28, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0xcc(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x30bc38(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x30bc2a(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %r15, 0x2b8(%rsp)
vpbroadcastd %r15d, %ymm1
vmovdqa %ymm0, 0x720(%rsp)
vmovdqa %ymm1, 0x6a0(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %r12d
movl %ecx, 0xd0(%rsp)
andb %cl, %r12b
je 0x1c15e0b
vmovaps 0x500(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4e0(%rsp), %ymm3
vmovaps 0x4a0(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x30bb96(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2dcb61(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x24(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x520(%rsp), %ymm0
vaddps 0x5a0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x640(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x2d6690(%rip), %ymm0 # 0x1eeba20
vblendmps 0x520(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x680(%rsp,%rcx), %xmm8
vmovss 0x7c0(%rsp,%rcx), %xmm10
vmovaps 0x350(%rsp), %xmm0
vucomiss 0x2d6628(%rip), %xmm0 # 0x1eeba24
vmovss 0xd8(%rsp), %xmm0
jae 0x1c15442
vmovaps 0x350(%rsp), %xmm0
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm10, 0x60(%rsp)
kmovw %k1, 0x40(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x40(%rsp), %k1
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm8
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2dca5a(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x28(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x370(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2d7293(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x4c0(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x540(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x500(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4e0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x4a0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x2dbaf3(%rip){1to4}, %xmm1, %xmm11 # 0x1ef0fec
vmovaps %xmm4, 0x560(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x320(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2d6507(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm11, 0x40(%rsp)
vmovaps %xmm0, 0x3e0(%rsp)
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm10, 0x60(%rsp)
jb 0x1c15543
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c15555
vzeroupper
callq 0x6aa20
vmovaps 0x40(%rsp), %xmm11
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm11, %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2d71a7(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x2d71a3(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x30b937(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x460(%rsp)
vfnmadd213ss 0x2dba5d(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x290(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x3c0(%rsp)
vmovaps %xmm0, 0x470(%rsp)
jb 0x1c155c2
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c1560f
vmovaps %xmm3, 0xb0(%rsp)
vmovss %xmm4, 0xa0(%rsp)
vmovss %xmm5, 0x280(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x280(%rsp), %xmm5
vmovss 0xa0(%rsp), %xmm4
vmovaps 0xb0(%rsp), %xmm3
vmovss 0x3c0(%rsp), %xmm7
vmovaps 0x40(%rsp), %xmm11
vmovss %xmm0, 0x360(%rsp)
vmovaps 0x320(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm13
vmulps %xmm13, %xmm11, %xmm0
vmovaps %xmm0, 0x450(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x2d70c0(%rip), %xmm7, %xmm31 # 0x1eec714
vmovaps %xmm0, 0xb0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3e0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xa0(%rsp)
vmulss 0x2d708b(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x280(%rsp)
vmulss 0x2d707e(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x2c(%rsp)
vucomiss 0x2d6378(%rip), %xmm0 # 0x1eeba24
jb 0x1c156b4
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c156f6
vmovss %xmm31, 0x20(%rsp)
vmovaps %xmm13, 0x440(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x440(%rsp), %xmm13
vmovss 0x3c0(%rsp), %xmm7
vmovss 0x20(%rsp), %xmm31
vmovaps 0x320(%rsp), %xmm15
vmovaps 0x40(%rsp), %xmm11
vbroadcastss 0x30b7c4(%rip), %ymm26 # 0x1f20ec4
vmovaps 0x1e0(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x260(%rsp), %ymm25
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x100(%rsp), %ymm20
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x240(%rsp), %ymm29
vmovaps 0x220(%rsp), %ymm16
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vmovss 0x2db25b(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x540(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x2db859(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x4a0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4c0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x500(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x470(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm11, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x290(%rsp), %xmm1
vmulss 0x460(%rsp), %xmm1, %xmm3
vmulss 0x28(%rsp), %xmm10, %xmm1
vmovss 0x24(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x30b67b(%rip){1to4}, %xmm11, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm13, %xmm2
vmovaps 0x450(%rsp), %xmm13
vdpps $0x7f, %xmm13, %xmm3, %xmm4
vdivss 0x360(%rsp), %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm31 # xmm31 = (xmm5 * xmm31) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x370(%rsp), %xmm7
vdpps $0x7f, %xmm13, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xa0(%rsp), %xmm6
vmulss 0x2c(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x280(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xb0(%rsp), %xmm28
vfnmadd231ss %xmm4, %xmm28, %xmm3 # xmm3 = -(xmm28 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm28, %xmm7 # xmm7 = -(xmm28 * xmm5) + xmm7
vpermilps $0xff, 0x560(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm28, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm28, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm8, %xmm8
vsubss %xmm3, %xmm10, %xmm10
vbroadcastss 0x30b59b(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm28, %xmm3
vucomiss %xmm3, %xmm31
movb $0x1, %al
jbe 0x1c1598b
vaddss %xmm31, %xmm1, %xmm1
vmovaps 0x420(%rsp), %xmm3
vfmadd231ss 0x2dc563(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c1598b
vaddss 0x430(%rsp), %xmm10, %xmm10
vucomiss 0x3c(%rsp), %xmm10
jb 0x1c15986
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss 0x200(%rcx,%rax,4), %xmm5
vucomiss %xmm10, %xmm5
jae 0x1c159a1
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c15da7
decq %r15
jne 0x1c15466
jmp 0x1c15da4
xorl %eax, %eax
vucomiss 0x2d6079(%rip), %xmm8 # 0x1eeba24
jb 0x1c15988
vmovss 0x2d6d5f(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c15988
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3e0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2d6d3d(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2d6d39(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x30(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x2c8(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
movl 0x240(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c15986
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c15a35
cmpq $0x0, 0x48(%r14)
jne 0x1c15a35
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c1598b
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm11, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm11
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm3 # xmm3 = xmm11[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x30(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm8, %zmm1
vbroadcastss 0x2fcc6c(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x30b434(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x8c0(%rsp)
vmovaps %zmm3, 0x900(%rsp)
vmovaps %zmm0, 0x940(%rsp)
vmovaps %zmm1, 0x980(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovaps 0x880(%rsp), %zmm0
vmovaps %zmm0, 0xa00(%rsp)
vmovdqa64 0x840(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa40(%rsp)
movq 0x2a0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xac0(%rsp)
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss %xmm10, 0x200(%rcx,%rax,4)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x5c0(%rsp)
leaq 0x5c0(%rsp), %rax
movq %rax, 0x2d0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x2d8(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x2e0(%rsp)
movq %rcx, 0x2e8(%rsp)
leaq 0x8c0(%rsp), %rax
movq %rax, 0x2f0(%rsp)
movl $0x10, 0x2f8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm10, 0x60(%rsp)
vmovss %xmm5, 0x40(%rsp)
je 0x1c15c6b
leaq 0x2d0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm5
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x220(%rsp), %ymm16
vmovaps 0x240(%rsp), %ymm29
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x100(%rsp), %ymm20
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x260(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1e0(%rsp), %ymm30
vbroadcastss 0x30b263(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x30b259(%rip), %ymm26 # 0x1f20ec4
vmovdqa64 0x5c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c15d80
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c15d48
testb $0x2, (%rcx)
jne 0x1c15ca9
testb $0x40, 0x3e(%r14)
je 0x1c15d48
leaq 0x2d0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm5
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x220(%rsp), %ymm16
vmovaps 0x240(%rsp), %ymm29
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x100(%rsp), %ymm20
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x260(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1e0(%rsp), %ymm30
vbroadcastss 0x30b186(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x30b17c(%rip), %ymm26 # 0x1f20ec4
vmovdqa64 0x5c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x2e8(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x2d6e15(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %r14b
jmp 0x1c15d83
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c15a2e
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss %xmm5, 0x200(%rcx,%rax,4)
jmp 0x1c15a2e
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %bl
movq 0x10(%rsp), %rdx
movq 0x8(%rsp), %rax
vmovaps 0x640(%rsp), %ymm0
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
vmovaps %ymm26, %ymm28
jne 0x1c15382
jmp 0x1c15e47
vbroadcastss 0x2d6929(%rip), %ymm3 # 0x1eec714
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x480(%rsp), %ymm2
vmovaps 0x300(%rsp), %ymm4
jmp 0x1c16b7a
vmovaps %ymm25, %ymm19
vmovaps %ymm13, %ymm25
vmovaps %ymm23, %ymm20
vmovaps %ymm22, %ymm23
vmovaps %ymm18, %ymm22
vmovaps %ymm17, %ymm18
vmovaps %ymm24, %ymm17
vbroadcastss 0x30b086(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x5a0(%rsp), %ymm3
vaddps 0x620(%rsp), %ymm3, %ymm0
movq 0x8(%rsp), %rax
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd 0xd4(%rsp), %k1
kmovd 0xcc(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x1c(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x30b047(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x30b039(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x620(%rsp)
vpcmpled 0x6a0(%rsp), %ymm0, %k0
kmovd %k0, %r12d
movl %ecx, 0x1c(%rsp)
andb %cl, %r12b
je 0x1c16a09
vmovaps 0x6e0(%rsp), %ymm7
vmovaps 0x500(%rsp), %ymm1
vmovaps 0x4c0(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4e0(%rsp), %ymm5
vmovaps 0x4a0(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2dbf88(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x28(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x420(%rsp)
vmovaps %ymm7, 0x520(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x640(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x2d5abc(%rip), %ymm0 # 0x1eeba20
vblendmps 0x520(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x660(%rsp,%rcx), %xmm8
vmovss 0x700(%rsp,%rcx), %xmm10
vmovaps 0x350(%rsp), %xmm0
vucomiss 0x2d5a54(%rip), %xmm0 # 0x1eeba24
vmovss 0xdc(%rsp), %xmm0
jae 0x1c16016
vmovaps 0x350(%rsp), %xmm0
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm10, 0x60(%rsp)
kmovw %k1, 0x40(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x40(%rsp), %k1
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm8
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2dbe86(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x2c(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x370(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2d66bf(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x4c0(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x540(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x500(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4e0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x4a0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x2daf1f(%rip){1to4}, %xmm1, %xmm11 # 0x1ef0fec
vmovaps %xmm4, 0x560(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x320(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2d5933(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm11, 0x40(%rsp)
vmovaps %xmm0, 0x3e0(%rsp)
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm10, 0x60(%rsp)
jb 0x1c16117
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c16129
vzeroupper
callq 0x6aa20
vmovaps 0x40(%rsp), %xmm11
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm11, %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2d65d3(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x2d65cf(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x30ad63(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x360(%rsp)
vfnmadd213ss 0x2dae89(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x460(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x3c0(%rsp)
vmovaps %xmm0, 0x470(%rsp)
jb 0x1c16198
vsqrtss %xmm0, %xmm0, %xmm26
jmp 0x1c161eb
vmovaps %xmm3, 0xb0(%rsp)
vmovss %xmm4, 0x290(%rsp)
vmovss %xmm5, 0xa0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xa0(%rsp), %xmm5
vmovss 0x290(%rsp), %xmm4
vmovaps 0xb0(%rsp), %xmm3
vmovss 0x3c0(%rsp), %xmm7
vmovaps 0x40(%rsp), %xmm11
vmovaps %xmm0, %xmm26
vmovaps 0x320(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm14
vmulps %xmm14, %xmm11, %xmm0
vmovaps %xmm0, 0x280(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x2d64ed(%rip), %xmm7, %xmm31 # 0x1eec714
vmovaps %xmm0, 0xb0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3e0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0x290(%rsp)
vmulss 0x2d64b8(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0xa0(%rsp)
vmulss 0x2d64ab(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x450(%rsp)
vucomiss 0x2d57a2(%rip), %xmm0 # 0x1eeba24
jb 0x1c1628a
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c162dc
vmovss %xmm31, 0x24(%rsp)
vmovss %xmm26, 0x20(%rsp)
vmovaps %xmm14, 0x440(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x440(%rsp), %xmm14
vmovss 0x20(%rsp), %xmm26
vmovss 0x3c0(%rsp), %xmm7
vmovss 0x24(%rsp), %xmm31
vmovaps 0x320(%rsp), %xmm15
vmovaps 0x40(%rsp), %xmm11
vbroadcastss 0x30abde(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x1e0(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x260(%rsp), %ymm13
vmovaps 0x180(%rsp), %ymm24
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x3a0(%rsp), %ymm19
vmovaps 0x380(%rsp), %ymm20
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm23
vmovaps 0xe0(%rsp), %ymm25
vmovaps 0x240(%rsp), %ymm29
vmovaps 0x220(%rsp), %ymm16
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vmovss 0x2da664(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x540(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x2dac62(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x4a0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4c0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x500(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x470(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm11, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x460(%rsp), %xmm1
vmulss 0x360(%rsp), %xmm1, %xmm3
vmulss 0x2c(%rsp), %xmm10, %xmm1
vmovss 0x28(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x30aa84(%rip){1to4}, %xmm11, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm14, %xmm2
vmovaps 0x280(%rsp), %xmm14
vdpps $0x7f, %xmm14, %xmm3, %xmm4
vdivss %xmm26, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm31 # xmm31 = (xmm5 * xmm31) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x370(%rsp), %xmm7
vdpps $0x7f, %xmm14, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0x290(%rsp), %xmm6
vmulss 0x450(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0xa0(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xb0(%rsp), %xmm26
vfnmadd231ss %xmm4, %xmm26, %xmm3 # xmm3 = -(xmm26 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm26, %xmm7 # xmm7 = -(xmm26 * xmm5) + xmm7
vpermilps $0xff, 0x560(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm11, %xmm11, %xmm0 # xmm0 = xmm11[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm26, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm26, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm8, %xmm8
vsubss %xmm3, %xmm10, %xmm10
vbroadcastss 0x30a9a4(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm26, %xmm3
vucomiss %xmm3, %xmm31
movb $0x1, %al
jbe 0x1c1658d
vaddss %xmm31, %xmm1, %xmm1
vmovaps 0x420(%rsp), %xmm3
vfmadd231ss 0x2db96c(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c1658d
vaddss 0x430(%rsp), %xmm10, %xmm10
vucomiss 0x3c(%rsp), %xmm10
vmovaps 0x300(%rsp), %ymm4
jb 0x1c16586
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss 0x200(%rcx,%rax,4), %xmm5
vucomiss %xmm10, %xmm5
jae 0x1c165ac
xorl %eax, %eax
xorl %r14d, %r14d
jmp 0x1c16596
vmovaps 0x300(%rsp), %ymm4
testb %al, %al
je 0x1c169d4
decq %r15
jne 0x1c1603a
jmp 0x1c169d1
xorl %eax, %eax
vucomiss 0x2d546e(%rip), %xmm8 # 0x1eeba24
jb 0x1c16588
vmovss 0x2d6154(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c16588
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3e0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2d6132(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2d612e(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x30(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x2c8(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
movl 0x240(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c16586
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c16640
cmpq $0x0, 0x48(%r14)
jne 0x1c16640
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c16596
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm11, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm11
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm3 # xmm3 = xmm11[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x30(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm8, %zmm1
vbroadcastss 0x2fc061(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x30a829(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x8c0(%rsp)
vmovaps %zmm3, 0x900(%rsp)
vmovaps %zmm0, 0x940(%rsp)
vmovaps %zmm1, 0x980(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovaps 0x880(%rsp), %zmm0
vmovaps %zmm0, 0xa00(%rsp)
vmovdqa64 0x840(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa40(%rsp)
movq 0x2a0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xac0(%rsp)
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss %xmm10, 0x200(%rcx,%rax,4)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x5c0(%rsp)
leaq 0x5c0(%rsp), %rax
movq %rax, 0x2d0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x2d8(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x2e0(%rsp)
movq %rcx, 0x2e8(%rsp)
leaq 0x8c0(%rsp), %rax
movq %rax, 0x2f0(%rsp)
movl $0x10, 0x2f8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm8, 0x80(%rsp)
vmovaps %xmm10, 0x60(%rsp)
vmovss %xmm5, 0x40(%rsp)
je 0x1c16887
leaq 0x2d0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm5
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x220(%rsp), %ymm16
vmovaps 0x240(%rsp), %ymm29
vmovaps 0xe0(%rsp), %ymm25
vmovaps 0x100(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x380(%rsp), %ymm20
vmovaps 0x3a0(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x300(%rsp), %ymm4
vmovaps 0x180(%rsp), %ymm24
vmovaps 0x260(%rsp), %ymm13
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1e0(%rsp), %ymm30
vbroadcastss 0x30a63d(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x5c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c169ad
movq 0x30(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c16975
testb $0x2, (%rcx)
jne 0x1c168c5
testb $0x40, 0x3e(%r14)
je 0x1c16975
leaq 0x2d0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm5
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm8
vmovaps 0x220(%rsp), %ymm16
vmovaps 0x240(%rsp), %ymm29
vmovaps 0xe0(%rsp), %ymm25
vmovaps 0x100(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x380(%rsp), %ymm20
vmovaps 0x3a0(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x300(%rsp), %ymm4
vmovaps 0x180(%rsp), %ymm24
vmovaps 0x260(%rsp), %ymm13
vmovaps 0x1a0(%rsp), %ymm12
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1e0(%rsp), %ymm30
vbroadcastss 0x30a54f(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x5c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x2e8(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x2d61e8(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %r14b
jmp 0x1c169b0
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c16639
movq 0x10(%rsp), %rax
movq 0x8(%rsp), %rcx
vmovss %xmm5, 0x200(%rcx,%rax,4)
jmp 0x1c16639
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %bl
movq 0x10(%rsp), %rdx
movq 0x8(%rsp), %rax
vmovaps 0x640(%rsp), %ymm0
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c15f56
jmp 0x1c16a4c
vmovaps 0x300(%rsp), %ymm4
vmovaps %ymm17, %ymm24
vmovaps %ymm18, %ymm17
vmovaps %ymm22, %ymm18
vmovaps %ymm23, %ymm22
vmovaps %ymm20, %ymm23
vmovaps %ymm25, %ymm13
vmovaps %ymm19, %ymm25
vmovaps 0x3a0(%rsp), %ymm19
vmovaps 0x380(%rsp), %ymm20
vmovdqa 0x6a0(%rsp), %ymm1
vpcmpltd 0x620(%rsp), %ymm1, %k1
vmovaps 0x7c0(%rsp), %ymm0
vpcmpltd 0x720(%rsp), %ymm1, %k2
vmovaps 0x5a0(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
movq 0x8(%rsp), %rax
vbroadcastss 0x200(%rax,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0xd0(%rsp), %ecx
andb %al, %cl
vmovaps 0x6e0(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x1c(%rsp), %esi
andb %al, %sil
orb %cl, %sil
je 0x1c16b43
movl %r13d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0xb20(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0xb40(%rsp,%rax)
vmovaps 0x410(%rsp), %xmm0
vmovlps %xmm0, 0xb60(%rsp,%rax)
movq 0x2b8(%rsp), %r15
leal 0x1(%r15), %ecx
movl %ecx, 0xb68(%rsp,%rax)
incl %r13d
movq 0x2b0(%rsp), %r9
vbroadcastss 0x2d5bf1(%rip), %ymm3 # 0x1eec714
movb 0x7(%rsp), %r10b
movq 0x2a8(%rsp), %r11
movq 0x2c0(%rsp), %r14
vmovaps 0x480(%rsp), %ymm2
jmp 0x1c16b7a
movq 0x2b0(%rsp), %r9
vbroadcastss 0x2d5bc0(%rip), %ymm3 # 0x1eec714
movb 0x7(%rsp), %r10b
movq 0x2a8(%rsp), %r11
movq 0x2c0(%rsp), %r14
vmovaps 0x480(%rsp), %ymm2
movq 0x2b8(%rsp), %r15
testl %r13d, %r13d
je 0x1c16cee
leal -0x1(%r13), %r8d
leaq (%r8,%r8,2), %rcx
shlq $0x5, %rcx
vmovaps 0xb40(%rsp,%rcx), %ymm0
movzbl 0xb20(%rsp,%rcx), %esi
vaddps 0x5a0(%rsp), %ymm0, %ymm1
movq 0x8(%rsp), %rax
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %eax
andl %esi, %eax
je 0x1c16c85
kmovd %eax, %k1
vbroadcastss 0x2d4e4d(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %al, %sil
je 0x1c16c0b
movzbl %sil, %edi
jmp 0x1c16c0e
movzbl %al, %edi
leaq (%rsp,%rcx), %rsi
addq $0xb20, %rsi # imm = 0xB20
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r15d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %eax, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
vmovaps 0x480(%rsp), %ymm2
je 0x1c16c52
movl %r13d, %r8d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x30a2b6(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x8c0(%rsp)
vmovsd 0x8c0(%rsp,%rcx,4), %xmm0
vmovaps %xmm0, 0x410(%rsp)
movl %r8d, %r13d
testb %al, %al
je 0x1c16b7a
vmovaps 0x410(%rsp), %xmm5
vmovaps 0x760(%rsp), %ymm27
vmovaps %ymm16, %ymm26
jmp 0x1c146fe
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2d5ec8(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2d4d5b(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c15044
testb $0x1, %bl
jne 0x1c16d1e
movq 0x8(%rsp), %rax
vmovaps 0x740(%rsp), %ymm0
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
setne %r10b
jne 0x1c143da
andb $0x1, %r10b
movl %r10d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersector1<8>::intersect_t<embree::avx512::RibbonCurve1Intersector1<embree::BezierCurveT, 8>, embree::avx512::Intersect1EpilogMU<8, true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
static __forceinline void intersect_t(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
// Intersects a single ray with the (up to M) packed curve segments of one
// CurveNv leaf, committing the closest accepted hit through the Epilog.
//
// pre     - per-ray precomputed data, forwarded to the curve intersector
// ray     - ray/hit record; ray.tfar may shrink as closer hits are committed
// context - query context used to look up the geometry in the scene
// prim    - the packed curve leaf (N curves: ids + vertex data)
vfloat<M> tNear;
// Broad phase: test the ray against all M packed curve bounds at once.
// 'valid' marks lanes whose bounds are hit, 'tNear' holds entry distances.
vbool<M> valid = CurveNiIntersector1<M>::intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
// bscf: bit-scan-forward that also clears the found bit from 'mask'
// (destructive -- note the 'if (mask)' test on the remainder below).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
// geomID is leaf-wide (not indexed by i); primID is per-curve.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software-prefetch the control points of the next one or two candidate
// curves (L1 for the immediate next, L2 for the one after), hiding
// memory latency behind the exact intersection test below.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Exact curve/ray test; on acceptance the Epilog records the hit and may
// reduce ray.tfar.
Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,Epilog(ray,context,geomID,primID));
// Cull remaining lanes whose bounds-entry distance now lies beyond the
// (possibly shortened) ray.tfar.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x620, %rsp # imm = 0x620
movq %rcx, %r9
movzbl 0x1(%rcx), %r8d
leaq (%r8,%r8,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r9,%rax), %xmm0
movq %rdx, %r10
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps 0x10(%rsi), %xmm0, %xmm0
vpmovsxbd 0x6(%r9,%r8,4), %ymm1
vpmovsxbd 0x6(%r9,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%r8,%r8,2), %rdx
vpmovsxbd 0x6(%r9,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%r8,%rcx,2), %r11
vpmovsxbd 0x6(%r9,%r11), %ymm1
leal (,%rdx,4), %r11d
vpmovsxbd 0x6(%r9,%r11), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %r8, %r11
vpmovsxbd 0x6(%r9,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%r8,%r8,8), %r11
leal (%r11,%r11), %ebx
vpmovsxbd 0x6(%r9,%rbx), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %r8, %rbx
vpmovsxbd 0x6(%r9,%rbx), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r9,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2fb8f4(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x30a0c2(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x30a037(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x2da149(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2d5837(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%r8,8), %rbx
subq %r8, %rbx
vpmovsxwd 0x6(%r9,%rbx), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r9,%r11), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%r8,%r8), %r11
addq %r8, %rcx
shlq $0x3, %rdx
subq %r8, %rdx
vpbroadcastd %r8d, %ymm7
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %r11, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r9,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc(%rsi){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x308f43(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x20(%rsi){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x308f1f(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x343921(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x5e0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c18e14
leaq (%r9,%rax), %r13
addq $0x6, %r13
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %r13
leaq 0x5102ac(%rip), %r8 # 0x21272e4
leaq 0x5126c5(%rip), %r11 # 0x2129704
vmovaps %ymm6, 0xc0(%rsp)
vmovaps %ymm3, 0x60(%rsp)
vmovaps %ymm21, 0x1e0(%rsp)
vmovaps %ymm20, 0x200(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r9,%rax,4), %ecx
movl %ecx, 0x10(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r9), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x38(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%r13,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c170cc
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%r13,%rdx)
prefetcht0 0x40(%r13,%rdx)
testq %rcx, %rcx
je 0x1c170cc
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r13,%rcx)
prefetcht1 0x40(%r13,%rcx)
vmovups 0x10(%r13,%rax), %xmm13
vmovups 0x20(%r13,%rax), %xmm27
vmovups 0x30(%r13,%rax), %xmm23
movl 0x248(%rbx), %edx
vmovaps (%rsi), %xmm1
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%rdi), %xmm4
vmovaps 0x20(%rdi), %xmm5
vmovaps 0x30(%rdi), %xmm6
vmulps %xmm6, %xmm0, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %r12
vmovups (%r8,%r12), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2fb554(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r8,%r12), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r8,%r12), %ymm15
vbroadcastss %xmm10, %ymm30
vpermps %ymm10, %ymm1, %ymm29
vmovups 0xd8c(%r8,%r12), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm31
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm31, %ymm4
vfmadd231ps %ymm30, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm30) + ymm5
vfmadd231ps %ymm29, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm29) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
vmovups (%r11,%r12), %ymm2
vmovups 0x484(%r11,%r12), %ymm17
vmovups 0x908(%r11,%r12), %ymm18
vmovups 0xd8c(%r11,%r12), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm31, %ymm6
vfmadd231ps %ymm30, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm30) + ymm7
vfmadd231ps %ymm29, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm29) + ymm6
vmovaps %ymm21, 0x120(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x440(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x240(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x140(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm26
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm26, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x520(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x540(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x560(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x580(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x2a0(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm26, 0x280(%rsp)
vfmadd231ps %ymm26, %ymm26, %ymm21 # ymm21 = (ymm26 * ymm26) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x40(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x309b34(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x190(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x180(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x170(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %edx, %xmm23, %xmm12
vmovaps %xmm12, 0x2c0(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x309b23(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2d9bbd(%rip), %xmm3, %xmm27 # 0x1ef0fe4
vbroadcastss 0x309aac(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0xc(%rsi), %xmm8
vmovaps %ymm30, 0x5c0(%rsp)
vmovaps %ymm29, 0x5a0(%rsp)
vmovaps %ymm20, 0x500(%rsp)
vmovaps %ymm21, 0x4e0(%rsp)
vmovaps %ymm22, 0x4c0(%rsp)
vmovaps %ymm3, 0x4a0(%rsp)
je 0x1c17bb6
vmovaps %xmm8, 0x50(%rsp)
vmovaps %xmm27, 0x260(%rsp)
vmovaps %ymm3, %ymm27
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm27, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r8,%r12), %ymm3
vmovups 0x1694(%r8,%r12), %ymm10
vmovups 0x1b18(%r8,%r12), %ymm11
vmovups 0x1f9c(%r8,%r12), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm31, %ymm8
vmulps %ymm12, %ymm27, %ymm12
vfmadd231ps %ymm30, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm30) + ymm9
vfmadd231ps %ymm29, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm29) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x120(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x440(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x240(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x140(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%r11,%r12), %ymm10
vmovups 0x1b18(%r11,%r12), %ymm11
vmovups 0x1f9c(%r11,%r12), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x1c0(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm31, 0x1a0(%rsp)
vmulps %ymm13, %ymm31, %ymm14
vmulps %ymm13, %ymm27, %ymm13
vmovaps 0x260(%rsp), %xmm27
vfmadd231ps %ymm30, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm30) + ymm3
vfmadd231ps %ymm29, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm29) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%r11,%r12), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x3098d9(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm27, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x280(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x2a0(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x30985f(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm23, %xmm23, %xmm23
vfmadd213ps %ymm23, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm23
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2d5092(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2d5070(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm23, %ymm9, %ymm13
vfmadd213ps %ymm23, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm23
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm23, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm13) + ymm3
vcmpleps %ymm23, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm23, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x200(%rsp), %ymm20
vmovaps 0x1e0(%rsp), %ymm21
vmovaps %ymm17, %ymm22
je 0x1c1899b
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2d4e72(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x50(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x1a0(%rsp), %ymm31
je 0x1c189ce
vcmpneqps %ymm23, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
je 0x1c189eb
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2d4df3(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x420(%rsp)
movzbl %al, %r15d
vmovaps %ymm2, %ymm3
testw %r15w, %r15w
je 0x1c17bf3
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r15b
je 0x1c17bf3
vbroadcastss 0x2d904c(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x420(%rsp), %ymm1
vfmadd132ps 0x2d9665(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x2e0(%rsp)
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm3, 0x320(%rsp)
movl $0x0, 0x340(%rsp)
movl %edx, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x190(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x170(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movb %r15b, 0x390(%rsp)
movl 0x24(%rsi), %eax
testl %eax, 0x34(%rbx)
je 0x1c17bf3
vaddps 0x30951c(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2d4ce8(%rip), %xmm1 # 0x1eec714
vdivss 0x2c0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm3, 0x3e0(%rsp)
kmovd %r15d, %k1
vbroadcastss 0x2d3fb0(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r15b, %al
movzbl %al, %eax
movzbl %r15b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c18a02
cmpq $0x0, 0x40(%rbx)
jne 0x1c18a02
vmovss 0x3a0(%rsp,%rcx,4), %xmm0
vmovss 0x3c0(%rsp,%rcx,4), %xmm1
vmovss 0x2d4c32(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmovaps %ymm3, %ymm29
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d94f4(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d8ed7(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmovaps %ymm6, %ymm30
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d94e0(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d94cc(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x170(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x180(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vmovaps %ymm30, %ymm6
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x190(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vmovss 0x3e0(%rsp,%rcx,4), %xmm4
vmovss %xmm4, 0x20(%rsi)
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovaps %ymm29, %ymm3
vmovlps %xmm2, 0x30(%rsi)
vextractps $0x2, %xmm2, 0x38(%rsi)
vmovss %xmm0, 0x3c(%rsi)
vmovss %xmm1, 0x40(%rsi)
movl 0x10(%rsp), %eax
movl %eax, 0x44(%rsi)
movq 0x38(%rsp), %rax
movl %eax, 0x48(%rsi)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c(%rsi)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%rsi)
jmp 0x1c17bf3
vxorps %xmm23, %xmm23, %xmm23
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x200(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x240(%rsp), %ymm22
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm19
cmpl $0x9, %edx
jge 0x1c17c1b
vmovaps 0x5e0(%rsp), %ymm0
vcmpleps 0x20(%rsi){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c1703f
jmp 0x1c18e14
vpbroadcastd %edx, %ymm0
vmovdqa %ymm0, 0x2a0(%rsp)
vbroadcastss %xmm27, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x260(%rsp)
vmovss 0x2d4ac5(%rip), %xmm0 # 0x1eec714
vdivss 0x2c0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
movl $0x8, %ebx
vmovaps %ymm6, 0xc0(%rsp)
vmovaps %ymm3, 0x60(%rsp)
vpbroadcastd %ebx, %ymm0
vpor 0x342c98(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x2a0(%rsp), %ymm0, %k1
leaq (%r12,%r8), %rcx
vmovups (%rcx,%rbx,4), %ymm3
vmovups 0x484(%rcx,%rbx,4), %ymm10
vmovups 0x908(%rcx,%rbx,4), %ymm11
vmovups 0xd8c(%rcx,%rbx,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm31, %ymm4
vmovaps 0x520(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x5c0(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm26) + ymm5
vmovaps 0x5a0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm19, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm19) + ymm5
vmovaps 0x440(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x560(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%r12,%r11), %rax
vmovups (%rax,%rbx,4), %ymm2
vmovups 0x484(%rax,%rbx,4), %ymm13
vmovaps 0x580(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%rbx,4), %ymm14
vmovups 0xd8c(%rax,%rbx,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm31, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm26, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm26) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm19, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm19) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm19, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm19
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c18512
vmovaps %ymm25, %ymm16
vmovaps %ymm27, %ymm25
vmovaps %ymm26, %ymm27
vmovaps 0x4a0(%rsp), %ymm26
vmulps %ymm15, %ymm26, %ymm15
vmovaps 0x4c0(%rsp), %ymm30
vfmadd213ps %ymm15, %ymm30, %ymm14 # ymm14 = (ymm30 * ymm14) + ymm15
vmovaps 0x4e0(%rsp), %ymm29
vfmadd213ps %ymm14, %ymm29, %ymm13 # ymm13 = (ymm29 * ymm13) + ymm14
vmovaps 0x500(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm26, %ymm12
vfmadd213ps %ymm12, %ymm30, %ymm11 # ymm11 = (ymm30 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm29, %ymm10 # ymm10 = (ymm29 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%rbx,4), %ymm12
vmovups 0x1694(%rcx,%rbx,4), %ymm13
vmovups 0x1b18(%rcx,%rbx,4), %ymm14
vmovups 0x1f9c(%rcx,%rbx,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm31, %ymm10
vmulps %ymm15, %ymm26, %ymm15
vfmadd231ps %ymm27, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm27) + ymm11
vfmadd231ps %ymm25, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm25) + ymm10
vfmadd231ps %ymm14, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm16, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm16) + ymm10
vfmadd231ps %ymm13, %ymm29, %ymm15 # ymm15 = (ymm29 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm19, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm19) + ymm10
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%rbx,4), %ymm13
vmovups 0x1b18(%rax,%rbx,4), %ymm14
vmovups 0x1f9c(%rax,%rbx,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm31, %ymm12
vmulps %ymm16, %ymm26, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vmovups 0x1694(%rax,%rbx,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x440(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm29, %ymm16 # ymm16 = (ymm29 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x308f83(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x280(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x308f0f(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm23, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm23
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2d4746(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2d4724(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm23, %ymm14, %ymm13
vfmadd213ps %ymm23, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm23
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm23, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm23, %ymm6 # ymm6 = (ymm23 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm23, %ymm6 # ymm6 = (ymm23 * ymm13) + ymm6
vcmpleps %ymm23, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm23, %ymm13, %k0 {%k1}
kortestb %k0, %k0
je 0x1c1853e
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2d4539(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x260(%rsp), %ymm2, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c1853e
vcmpneqps %ymm23, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm19
je 0x1c1856a
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2d44c3(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x400(%rsp)
movzbl %al, %ecx
vmovaps %ymm2, %ymm21
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
testw %cx, %cx
je 0x1c1852d
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %cl
je 0x1c1852d
vbroadcastss 0x2d870b(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x400(%rsp), %ymm1
vfmadd132ps 0x2d8d24(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x2e0(%rsp)
vmovaps %ymm1, 0x400(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm21, 0x320(%rsp)
movl %ebx, 0x340(%rsp)
movl %edx, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x190(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x170(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movb %cl, 0x390(%rsp)
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movl %ecx, %r15d
movq 0x38(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x24(%rsi), %eax
movq %rcx, 0x50(%rsp)
testl %eax, 0x34(%rcx)
je 0x1c1852d
vaddps 0x308bc5(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %ebx, %xmm22, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x2c0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
kmovd %r15d, %k1
vbroadcastss 0x2d365c(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r15b, %al
movzbl %al, %eax
movl %r15d, 0xe0(%rsp)
movzbl %r15b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r15d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c18571
movq 0x50(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c18571
vmovss 0x3a0(%rsp,%r15,4), %xmm0
vmovss 0x3c0(%rsp,%r15,4), %xmm1
vmovss 0x2d42ce(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d8b96(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d8579(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d8b88(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d8b74(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x170(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x180(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x190(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vmovss 0x3e0(%rsp,%r15,4), %xmm4
vmovss %xmm4, 0x20(%rsi)
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovlps %xmm2, 0x30(%rsi)
vextractps $0x2, %xmm2, 0x38(%rsi)
vmovss %xmm0, 0x3c(%rsi)
vmovss %xmm1, 0x40(%rsi)
movl 0x10(%rsp), %eax
movl %eax, 0x44(%rsi)
movq 0x38(%rsp), %rax
movl %eax, 0x48(%rsi)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c(%rsi)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%rsi)
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
jmp 0x1c1852d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps %ymm19, %ymm18
vmovaps %ymm24, %ymm19
addq $0x8, %rbx
cmpl %ebx, %edx
jg 0x1c17c7a
jmp 0x1c17bf8
xorl %ecx, %ecx
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm19
jmp 0x1c18288
xorl %ecx, %ecx
jmp 0x1c18279
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x490(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x470(%rsp)
vmovaps %ymm20, 0x200(%rsp)
vmovaps %ymm21, 0x1e0(%rsp)
movl 0xe0(%rsp), %ecx
movq %r9, 0x30(%rsp)
movq %r10, 0x28(%rsp)
movq %rsi, 0x20(%rsp)
movq %rdi, 0x18(%rsp)
movl %edx, 0xc(%rsp)
vmovaps %ymm28, 0x1c0(%rsp)
vmovaps %ymm31, 0x1a0(%rsp)
movl %ecx, 0xe0(%rsp)
vmovss 0x3a0(%rsp,%r15,4), %xmm0
vmovss 0x3c0(%rsp,%r15,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x220(%rsp)
vmovss 0x3e0(%rsp,%r15,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%r10), %rax
movq %rax, 0x230(%rsp)
vmovss 0x2d40e2(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d89aa(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d838d(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d899c(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d8988(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x470(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x480(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x490(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x10(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x38(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x14(%rsp) # imm = 0xFFFFFFFF
leaq 0x14(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x50(%rsp), %rax
movq 0x18(%rax), %rcx
movq %rcx, 0x98(%rsp)
movq 0x230(%rsp), %rax
movq %rax, 0xa0(%rsp)
movq %rsi, 0xa8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, 0xb8(%rsp)
movq 0x50(%rsp), %rax
movq 0x40(%rax), %rax
testq %rax, %rax
je 0x1c187e9
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x510f47(%rip), %r11 # 0x2129704
leaq 0x50eb20(%rip), %r8 # 0x21272e4
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c188ed
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1888e
testb $0x2, (%rcx)
jne 0x1c1880a
movq 0x50(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c18881
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x510e9e(%rip), %r11 # 0x2129704
leaq 0x50ea77(%rip), %r8 # 0x21272e4
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c188ed
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0xc(%rsp), %edx
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c188fb
vmovss 0x220(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
movzbl 0xe0(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x20(%rsi){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %ecx
ktestb %k1, %k0
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
je 0x1c1898e
kmovd %ecx, %k1
vbroadcastss 0x2d30d5(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %cl, %al
movzbl %al, %r15d
movl %ecx, %eax
movzbl %al, %ecx
cmovnel %r15d, %ecx
tzcntl %ecx, %r15d
movl %eax, %ecx
testb %cl, %cl
jne 0x1c185e6
jmp 0x1c1852d
xorl %r15d, %r15d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x50(%rsp), %xmm8
jmp 0x1c17946
xorl %r15d, %r15d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x40(%rsp), %xmm7
jmp 0x1c17946
xorl %r15d, %r15d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
jmp 0x1c17946
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x220(%rsp)
movq %r9, 0x30(%rsp)
movq %r10, 0x28(%rsp)
movq %rsi, 0x20(%rsp)
movq %rdi, 0x18(%rsp)
vmovaps %ymm6, 0xc0(%rsp)
vmovaps %ymm3, 0x60(%rsp)
movl %edx, 0xc(%rsp)
vmovss 0x3a0(%rsp,%rcx,4), %xmm0
vmovss 0x3c0(%rsp,%rcx,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x280(%rsp)
movq %rcx, 0x2a0(%rsp)
vmovss 0x3e0(%rsp,%rcx,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%r10), %rax
vmovss 0x2d3c73(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d853b(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d7f1e(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d852d(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d8519(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x220(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x230(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xe0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x10(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x38(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x14(%rsp) # imm = 0xFFFFFFFF
leaq 0x14(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %rsi, 0xa8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, 0xb8(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
je 0x1c18c54
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x260(%rsp), %xmm27
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x510adc(%rip), %r11 # 0x2129704
leaq 0x50e6b5(%rip), %r8 # 0x21272e4
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c18d65
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c18d06
testb $0x2, (%rcx)
jne 0x1c18c74
testb $0x40, 0x3e(%rbx)
je 0x1c18cf9
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x260(%rsp), %xmm27
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x510a26(%rip), %r11 # 0x2129704
leaq 0x50e5ff(%rip), %r8 # 0x21272e4
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c18d65
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0xc(%rsp), %edx
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c18d73
vmovss 0x280(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
vmovaps 0x60(%rsp), %ymm3
movl $0x1, %eax
movq 0x2a0(%rsp), %rcx
shlxl %ecx, %eax, %eax
kmovd %eax, %k0
movzbl %r15b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x20(%rsi){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r15d
ktestb %k1, %k0
vmovaps 0xc0(%rsp), %ymm6
je 0x1c18e06
kmovd %r15d, %k1
vbroadcastss 0x2d2c58(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r15b, %al
movzbl %al, %eax
movzbl %r15b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
testb %r15b, %r15b
jne 0x1c18a5f
jmp 0x1c17bf3
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersector1<8>::occluded_t<embree::avx512::RibbonCurve1Intersector1<embree::BezierCurveT, 8>, embree::avx512::Occluded1EpilogMU<8, true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
/// Occlusion (shadow-ray) test of one ray against a block of up to M=8 curve
/// primitives stored in the compressed CurveNv layout.
///
/// @param pre     per-ray precalculations (ray-space transform etc.); passed
///                through unchanged to the per-curve Intersector.
/// @param ray     the ray being tested; ray.tfar is read to cull remaining
///                candidates after each hit test.
/// @param context traversal context; used to resolve geomID -> geometry.
/// @param prim    the CurveNv<8> primitive block.
/// @return true as soon as any curve in the block reports occlusion
///         (via the Occluded1 epilog), false if no curve occludes the ray.
static __forceinline bool occluded_t(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
// Coarse per-lane test of the ray against the block; fills tNear with the
// per-curve entry distances and returns the lanes that passed.
// NOTE(review): this delegates to CurveNiIntersector1<M>::intersect even
// though this is the Nv variant — presumably the bounding test is shared
// between the Ni/Nv layouts; confirm against curveNi_intersector.h.
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersector1<M>::intersect(ray,prim,tNear);

const size_t N = prim.N;
// Iterate over the surviving lanes as a scalar bit mask.
size_t mask = movemask(valid);
while (mask)
{
// bscf: pop (clear and return) the lowest set bit — the next candidate curve.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i (Vec3ff carries position + radius
// in the 4th component of the loaded vector).
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);

// Software prefetch of the next one or two candidates while the current
// curve is being intersected: the immediately-next curve (i1) into L1,
// the one after that (i2) into L2. mask1 is a scratch copy so the real
// iteration state in `mask` is not disturbed beyond the bscf above.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}

// Exact ribbon-curve test; the Occluded1 epilog commits the hit and makes
// the whole query succeed immediately (any occluder suffices for shadows).
if (Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,Epilog(ray,context,geomID,primID)))
return true;

// The epilog may have shortened ray.tfar; drop remaining candidates whose
// entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x660, %rsp # imm = 0x660
movq %rcx, %r9
movq %rdi, 0x58(%rsp)
movzbl 0x1(%rcx), %ecx
leaq (%rcx,%rcx,4), %r10
leaq (%r10,%r10,4), %rax
vbroadcastss 0x12(%r9,%rax), %xmm0
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%rax), %xmm1, %xmm1
vmulps 0x10(%rsi), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%r9,%rcx,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%r9,%r10), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rcx,%rcx,2), %r11
vpmovsxbd 0x6(%r9,%r11,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rcx,%r10,2), %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%r11,4), %edi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rcx, %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rcx,%rcx,8), %rdi
leal (%rdi,%rdi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm0
addq %rcx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %r10d
vpmovsxbd 0x6(%r9,%r10), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x2f9803(%rip), %ymm16 # 0x1f12704
vpermps %ymm2, %ymm16, %ymm15
vbroadcastss 0x307fcb(%rip), %ymm17 # 0x1f20edc
vpermps %ymm2, %ymm17, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vpermps %ymm3, %ymm17, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x307f3f(%rip), %ymm8 # 0x1f20ec4
vandps %ymm4, %ymm8, %ymm5
vbroadcastss 0x2d8056(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm1, %ymm8, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm0, %ymm8, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2d3744(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r8
subq %rcx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rdi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rcx,%rcx), %rdi
addq %rcx, %r10
shlq $0x3, %r11
subq %rcx, %r11
movl %ecx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %rdi, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%r10), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%r11), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%rsi){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x306e59(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%rsi){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x306e35(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %ecx, %ymm1
vpcmpgtd 0x341831(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x620(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r10b
je 0x1c1ab64
leaq (%r9,%rax), %r14
addq $0x6, %r14
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
addq $0x10, %r14
leaq 0x50e1b8(%rip), %r11 # 0x21272e4
movq 0x58(%rsp), %r8
vmovaps %ymm6, 0xa0(%rsp)
vmovaps %ymm2, 0x80(%rsp)
vmovaps %ymm21, 0x200(%rsp)
vmovaps %ymm20, 0x220(%rsp)
tzcntq %r12, %rax
blsrq %r12, %r12
movl 0x6(%r9,%rax,4), %ecx
movl %ecx, 0x3c(%rsp)
shll $0x6, %eax
movq %r12, %rcx
movl 0x2(%r9), %ebx
movq (%rdx), %rdi
movq 0x1e8(%rdi), %rdi
movq %rbx, 0x78(%rsp)
movq (%rdi,%rbx,8), %r15
vmovups (%r14,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c191be
andq %r12, %rcx
tzcntq %r12, %rdi
shll $0x6, %edi
prefetcht0 (%r14,%rdi)
prefetcht0 0x40(%r14,%rdi)
testq %rcx, %rcx
je 0x1c191be
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r14,%rcx)
prefetcht1 0x40(%r14,%rcx)
vmovups 0x10(%r14,%rax), %xmm13
vmovups 0x20(%r14,%rax), %xmm30
vmovups 0x30(%r14,%rax), %xmm23
movl 0x248(%r15), %r13d
vmovaps (%rsi), %xmm1
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%r8), %xmm4
vmovaps 0x20(%r8), %xmm5
vmovaps 0x30(%r8), %xmm6
vmulps %xmm6, %xmm0, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm30, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r13d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
vmovups (%r11,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2f945e(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r11,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r11,%rbx), %ymm15
vbroadcastss %xmm10, %ymm26
vpermps %ymm10, %ymm1, %ymm29
vmovups 0xd8c(%r11,%rbx), %ymm16
vbroadcastss %xmm11, %ymm27
vpermps %ymm11, %ymm1, %ymm28
vmulps %ymm16, %ymm27, %ymm5
vmulps %ymm16, %ymm28, %ymm4
vfmadd231ps %ymm26, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm26) + ymm5
vfmadd231ps %ymm29, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm29) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x5103e0(%rip), %rdi # 0x2129704
vmovups (%rdi,%rbx), %ymm2
vmovups 0x484(%rdi,%rbx), %ymm17
vmovups 0x908(%rdi,%rbx), %ymm18
vmovups 0xd8c(%rdi,%rbx), %ymm19
vmulps %ymm19, %ymm27, %ymm7
vmulps %ymm19, %ymm28, %ymm6
vfmadd231ps %ymm26, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm26) + ymm7
vfmadd231ps %ymm29, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm29) + ymm6
vmovaps %ymm21, 0x160(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x4a0(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x260(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x240(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm30, %xmm30, %xmm0 # xmm0 = xmm30[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x520(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x540(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x560(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x580(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x120(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x1e0(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x40(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x307a38(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x2b0(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm30, %xmm3
vmovaps %xmm30, 0x2a0(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x290(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r13d, %xmm23, %xmm12
vmovaps %xmm12, 0x60(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x307a2a(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2d7ac6(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x3079b5(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0xc(%rsi), %xmm9
vmovaps %ymm26, 0x600(%rsp)
vmovaps %ymm29, 0x5e0(%rsp)
vmovaps %ymm27, 0x5c0(%rsp)
vmovaps %ymm28, 0x5a0(%rsp)
vmovaps %ymm20, 0x500(%rsp)
vmovaps %ymm21, 0x4e0(%rsp)
vmovaps %ymm22, 0x4c0(%rsp)
je 0x1c19eca
vmovaps %xmm9, 0x1c0(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r11,%rbx), %ymm3
vmovups 0x1694(%r11,%rbx), %ymm10
vmovups 0x1b18(%r11,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%r11,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm27, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm28, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm26, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm26) + ymm9
vfmadd231ps %ymm29, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm29) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x160(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x4a0(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm23) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x260(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x240(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdi,%rbx), %ymm10
vmovups 0x1b18(%rdi,%rbx), %ymm11
vmovups 0x1f9c(%rdi,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmulps %ymm13, %ymm27, %ymm3
vmulps %ymm13, %ymm28, %ymm14
vmovaps %ymm15, 0x180(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm26, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm26) + ymm3
vfmadd231ps %ymm29, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm29) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdi,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm23, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm23) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x3077ef(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm26
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x1e0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x120(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x30776f(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm28, %xmm28, %xmm28
vfmadd213ps %ymm28, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm28
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2d2fa2(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2d2f80(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm28, %ymm9, %ymm13
vfmadd213ps %ymm28, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm28
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm28, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm13) + ymm3
vcmpleps %ymm28, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm28, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x220(%rsp), %ymm20
vmovaps 0x200(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm26, %xmm14
je 0x1c1aad1
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm28, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm28) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2d2d76(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c1ab02
vcmpneqps %ymm28, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm8
je 0x1c1ab2a
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2d2cfb(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x440(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c19ec6
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r8){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c19ec6
vbroadcastss 0x2d6f5b(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x440(%rsp), %ymm1
vfmadd132ps 0x2d7574(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x2e0(%rsp)
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm2, 0x320(%rsp)
movl $0x0, 0x340(%rsp)
movl %r13d, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x2b0(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x2a0(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x290(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movb %al, 0x390(%rsp)
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r15)
je 0x1c19ec6
movq 0x10(%rdx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c19b26
movb $0x1, %cl
cmpq $0x0, 0x48(%r15)
je 0x1c19f11
vmovaps %ymm6, 0xa0(%rsp)
vaddps 0x307409(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2d2bd5(%rip), %xmm1 # 0x1eec714
vdivss 0x60(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm2, 0x80(%rsp)
vmovaps %ymm2, 0x3e0(%rsp)
movzbl %al, %eax
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
movq %rax, 0x1e0(%rsp)
tzcntq %rax, %rax
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x150(%rsp)
movb $0x1, %cl
vmovaps %xmm14, 0x2c0(%rsp)
movq %r9, 0x50(%rsp)
movq %rdx, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movb %r10b, 0x1b(%rsp)
movl %ecx, 0x120(%rsp)
vmovss 0x3a0(%rsp,%rax,4), %xmm0
vmovss 0x3c0(%rsp,%rax,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x20(%rsp)
movq %rax, 0x400(%rsp)
vmovss 0x3e0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%rdx), %rax
vmovss 0x2d2aee(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d73b6(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d6d99(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d73a8(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d7394(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x150(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x1a0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x1b0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x3c(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x1c(%rsp) # imm = 0xFFFFFFFF
leaq 0x1c(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x18(%r15), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %rsi, 0xd8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x1, 0xe8(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
je 0x1c19dd7
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x2c0(%rsp), %xmm14
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x220(%rsp), %ymm20
vxorps %xmm28, %xmm28, %xmm28
leaq 0x50f95e(%rip), %rdi # 0x2129704
leaq 0x50d537(%rip), %r11 # 0x21272e4
movb 0x1b(%rsp), %r10b
movq 0x58(%rsp), %r8
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0x50(%rsp), %r9
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c19e8c
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1ab43
testb $0x2, (%rcx)
jne 0x1c19df8
testb $0x40, 0x3e(%r15)
je 0x1c19e7b
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x2c0(%rsp), %xmm14
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x220(%rsp), %ymm20
vxorps %xmm28, %xmm28, %xmm28
leaq 0x50f8a9(%rip), %rdi # 0x2129704
leaq 0x50d482(%rip), %r11 # 0x21272e4
movb 0x1b(%rsp), %r10b
movq 0x58(%rsp), %r8
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0x50(%rsp), %r9
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x1c1ab43
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movq 0x400(%rsp), %rax
movq 0x1e0(%rsp), %rcx
btcq %rax, %rcx
movq %rcx, 0x1e0(%rsp)
tzcntq %rcx, %rax
setae %cl
jae 0x1c19be0
jmp 0x1c1ab4a
xorl %ecx, %ecx
jmp 0x1c19f11
xorl %ecx, %ecx
vxorps %xmm28, %xmm28, %xmm28
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
vmovaps %xmm12, %xmm14
cmpl $0x9, %r13d
jge 0x1c19f47
testb $0x1, %cl
jne 0x1c1ab64
vmovaps 0x620(%rsp), %ymm0
vcmpleps 0x20(%rsi){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r12d
setne %r10b
jne 0x1c19131
jmp 0x1c1ab64
vmovaps %ymm8, 0x180(%rsp)
vpbroadcastd %r13d, %ymm0
vmovdqa %ymm0, 0x1e0(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovss 0x2d2791(%rip), %xmm0 # 0x1eec714
vdivss 0x60(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
movl $0x8, %r15d
vmovaps %ymm6, 0xa0(%rsp)
vmovaps %ymm2, 0x80(%rsp)
movl %ecx, 0x120(%rsp)
vpbroadcastd %r15d, %ymm0
vpor 0x34095c(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x1e0(%rsp), %ymm0, %k1
leaq (%rbx,%r11), %rcx
vmovups (%rcx,%r15,4), %ymm3
vmovups 0x484(%rcx,%r15,4), %ymm10
vmovups 0x908(%rcx,%r15,4), %ymm11
vmovups 0xd8c(%rcx,%r15,4), %ymm12
vmovaps 0x5c0(%rsp), %ymm19
vmulps %ymm12, %ymm19, %ymm5
vmovaps 0x5a0(%rsp), %ymm29
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x520(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x600(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm26) + ymm5
vmovaps 0x5e0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x4a0(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x560(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdi), %rax
vmovups (%rax,%r15,4), %ymm2
vmovups 0x484(%rax,%r15,4), %ymm13
vmovaps 0x580(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r15,4), %ymm14
vmovups 0xd8c(%rax,%r15,4), %ymm15
vmulps %ymm15, %ymm19, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm26, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm26) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c1a9bd
vmovaps %ymm23, %ymm16
vmovaps 0x180(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x4c0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x4e0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x500(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r15,4), %ymm12
vmovups 0x1694(%rcx,%r15,4), %ymm13
vmovups 0x1b18(%rcx,%r15,4), %ymm14
vmovups 0x1f9c(%rcx,%r15,4), %ymm15
vmulps %ymm15, %ymm19, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm26, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm26) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm19, %ymm17
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r15,4), %ymm13
vmovups 0x1b18(%rax,%r15,4), %ymm14
vmovups 0x1f9c(%rax,%r15,4), %ymm16
vmulps %ymm16, %ymm17, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm27, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm27) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r15,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x306c2c(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x1c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x306bb8(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm28, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm28
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2d23ef(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2d23cd(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm28, %ymm14, %ymm13
vfmadd213ps %ymm28, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm28
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm28, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm28, %ymm6 # ymm6 = (ymm28 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm28, %ymm6 # ymm6 = (ymm28 * ymm13) + ymm6
vcmpleps %ymm28, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm28, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c1aa09
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm28, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm28) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2d21dc(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x400(%rsp), %ymm2, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl 0x120(%rsp), %ecx
je 0x1c1aa37
vcmpneqps %ymm28, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm17
je 0x1c1aa5e
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2d2167(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x420(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
testw %ax, %ax
je 0x1c1a9f7
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r8){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c1a9db
vbroadcastss 0x2d63ac(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x420(%rsp), %ymm1
vfmadd132ps 0x2d69c5(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x2e0(%rsp)
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm21, 0x320(%rsp)
movl %r15d, 0x340(%rsp)
movl %r13d, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x2b0(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x2a0(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x290(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movl %eax, 0x20(%rsp)
movb %al, 0x390(%rsp)
movq (%rdx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0x78(%rsp), %rax
movq (%rcx,%rax,8), %rax
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rax)
je 0x1c1a9e4
movq 0x10(%rdx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c1a6ea
movb $0x1, %cl
movl %ecx, 0x60(%rsp)
cmpq $0x0, 0x48(%rax)
je 0x1c1a9ec
movq %rax, 0x150(%rsp)
movb %r10b, 0x1b(%rsp)
movq %r9, 0x50(%rsp)
vmovaps %ymm20, 0x220(%rsp)
vaddps 0x306832(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r15d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x2c0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm21, 0x200(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
movzbl 0x20(%rsp), %r8d
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x490(%rsp)
tzcntq %r8, %rcx
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x470(%rsp)
movb $0x1, %al
movq %rdx, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movl %eax, 0x60(%rsp)
vmovss 0x3a0(%rsp,%rcx,4), %xmm0
vmovss 0x3c0(%rsp,%rcx,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x1a0(%rsp)
movq %rcx, 0x1b0(%rsp)
vmovss 0x3e0(%rsp,%rcx,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%rdx), %rax
vmovss 0x2d1f30(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d67f8(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d61db(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d67ea(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d67d6(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x470(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x480(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x490(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x3c(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x1c(%rsp) # imm = 0xFFFFFFFF
leaq 0x1c(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x150(%rsp), %rdi
movq 0x18(%rdi), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %rsi, 0xd8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x1, 0xe8(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
movq %r8, 0x20(%rsp)
je 0x1c1a939
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x20(%rsp), %r8
vmovaps 0x40(%rsp), %xmm7
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c1a990
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1aa65
testb $0x2, (%rcx)
jne 0x1c1a95d
movq 0x150(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c1a96a
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
movq 0x30(%rsp), %rdx
movq 0x28(%rsp), %rsi
vmovaps 0x40(%rsp), %xmm7
movq 0x20(%rsp), %r8
jne 0x1c1aa65
vmovss 0x1a0(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movq 0x1b0(%rsp), %rax
btcq %rax, %r8
tzcntq %r8, %rcx
setae %al
jae 0x1c1a79e
jmp 0x1c1aa69
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
movl 0x120(%rsp), %ecx
jmp 0x1c1a9f7
movl $0x0, 0x60(%rsp)
movl 0x120(%rsp), %ecx
orb 0x60(%rsp), %cl
addq $0x8, %r15
cmpl %r15d, %r13d
jg 0x1c19faf
jmp 0x1c19f17
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm17
movl 0x120(%rsp), %ecx
jmp 0x1c1a5e7
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm17
jmp 0x1c1a5e7
xorl %eax, %eax
jmp 0x1c1a5d5
movl 0x60(%rsp), %eax
andb $0x1, %al
movl %eax, 0x60(%rsp)
movq 0x50(%rsp), %r9
movq 0x58(%rsp), %r8
movb 0x1b(%rsp), %r10b
leaq 0x50c85f(%rip), %r11 # 0x21272e4
leaq 0x50ec78(%rip), %rdi # 0x2129704
vxorps %xmm28, %xmm28, %xmm28
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
jmp 0x1c1a9ec
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %xmm9
jmp 0x1c19a39
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm8
jmp 0x1c19a39
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
jmp 0x1c19a39
movl 0x120(%rsp), %ecx
andb $0x1, %cl
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
jmp 0x1c19f11
andb $0x1, %r10b
movl %r10d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 4>::intersect_t<embree::avx512::RibbonCurve1IntersectorK<embree::BezierCurveT, 4, 8>, embree::avx512::Intersect1KEpilogMU<8, 4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
/// Intersects lane k of a K-wide ray packet against all curves stored in one
/// CurveNv leaf primitive, reporting hits through the supplied Epilog.
///
/// @param pre      per-ray precalculated data, forwarded to the curve intersector
/// @param ray      K-wide ray/hit packet; only lane k is tested and updated
/// @param k        index of the active lane within the packet
/// @param context  ray query context; provides the scene for geometry lookup
/// @param prim     CurveNv<8> leaf block holding up to M curve segments
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Coarse bounds test of lane k against all M curves at once; tNear receives
// the per-curve entry distances, valid the per-curve hit mask.
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Iterate over the surviving curves, one set bit of the mask at a time.
size_t mask = movemask(valid);
while (mask)
{
// bscf extracts the index of the lowest set bit and clears it from mask.
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i from the leaf's vertex storage.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Peek at the next one or two candidate curves (without consuming the outer
// mask) and prefetch their vertex data: the immediate successor into L1, the
// one after it into L2, hiding the load latency behind the intersection below.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Exact curve intersection; on a hit the Epilog updates ray lane k
// (including ray.tfar[k]).
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// A hit may have shortened tfar for lane k: drop remaining candidates whose
// entry distance now lies beyond the updated ray extent.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x6c0, %rsp # imm = 0x6C0
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0xa0(%rsp)
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x40(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x10(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2f7a7c(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x30624a(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x3061bf(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x2d62d1(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2d19bf(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x30(%r12,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x3050cb(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x80(%r12,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x3050a6(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x33faa8(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x680(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c1cf1c
leaq (%r8,%rax), %rsi
addq $0x6, %rsi
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %rsi
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0xa0(%rsp), %rcx
leaq (%rcx,%rax), %rdi
addq $0x10, %rdi
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x2f0(%rsp)
leaq 0x50c3fe(%rip), %r13 # 0x21272e4
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
vmovaps %ymm20, 0x1c0(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x4(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r8), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x28(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%rsi,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c1af69
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%rsi,%rdx)
prefetcht0 0x40(%rsi,%rdx)
testq %rcx, %rcx
je 0x1c1af69
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%rsi,%rcx)
prefetcht1 0x40(%rsi,%rcx)
vmovups 0x10(%rsi,%rax), %xmm13
vmovups 0x20(%rsi,%rax), %xmm27
vmovups 0x30(%rsi,%rax), %xmm23
movq %rbx, 0x260(%rsp)
movl 0x248(%rbx), %edx
vmovss (%r12,%r15,4), %xmm0
vinsertps $0x1c, 0x10(%r12,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r12,%r15,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
movl %edx, %ecx
vmovups (%r13,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2f769c(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r13,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r13,%rbx), %ymm15
vbroadcastss %xmm10, %ymm30
vpermps %ymm10, %ymm1, %ymm26
vmovups 0xd8c(%r13,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm29
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm29, %ymm4
vfmadd231ps %ymm30, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm30) + ymm5
vfmadd231ps %ymm26, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm26) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x50e61e(%rip), %rdx # 0x2129704
vmovups (%rdx,%rbx), %ymm2
vmovups 0x484(%rdx,%rbx), %ymm17
vmovups 0x908(%rdx,%rbx), %ymm18
vmovups 0xd8c(%rdx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm29, %ymm6
vfmadd231ps %ymm30, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm30) + ymm7
vfmadd231ps %ymm26, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm26) + ymm6
vmovaps %ymm21, 0xe0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x500(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x100(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x5c0(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x5e0(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x600(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x620(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x280(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x2c0(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x30(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x305c76(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x150(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x140(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x130(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %ecx, %xmm23, %xmm12
vmovaps %xmm12, 0x120(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x305c65(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2d5cff(%rip), %xmm3, %xmm27 # 0x1ef0fe4
vbroadcastss 0x305bee(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0x30(%r12,%r15,4), %xmm8
vmovaps %ymm30, 0x660(%rsp)
vmovaps %ymm26, 0x640(%rsp)
vmovaps %ymm20, 0x5a0(%rsp)
vmovaps %ymm21, 0x580(%rsp)
vmovaps %ymm22, 0x560(%rsp)
vmovaps %ymm3, 0x540(%rsp)
je 0x1c1bae0
vmovaps %xmm8, 0x240(%rsp)
vmovaps %xmm27, 0x80(%rsp)
vmovaps %ymm3, %ymm27
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm27, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r13,%rbx), %ymm3
vmovups 0x1694(%r13,%rbx), %ymm10
vmovups 0x1b18(%r13,%rbx), %ymm11
vmovups 0x1f9c(%r13,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm29, %ymm8
vmulps %ymm12, %ymm27, %ymm12
vfmadd231ps %ymm30, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm30) + ymm9
vfmadd231ps %ymm26, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm26) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0xe0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x500(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x100(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovups 0x1f9c(%rdx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x180(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm29, 0x160(%rsp)
vmulps %ymm13, %ymm29, %ymm14
vmulps %ymm13, %ymm27, %ymm13
vmovaps 0x80(%rsp), %xmm27
vfmadd231ps %ymm30, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm30) + ymm3
vfmadd231ps %ymm26, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm26) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x305a1a(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm27, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x2c0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x280(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x3059a0(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm23, %xmm23, %xmm23
vfmadd213ps %ymm23, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm23
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2d11d3(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2d11b1(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm23, %ymm9, %ymm13
vfmadd213ps %ymm23, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm23
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm23, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm13) + ymm3
vcmpleps %ymm23, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm23, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x1a0(%rsp), %ymm21
vmovaps %ymm17, %ymm22
je 0x1c1c9ee
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2d0fb3(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x240(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm29
je 0x1c1ca21
vcmpneqps %ymm23, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
je 0x1c1ca3b
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2d0f30(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4e0(%rsp)
movzbl %al, %r13d
vmovaps %ymm2, %ymm3
testw %r13w, %r13w
je 0x1c1bad7
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xa0(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r13b
je 0x1c1bad7
vbroadcastss 0x2d5180(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4e0(%rsp), %ymm1
vfmadd132ps 0x2d5799(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm1, 0x4e0(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
movl $0x0, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm7, 0x370(%rsp)
vmovaps 0x150(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %r13b, 0x3b0(%rsp)
movl 0x90(%r12,%r15,4), %eax
movq 0x260(%rsp), %r9
testl %eax, 0x34(%r9)
je 0x1c1bad7
movl %ecx, (%rsp)
vaddps 0x30563f(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2d0e0b(%rip), %xmm1 # 0x1eec714
vdivss 0x120(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm3, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2d00d3(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
jne 0x1c1ca4f
movq 0x260(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c1ca4f
vmovss 0x3c0(%rsp,%r11,4), %xmm0
vmovss 0x3e0(%rsp,%r11,4), %xmm1
vmovss 0x2d0d3e(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d5606(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d4fe9(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d55f8(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d55e4(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x130(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x140(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vmovaps 0x60(%rsp), %ymm6
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x150(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovss 0x400(%rsp,%r11,4), %xmm3
vmovss %xmm3, 0x80(%r12,%r15,4)
vmovaps 0x40(%rsp), %ymm3
vmovss %xmm2, 0xc0(%r12,%r15,4)
vextractps $0x1, %xmm2, 0xd0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0xe0(%r12,%r15,4)
vmovss %xmm0, 0xf0(%r12,%r15,4)
vmovss %xmm1, 0x100(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x110(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x120(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r12,%r15,4)
movl (%rsp), %ecx
leaq 0x50b806(%rip), %r13 # 0x21272e4
jmp 0x1c1bb1a
vxorps %xmm23, %xmm23, %xmm23
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm18
vmovaps 0xe0(%rsp), %ymm19
cmpl $0x9, %ecx
jge 0x1c1bb43
vmovaps 0x680(%rsp), %ymm0
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c1aee6
jmp 0x1c1cf1c
vpbroadcastd %ecx, %ymm0
vmovdqa %ymm0, 0x280(%rsp)
vbroadcastss %xmm27, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x260(%rsp)
vmovss 0x2d0b9d(%rip), %xmm0 # 0x1eec714
vdivss 0x120(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x240(%rsp)
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x120(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x230(%rsp)
movl $0x8, %r9d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
movl %ecx, (%rsp)
vpbroadcastd %r9d, %ymm0
vpor 0x33ed48(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x280(%rsp), %ymm0, %k1
leaq (%rbx,%r13), %rcx
vmovups (%rcx,%r9,4), %ymm3
vmovups 0x484(%rcx,%r9,4), %ymm10
vmovups 0x908(%rcx,%r9,4), %ymm11
vmovups 0xd8c(%rcx,%r9,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x5c0(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x660(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm26) + ymm5
vmovaps 0x640(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x5e0(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm19, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm19) + ymm5
vmovaps 0x500(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x600(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdx), %rax
vmovups (%rax,%r9,4), %ymm2
vmovups 0x484(%rax,%r9,4), %ymm13
vmovaps 0x620(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r9,4), %ymm14
vmovups 0xd8c(%rax,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm26, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm26) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm19, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm19) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm19, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm19
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c1c4c3
vmovaps %ymm25, %ymm16
vmovaps %ymm27, %ymm25
vmovaps %ymm26, %ymm27
vmovaps 0x540(%rsp), %ymm26
vmulps %ymm15, %ymm26, %ymm15
vmovaps 0x560(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x580(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x5a0(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm26, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r9,4), %ymm12
vmovups 0x1694(%rcx,%r9,4), %ymm13
vmovups 0x1b18(%rcx,%r9,4), %ymm14
vmovups 0x1f9c(%rcx,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm26, %ymm15
vfmadd231ps %ymm27, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm27) + ymm11
vfmadd231ps %ymm25, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm25) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm16, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm16) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm19, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm19) + ymm10
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r9,4), %ymm13
vmovups 0x1b18(%rax,%r9,4), %ymm14
vmovups 0x1f9c(%rax,%r9,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm26, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r9,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x500(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x305024(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x2c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x304fb0(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm23, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm23
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2d07e7(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2d07c5(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm23, %ymm14, %ymm13
vfmadd213ps %ymm23, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm23
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm23, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm23, %ymm6 # ymm6 = (ymm23 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm23, %ymm6 # ymm6 = (ymm23 * ymm13) + ymm6
vcmpleps %ymm23, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm23, %ymm13, %k0 {%k1}
kortestb %k0, %k0
je 0x1c1c4f0
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2d05da(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x260(%rsp), %ymm2, %k1
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl (%rsp), %ecx
je 0x1c1c50a
vcmpneqps %ymm23, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x100(%rsp), %ymm18
vmovaps 0xe0(%rsp), %ymm19
je 0x1c1c534
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2d0560(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4c0(%rsp)
movzbl %al, %r11d
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
testw %r11w, %r11w
je 0x1c1c4de
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xa0(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %r11b
je 0x1c1c4de
movl %r11d, %eax
vbroadcastss 0x2d479c(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4c0(%rsp), %ymm1
vfmadd132ps 0x2d4db5(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x300(%rsp)
vmovaps %ymm1, 0x4c0(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm21, 0x340(%rsp)
movl %r9d, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm7, 0x370(%rsp)
vmovaps 0x150(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %al, 0x3b0(%rsp)
movl %r11d, %r13d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x28(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x90(%r12,%r15,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c1c4b7
vaddps 0x304c55(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r9d, %xmm22, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x240(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm21, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2cf6ec(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movq %rcx, 0x80(%rsp)
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r13d
movq 0x80(%rsp), %rcx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c1c53c
cmpq $0x0, 0x40(%rcx)
jne 0x1c1c53c
vmovss 0x3c0(%rsp,%r13,4), %xmm0
vmovss 0x3e0(%rsp,%r13,4), %xmm1
vmovss 0x2d035b(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d4c23(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d4606(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d4c15(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d4c01(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x130(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x140(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x150(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovss 0x400(%rsp,%r13,4), %xmm3
vmovss %xmm3, 0x80(%r12,%r15,4)
vmovss %xmm2, 0xc0(%r12,%r15,4)
vextractps $0x1, %xmm2, 0xd0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0xe0(%r12,%r15,4)
vmovss %xmm0, 0xf0(%r12,%r15,4)
vmovss %xmm1, 0x100(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x110(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x120(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r12,%r15,4)
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
leaq 0x50ae26(%rip), %r13 # 0x21272e4
movl (%rsp), %ecx
jmp 0x1c1c4de
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
movl (%rsp), %ecx
vmovaps %ymm19, %ymm18
vmovaps %ymm24, %ymm19
addq $0x8, %r9
cmpl %r9d, %ecx
jg 0x1c1bbca
jmp 0x1c1bb1f
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
movl (%rsp), %ecx
jmp 0x1c1c51f
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x100(%rsp), %ymm18
vmovaps 0xe0(%rsp), %ymm19
jmp 0x1c1c1e9
xorl %r11d, %r11d
jmp 0x1c1c1dd
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x530(%rsp)
vmovaps %ymm20, 0x1c0(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
movq %r9, 0xd8(%rsp)
movl %r11d, %eax
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovaps %ymm28, 0x180(%rsp)
vmovaps %ymm29, 0x160(%rsp)
movl %eax, 0x220(%rsp)
vmovss 0x80(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x210(%rsp)
vmovss 0x400(%rsp,%r13,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r13,4), %xmm1
vbroadcastss 0x3e0(%rsp,%r13,4), %xmm2
vmovss %xmm0, 0x80(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2d0115(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2d49dd(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d43c0(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2d49cf(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2d49bb(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x530(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1f0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x200(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x430(%rsp)
vmovaps %xmm4, 0x440(%rsp)
vmovaps %xmm0, 0x450(%rsp)
vmovaps %xmm1, 0x460(%rsp)
vmovaps %xmm2, 0x470(%rsp)
vmovaps 0x230(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovdqa 0x120(%rsp), %xmm0
vmovdqa %xmm0, 0x490(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
movq %rcx, %r11
leaq 0x4a0(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x4a0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0xa8(%rsp)
movq 0x18(%r11), %rcx
movq %rcx, 0xb0(%rsp)
movq %rax, 0xb8(%rsp)
movq %r12, 0xc0(%rsp)
leaq 0x430(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl $0x4, 0xd0(%rsp)
movq 0x40(%r11), %rax
testq %rax, %rax
je 0x1c1c7d9
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd8(%rsp), %r9
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x100(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x50cf3f(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c1c93f
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1c8a6
testb $0x2, (%rcx)
jne 0x1c1c826
movq 0x80(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c1c8a6
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd8(%rsp), %r9
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x100(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm23, %xmm23, %xmm23
leaq 0x50ce72(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c1c93f
movq 0xc0(%rsp), %rax
movq 0xc8(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c1c952
vmovd 0x210(%rsp), %xmm0
vmovd %xmm0, 0x80(%r12,%r15,4)
movl $0x1, %eax
shlxl %r13d, %eax, %eax
kmovd %eax, %k0
movzbl 0x220(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %eax
ktestb %k1, %k0
je 0x1c1c9d9
kmovd %eax, %k1
vbroadcastss 0x2cf08c(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
movl %eax, %r11d
kmovd %k0, %eax
andb %r11b, %al
movzbl %al, %eax
movzbl %r11b, %ecx
cmovnel %eax, %ecx
movl %r11d, %eax
tzcntl %ecx, %r13d
testb %al, %al
movq 0x80(%rsp), %rcx
jne 0x1c1c5b1
jmp 0x1c1c4b7
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x240(%rsp), %xmm8
jmp 0x1c1b809
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
jmp 0x1c1b809
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
jmp 0x1c1b809
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x230(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x220(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovss 0x80(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x2c0(%rsp)
vmovss 0x400(%rsp,%r11,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r11,4), %xmm1
vbroadcastss 0x3e0(%rsp,%r11,4), %xmm2
vmovss %xmm0, 0x80(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2cfc0d(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2d44d5(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d3eb8(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2d44c7(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2d44b3(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x200(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x430(%rsp)
vmovaps %xmm4, 0x440(%rsp)
vmovaps %xmm0, 0x450(%rsp)
vmovaps %xmm1, 0x460(%rsp)
vmovaps %xmm2, 0x470(%rsp)
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovdqa 0x230(%rsp), %xmm0
vmovdqa %xmm0, 0x490(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
leaq 0x4a0(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x4a0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0xa8(%rsp)
movq 0x260(%rsp), %r9
movq 0x18(%r9), %rcx
movq %rcx, 0xb0(%rsp)
movq %rax, 0xb8(%rsp)
movq %r12, 0xc0(%rsp)
leaq 0x430(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl $0x4, 0xd0(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movq %r11, 0x280(%rsp)
je 0x1c1cd03
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x280(%rsp), %r11
vmovaps 0x240(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm27
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x100(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x50ca15(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c1ce7a
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1cde1
testb $0x2, (%rcx)
jne 0x1c1cd50
movq 0x260(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c1cde1
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x280(%rsp), %r11
vmovaps 0x240(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm27
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm19
vmovaps 0x100(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm23, %xmm23, %xmm23
leaq 0x50c937(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c1ce7a
movq 0xc0(%rsp), %rax
movq 0xc8(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c1ce8d
vmovd 0x2c0(%rsp), %xmm0
vmovd %xmm0, 0x80(%r12,%r15,4)
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
movzbl %r13b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r13d
ktestb %k1, %k0
je 0x1c1cf0b
kmovd %r13d, %k1
vbroadcastss 0x2ceb54(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
testb %r13b, %r13b
movl (%rsp), %ecx
jne 0x1c1cac0
jmp 0x1c1bad7
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 4>::occluded_t<embree::avx512::RibbonCurve1IntersectorK<embree::BezierCurveT, 4, 8>, embree::avx512::Occluded1KEpilogMU<8, 4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x720, %rsp # imm = 0x720
movq %r8, %r9
movq %rdx, %r14
movq %rsi, %r15
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rsi
leaq (%rsi,%rsi,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%r15,%r14,4), %xmm1
vmovss 0x40(%r15,%r14,4), %xmm2
vinsertps $0x10, 0x10(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x8(%rsp)
vinsertps $0x20, 0x60(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rdx,%rdx,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rdx,%rsi,2), %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %r10
leal (%r10,%r10), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2f56ce(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x303e97(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x303e06(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm4, %ymm5
vbroadcastss 0x2d3f1d(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2cf60b(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r8
subq %rdx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%r10), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rdx,%rdx), %r10
addq %rdx, %rsi
shlq $0x3, %rcx
subq %rdx, %rcx
movl %edx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rsi), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x30(%r15,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x302d1b(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x80(%r15,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x302cf6(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %edx, %ymm1
vpcmpgtd 0x33d6f2(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x6e0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %sil
je 0x1c1ef54
leaq (%r9,%rax), %r12
addq $0x6, %r12
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
addq $0x10, %r12
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r10
addq $0x10, %r10
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x300(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
vmovaps %ymm21, 0x280(%rsp)
vmovaps %ymm20, 0x2a0(%rsp)
tzcntq %r13, %rax
blsrq %r13, %r13
movl 0x6(%r9,%rax,4), %r11d
shll $0x6, %eax
movq %r13, %rcx
movl 0x2(%r9), %r8d
movq 0x8(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq %r8, 0xa8(%rsp)
movq (%rdx,%r8,8), %r8
vmovups (%r12,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c1d31c
andq %r13, %rcx
tzcntq %r13, %rdx
shll $0x6, %edx
prefetcht0 (%r12,%rdx)
prefetcht0 0x40(%r12,%rdx)
testq %rcx, %rcx
je 0x1c1d31c
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r12,%rcx)
prefetcht1 0x40(%r12,%rcx)
vmovups 0x10(%r12,%rax), %xmm13
vmovups 0x20(%r12,%rax), %xmm27
vmovups 0x30(%r12,%rax), %xmm23
movq %r8, 0x260(%rsp)
movl 0x248(%r8), %r8d
vmovss (%r15,%r14,4), %xmm0
vinsertps $0x1c, 0x10(%r15,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r15,%r14,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r10), %xmm4
vmovaps 0x10(%r10), %xmm5
vmovaps 0x20(%r10), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r8d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
leaq 0x509ed5(%rip), %rdx # 0x21272e4
vmovups (%rdx,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2f52e0(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%rdx,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%rdx,%rbx), %ymm15
vbroadcastss %xmm10, %ymm29
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%rdx,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm26
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm26, %ymm4
vfmadd231ps %ymm29, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm29) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x50c264(%rip), %rcx # 0x2129704
vmovups (%rcx,%rbx), %ymm2
vmovups 0x484(%rcx,%rbx), %ymm17
vmovups 0x908(%rcx,%rbx), %ymm18
vmovups 0xd8c(%rcx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm26, %ymm6
vfmadd231ps %ymm29, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm29) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0x180(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x580(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2e0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x2c0(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x620(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x640(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x660(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x680(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0xe0(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x160(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x10(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x3038bc(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x330(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x320(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x310(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r8d, %xmm23, %xmm12
vmovaps %xmm12, 0x150(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x3038ab(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2d3947(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x303836(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0x30(%r15,%r14,4), %xmm9
vmovaps %ymm29, 0x6c0(%rsp)
vmovaps %ymm30, 0x6a0(%rsp)
vmovaps %ymm20, 0x600(%rsp)
vmovaps %ymm21, 0x5e0(%rsp)
vmovaps %ymm22, 0x5c0(%rsp)
je 0x1c1e1a2
vmovaps %xmm9, 0x200(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%rdx,%rbx), %ymm3
vmovups 0x1694(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%rdx,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm26, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm29, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm29) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x180(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x580(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm23) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2e0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x2c0(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rcx,%rbx), %ymm10
vmovups 0x1b18(%rcx,%rbx), %ymm11
vmovups 0x1f9c(%rcx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x240(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm26, 0x220(%rsp)
vmulps %ymm13, %ymm26, %ymm14
vmovaps %ymm15, 0x1a0(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm29, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm29) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rcx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm23, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm23) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x303672(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm26
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x160(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0xe0(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x3035f2(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm24, %xmm24, %xmm24
vfmadd213ps %ymm24, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm24
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2cee25(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2cee03(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm24, %ymm9, %ymm13
vfmadd213ps %ymm24, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm24
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm24, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm13) + ymm3
vcmpleps %ymm24, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x2a0(%rsp), %ymm20
vmovaps 0x280(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm26, %xmm14
je 0x1c1eed3
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2cebf9(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x200(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x80(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x240(%rsp), %ymm28
vmovaps 0x220(%rsp), %ymm29
je 0x1c1ef0e
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm8
je 0x1c1ef30
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2ceb6d(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x560(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c1e1eb
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c1e1ef
movl %r11d, 0xe0(%rsp)
vbroadcastss 0x2d2dc4(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x560(%rsp), %ymm1
vfmadd132ps 0x2d33dd(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x420(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm2, 0x460(%rsp)
movl $0x0, 0x480(%rsp)
movl %r8d, 0x484(%rsp)
vmovaps %xmm7, 0x490(%rsp)
vmovaps 0x330(%rsp), %xmm0
vmovaps %xmm0, 0x4a0(%rsp)
vmovaps 0x320(%rsp), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovdqa 0x310(%rsp), %xmm0
vmovdqa %xmm0, 0x4c0(%rsp)
movb %al, 0x4d0(%rsp)
movl 0x90(%r15,%r14,4), %ecx
movq 0x260(%rsp), %r11
testl %ecx, 0x34(%r11)
je 0x1c1e1fa
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c1dcd8
movb $0x1, %r11b
movq 0x260(%rsp), %rcx
cmpq $0x0, 0x48(%rcx)
je 0x1c1e1fd
vaddps 0x303260(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2cea2c(%rip), %xmm1 # 0x1eec714
vdivss 0x150(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x560(%rsp), %ymm0
vmovaps %ymm0, 0x500(%rsp)
vmovaps %ymm2, 0x520(%rsp)
movzbl %al, %ecx
tzcntq %rcx, %r11
movq 0xa8(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x140(%rsp)
vmovaps 0x4a0(%rsp), %xmm0
vmovaps %xmm0, 0x130(%rsp)
movl 0xe0(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x120(%rsp)
movq %r11, %rax
vmovaps 0x4b0(%rsp), %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps 0x4c0(%rsp), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
movb $0x1, %r11b
vmovaps %xmm14, 0x100(%rsp)
movq %r9, 0x38(%rsp)
movq %rdi, 0x30(%rsp)
movb %sil, 0x3(%rsp)
movq %r10, 0x28(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movq %rcx, 0x360(%rsp)
movl %r11d, 0x160(%rsp)
vmovss 0x80(%r15,%r14,4), %xmm10
vmovss 0x520(%rsp,%rax,4), %xmm0
vbroadcastss 0x4e0(%rsp,%rax,4), %xmm1
movq %rax, 0x1e0(%rsp)
vbroadcastss 0x500(%rsp,%rax,4), %xmm2
vmovss %xmm0, 0x80(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2ce8fd(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2d31c5(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d2ba8(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2d31b7(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2d31a3(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1d0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x110(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x130(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x390(%rsp)
vmovaps %xmm4, 0x3a0(%rsp)
vmovaps %xmm0, 0x3b0(%rsp)
vmovaps %xmm1, 0x3c0(%rsp)
vmovaps %xmm2, 0x3d0(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
vmovdqa 0x140(%rsp), %xmm0
vmovdqa %xmm0, 0x3f0(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
leaq 0x400(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x400(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x410(%rsp)
vmovaps 0x300(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0xb0(%rsp)
movq 0x260(%rsp), %r11
movq 0x18(%r11), %rcx
movq %rcx, 0xb8(%rsp)
movq %rax, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
leaq 0x390(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x48(%r11), %rax
testq %rax, %rax
movq %rdi, %r11
vmovss %xmm10, 0x340(%rsp)
je 0x1c1e027
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x340(%rsp), %xmm10
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x100(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %ymm8
vmovaps 0x220(%rsp), %ymm29
vmovaps 0x240(%rsp), %ymm28
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x2c0(%rsp), %ymm18
vmovaps 0x2e0(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x280(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm20
vxorps %xmm24, %xmm24, %xmm24
leaq 0x5092d1(%rip), %rdx # 0x21272e4
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c1e161
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
je 0x1c1e11a
testb $0x2, (%rcx)
jne 0x1c1e079
movq 0x260(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c1e11a
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x340(%rsp), %xmm10
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x100(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %ymm8
vmovaps 0x220(%rsp), %ymm29
vmovaps 0x240(%rsp), %ymm28
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x2c0(%rsp), %ymm18
vmovaps 0x2e0(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x280(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x2a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm24, %xmm24, %xmm24
leaq 0x5091de(%rip), %rdx # 0x21272e4
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
movq %r11, %rdi
vmovdqa 0x90(%rsp), %xmm0
movq 0xc8(%rsp), %rax
vmovaps 0x80(%rax), %xmm1
vptestmd %xmm0, %xmm0, %k1
vbroadcastss 0x2cea3e(%rip), %xmm1 {%k1} # 0x1eecb84
vmovaps %xmm1, 0x80(%rax)
kortestb %k1, %k1
movq 0x360(%rsp), %rcx
je 0x1c1e178
jmp 0x1c1ef43
movq %r11, %rdi
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
movq 0x360(%rsp), %rcx
vmovss %xmm10, 0x80(%r15,%r14,4)
movq 0x1e0(%rsp), %rax
btcq %rax, %rcx
tzcntq %rcx, %rax
setae %r11b
jae 0x1c1ddbd
jmp 0x1c1ef4b
xorl %eax, %eax
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x2a0(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x280(%rsp), %ymm21
vmovaps 0x2e0(%rsp), %ymm22
vmovaps 0x2c0(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps %ymm26, %ymm29
vmovaps %xmm12, %xmm14
jmp 0x1c1e20f
xorl %eax, %eax
jmp 0x1c1e20f
xorl %eax, %eax
leaq 0x50b50c(%rip), %rcx # 0x2129704
jmp 0x1c1e20f
xorl %r11d, %r11d
leaq 0x50b500(%rip), %rcx # 0x2129704
movl %r11d, %eax
movl 0xe0(%rsp), %r11d
cmpl $0x9, %r8d
jge 0x1c1e245
testb $0x1, %al
jne 0x1c1ef54
vmovaps 0x6e0(%rsp), %ymm0
vcmpleps 0x80(%r15,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne %sil
jne 0x1c1d291
jmp 0x1c1ef54
vmovaps %ymm8, 0x1a0(%rsp)
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x360(%rsp)
vmovss 0x2ce493(%rip), %xmm0 # 0x1eec714
vdivss 0x150(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x340(%rsp)
movl %r11d, 0xe0(%rsp)
movl %eax, %r11d
movq 0xa8(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x150(%rsp)
movl %r11d, %eax
movl 0xe0(%rsp), %r11d
vpbroadcastd %r11d, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
movl $0x8, %r11d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movl %eax, 0x160(%rsp)
vpbroadcastd %r11d, %ymm0
vpor 0x33c620(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x260(%rsp), %ymm0, %k1
movq %rcx, %rax
leaq (%rbx,%rdx), %rcx
vmovups (%rcx,%r11,4), %ymm3
vmovups 0x484(%rcx,%r11,4), %ymm10
vmovups 0x908(%rcx,%r11,4), %ymm11
vmovups 0xd8c(%rcx,%r11,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x620(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x6c0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x6a0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x640(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x580(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x660(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rax), %rax
vmovups (%rax,%r11,4), %ymm2
vmovups 0x484(%rax,%r11,4), %ymm13
vmovaps 0x680(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r11,4), %ymm14
vmovups 0xd8c(%rax,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm26
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c1ee44
vmovaps %ymm23, %ymm16
vmovaps 0x1a0(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x5c0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x5e0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x600(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r11,4), %ymm12
vmovups 0x1694(%rcx,%r11,4), %ymm13
vmovups 0x1b18(%rcx,%r11,4), %ymm14
vmovups 0x1f9c(%rcx,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm26, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm26) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm27, %ymm25
vmovaps %ymm19, %ymm27
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r11,4), %ymm13
vmovups 0x1b18(%rax,%r11,4), %ymm14
vmovups 0x1f9c(%rax,%r11,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r11,4), %ymm14
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps 0x580(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x3028f3(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x200(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x30287f(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm24, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm24
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2ce0b6(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2ce094(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm24, %ymm14, %ymm13
vfmadd213ps %ymm24, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm24
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm24, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm13) + ymm6
vcmpleps %ymm24, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c1eea0
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2cdea3(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x360(%rsp), %ymm2, %k1
vcmpleps 0x80(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
leaq 0x50ae5b(%rip), %rcx # 0x2129704
je 0x1c1eeab
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm17
je 0x1c1eecc
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2cde2d(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x540(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
testw %ax, %ax
je 0x1c1ee6c
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c1ee65
vbroadcastss 0x2d2077(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x540(%rsp), %ymm1
vfmadd132ps 0x2d2690(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x420(%rsp)
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm21, 0x460(%rsp)
movl %r11d, 0x480(%rsp)
movl %r8d, 0x484(%rsp)
vmovaps %xmm7, 0x490(%rsp)
vmovaps 0x330(%rsp), %xmm0
vmovaps %xmm0, 0x4a0(%rsp)
vmovaps 0x320(%rsp), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovdqa 0x310(%rsp), %xmm0
vmovdqa %xmm0, 0x4c0(%rsp)
movb %al, 0x4d0(%rsp)
movq 0x8(%rsp), %rcx
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0xa8(%rsp), %r8
movq (%rcx,%r8,8), %r8
movl 0x90(%r15,%r14,4), %ecx
movq %r8, 0xe0(%rsp)
testl %ecx, 0x34(%r8)
je 0x1c1ee75
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
leaq 0x50acda(%rip), %rcx # 0x2129704
jne 0x1c1ea42
movq 0xe0(%rsp), %r8
cmpq $0x0, 0x48(%r8)
movb $0x1, %r8b
je 0x1c1ee7f
vmovaps %ymm29, 0x220(%rsp)
vmovaps %ymm28, 0x240(%rsp)
movq %r10, 0x28(%rsp)
movb %sil, 0x3(%rsp)
movq %rdi, 0x30(%rsp)
movq %r9, 0x38(%rsp)
vmovaps %ymm20, 0x2a0(%rsp)
vaddps 0x3024c8(%rip), %ymm20, %ymm0 # 0x1f20f40
movq %r11, 0x1f8(%rsp)
vcvtsi2ss %r11d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x340(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps %ymm0, 0x500(%rsp)
vmovaps %ymm21, 0x280(%rsp)
vmovaps %ymm21, 0x520(%rsp)
movzbl %al, %edi
vmovaps 0x4a0(%rsp), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
tzcntq %rdi, %r8
vmovaps 0x4b0(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps 0x4c0(%rsp), %xmm0
vmovaps %xmm0, 0x5b0(%rsp)
movb $0x1, %dl
vmovss 0x80(%r15,%r14,4), %xmm8
vmovss 0x520(%rsp,%r8,4), %xmm0
vbroadcastss 0x4e0(%rsp,%r8,4), %xmm1
vbroadcastss 0x500(%rsp,%r8,4), %xmm2
vmovss %xmm0, 0x80(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2cdbce(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2d2496(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d1e79(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2d2488(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2d2474(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x5b0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x100(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x1d0(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x390(%rsp)
vmovaps %xmm4, 0x3a0(%rsp)
vmovaps %xmm0, 0x3b0(%rsp)
vmovaps %xmm1, 0x3c0(%rsp)
vmovaps %xmm2, 0x3d0(%rsp)
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
vmovdqa 0x150(%rsp), %xmm0
vmovdqa %xmm0, 0x3f0(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
leaq 0x400(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x400(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x410(%rsp)
vmovaps 0x300(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0xb0(%rsp)
movq 0xe0(%rsp), %rsi
movq 0x18(%rsi), %rcx
movq %rcx, 0xb8(%rsp)
movq %rax, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
leaq 0x390(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x4, 0xd8(%rsp)
movq 0x48(%rsi), %rax
testq %rax, %rax
movl %edx, 0x140(%rsp)
movq %rdi, 0x130(%rsp)
movq %r8, 0x120(%rsp)
vmovss %xmm8, 0x110(%rsp)
je 0x1c1ed04
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x110(%rsp), %xmm8
movq 0x120(%rsp), %r8
movq 0x130(%rsp), %rdi
movl 0x140(%rsp), %edx
vmovaps 0x10(%rsp), %xmm7
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c1edac
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c1ed75
testb $0x2, (%rcx)
jne 0x1c1ed42
movq 0xe0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c1ed75
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x110(%rsp), %xmm8
movq 0x120(%rsp), %r8
movq 0x130(%rsp), %rdi
movl 0x140(%rsp), %edx
vmovaps 0x10(%rsp), %xmm7
vmovdqa 0x90(%rsp), %xmm0
movq 0xc8(%rsp), %rax
vmovaps 0x80(%rax), %xmm1
vptestmd %xmm0, %xmm0, %k1
vbroadcastss 0x2cdde6(%rip), %xmm1 {%k1} # 0x1eecb84
vmovaps %xmm1, 0x80(%rax)
kortestb %k1, %k1
jne 0x1c1edc8
vmovss %xmm8, 0x80(%r15,%r14,4)
btcq %r8, %rdi
tzcntq %rdi, %r8
setae %dl
jae 0x1c1eb03
andb $0x1, %dl
movq 0x38(%rsp), %r9
movq 0x30(%rsp), %rdi
movb 0x3(%rsp), %sil
movq 0x28(%rsp), %r10
movl %edx, %r8d
leaq 0x5084fb(%rip), %rdx # 0x21272e4
leaq 0x50a914(%rip), %rcx # 0x2129704
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x2a0(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x280(%rsp), %ymm21
vmovaps 0x2e0(%rsp), %ymm22
vmovaps 0x2c0(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x240(%rsp), %ymm28
vmovaps 0x220(%rsp), %ymm29
movq 0x1f8(%rsp), %r11
jmp 0x1c1ee7f
leaq 0x50a8b9(%rip), %rcx # 0x2129704
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm26, %ymm17
jmp 0x1c1ee6c
leaq 0x50a898(%rip), %rcx # 0x2129704
movl 0x160(%rsp), %eax
jmp 0x1c1ee8e
xorl %r8d, %r8d
leaq 0x50a885(%rip), %rcx # 0x2129704
movl 0x160(%rsp), %eax
orb %r8b, %al
movl 0x4(%rsp), %r8d
addq $0x8, %r11
cmpl %r11d, %r8d
jg 0x1c1e2eb
jmp 0x1c1e215
xorl %eax, %eax
leaq 0x50a85b(%rip), %rcx # 0x2129704
jmp 0x1c1eead
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm17
jmp 0x1c1e91b
xorl %eax, %eax
jmp 0x1c1e90f
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x240(%rsp), %ymm28
vmovaps 0x220(%rsp), %ymm29
vmovaps 0x1a0(%rsp), %ymm8
vmovaps 0x200(%rsp), %xmm9
jmp 0x1c1dbc7
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm8
jmp 0x1c1dbc7
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
jmp 0x1c1dbc7
movl 0x160(%rsp), %r11d
andb $0x1, %r11b
jmp 0x1c1e1fd
andb $0x1, %sil
movl %esi, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 8>::intersect_t<embree::avx512::RibbonCurve1IntersectorK<embree::BezierCurveT, 8, 8>, embree::avx512::Intersect1KEpilogMU<8, 8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayHitK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
/* Intersects one ray (lane k of an 8-wide ray packet) against a packed
   block of up to M curve segments (CurveNv<8>), forwarding each surviving
   segment to the template Intersector and committing hits via Epilog.
   Mutates ray (hit record / tfar) through the epilog; no other side effects
   visible here besides the STAT3 statistics hook and cache prefetches. */
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Coarse per-segment test: 'valid' marks segments whose bounds the ray can
// enter, with tNear holding each segment's entry distance.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Iterate the surviving segments via a bit mask of 'valid' lanes.
size_t mask = movemask(valid);
while (mask)
{
// bscf extracts the lowest set bit's index and clears it from 'mask'
// (presumably "bit scan + clear forward" -- project bit utility; confirm).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);  // statistics/profiling hook
// geomID is uniform for the whole primitive block (no per-segment index);
// primID is looked up per segment.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points a0..a3 of segment i.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch: while intersecting segment i, pull the next
// segment's vertices into L1 and the one after that into L2 to hide
// memory latency. i1/i2 are the next two set bits of the remaining mask.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Full curve intersection; Epilog commits any accepted hit into 'ray'.
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// A committed hit may have shrunk ray.tfar[k]: drop remaining segments
// whose entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x7a0, %rsp # imm = 0x7A0
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0x98(%rsp)
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x80(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2f3685(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x301e53(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x301dc8(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x2d1eda(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2cd5c8(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x60(%r12,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x300cd4(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x100(%r12,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x300caf(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x33b6b1(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x760(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c2136a
leaq (%r8,%rax), %rsi
addq $0x6, %rsi
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %rsi
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0x98(%rsp), %rcx
leaq (%rcx,%rax), %rdi
addq $0x20, %rdi
leaq 0x500(%rsp), %rax
leaq 0xe0(%rax), %rax
movq %rax, 0xd8(%rsp)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
leaq 0x507ff0(%rip), %r13 # 0x21272e4
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
vmovaps %ymm21, 0x180(%rsp)
vmovaps %ymm20, 0x1a0(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x4(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r8), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x28(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%rsi,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c1f377
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%rsi,%rdx)
prefetcht0 0x40(%rsi,%rdx)
testq %rcx, %rcx
je 0x1c1f377
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%rsi,%rcx)
prefetcht1 0x40(%rsi,%rcx)
vmovups 0x10(%rsi,%rax), %xmm13
vmovups 0x20(%rsi,%rax), %xmm26
vmovups 0x30(%rsi,%rax), %xmm23
movq %rbx, 0x240(%rsp)
movl 0x248(%rbx), %edx
vmovss (%r12,%r15,4), %xmm0
vinsertps $0x1c, 0x20(%r12,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r12,%r15,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm26, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
movl %edx, %ecx
vmovups (%r13,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2f328e(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r13,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r13,%rbx), %ymm15
vbroadcastss %xmm10, %ymm31
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%r13,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm29
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm29, %ymm4
vfmadd231ps %ymm31, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm31) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x50a210(%rip), %rdx # 0x2129704
vmovups (%rdx,%rbx), %ymm2
vmovups 0x484(%rdx,%rbx), %ymm17
vmovups 0x908(%rdx,%rbx), %ymm18
vmovups 0xd8c(%rdx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm29, %ymm6
vfmadd231ps %ymm31, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm31) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0xe0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x4a0(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm27
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm27, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm26, %xmm26, %xmm0 # xmm0 = xmm26[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x6a0(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x6c0(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x6e0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x700(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x260(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm27, 0x2c0(%rsp)
vfmadd231ps %ymm27, %ymm27, %ymm21 # ymm21 = (ymm27 * ymm27) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x30(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x301868(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x130(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm26, %xmm3
vmovaps %xmm26, 0x120(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x110(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %ecx, %xmm23, %xmm12
vmovaps %xmm12, 0x2e0(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x301857(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2d18f1(%rip), %xmm3, %xmm26 # 0x1ef0fe4
vbroadcastss 0x3017e0(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0x60(%r12,%r15,4), %xmm8
vmovaps %ymm31, 0x740(%rsp)
vmovaps %ymm30, 0x720(%rsp)
vmovaps %ymm20, 0x680(%rsp)
vmovaps %ymm21, 0x660(%rsp)
vmovaps %ymm22, 0x640(%rsp)
vmovaps %ymm3, 0x620(%rsp)
je 0x1c1fef4
vmovaps %xmm8, 0x220(%rsp)
vmovaps %xmm26, 0x80(%rsp)
vmovaps %ymm3, %ymm26
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm26, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r13,%rbx), %ymm3
vmovups 0x1694(%r13,%rbx), %ymm10
vmovups 0x1b18(%r13,%rbx), %ymm11
vmovups 0x1f9c(%r13,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm29, %ymm8
vmulps %ymm12, %ymm26, %ymm12
vfmadd231ps %ymm31, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm31) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0xe0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x4a0(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovups 0x1f9c(%rdx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x160(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm29, 0x140(%rsp)
vmulps %ymm13, %ymm29, %ymm14
vmulps %ymm13, %ymm26, %ymm13
vmovaps 0x80(%rsp), %xmm26
vfmadd231ps %ymm31, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm31) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x30160c(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm26, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x2c0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x260(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x301592(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm27, %xmm27, %xmm27
vfmadd213ps %ymm27, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm27
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2ccdc5(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2ccda3(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm27, %ymm9, %ymm13
vfmadd213ps %ymm27, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm27
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm27, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm13) + ymm3
vcmpleps %ymm27, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x180(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
je 0x1c20e1d
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2ccb9f(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x220(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x160(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm29
je 0x1c20e50
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
je 0x1c20e6a
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2ccb1c(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x480(%rsp)
movzbl %al, %r13d
vmovaps %ymm2, %ymm3
testw %r13w, %r13w
je 0x1c1feeb
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0x98(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r13b
je 0x1c1feeb
vbroadcastss 0x2d0d6c(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x480(%rsp), %ymm1
vfmadd132ps 0x2d1385(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
movl $0x0, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm7, 0x370(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %r13b, 0x3b0(%rsp)
movl 0x120(%r12,%r15,4), %eax
movq 0x240(%rsp), %r9
testl %eax, 0x34(%r9)
je 0x1c1feeb
movl %ecx, (%rsp)
vaddps 0x30122b(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2cc9f7(%rip), %xmm1 # 0x1eec714
vdivss 0x2e0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm3, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2cbcbf(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
jne 0x1c20e7e
movq 0x240(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c20e7e
vmovss 0x3c0(%rsp,%r11,4), %xmm0
vmovss 0x3e0(%rsp,%r11,4), %xmm1
vmovss 0x2cc92a(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d11f2(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d0bd5(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d11e4(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d11d0(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x110(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x120(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vmovaps 0x60(%rsp), %ymm6
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x130(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovss 0x400(%rsp,%r11,4), %xmm3
vmovss %xmm3, 0x100(%r12,%r15,4)
vmovaps 0x40(%rsp), %ymm3
vmovss %xmm2, 0x180(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x1a0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x1c0(%r12,%r15,4)
vmovss %xmm0, 0x1e0(%r12,%r15,4)
vmovss %xmm1, 0x200(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x220(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x240(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%r15,4)
movl (%rsp), %ecx
leaq 0x5073f2(%rip), %r13 # 0x21272e4
jmp 0x1c1ff2e
vxorps %xmm27, %xmm27, %xmm27
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0xe0(%rsp), %ymm17
cmpl $0x9, %ecx
jge 0x1c1ff57
vmovaps 0x760(%rsp), %ymm0
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c1f2f4
jmp 0x1c2136a
vpbroadcastd %ecx, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss %xmm26, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovss 0x2cc789(%rip), %xmm0 # 0x1eec714
vdivss 0x2e0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x2e0(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movl $0x8, %r9d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
movl %ecx, (%rsp)
vpbroadcastd %r9d, %ymm0
vpor 0x33a934(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x260(%rsp), %ymm0, %k1
leaq (%rbx,%r13), %rcx
vmovups (%rcx,%r9,4), %ymm3
vmovups 0x484(%rcx,%r9,4), %ymm10
vmovups 0x908(%rcx,%r9,4), %ymm11
vmovups 0xd8c(%rcx,%r9,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x6a0(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x740(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x720(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm26) + ymm4
vmovaps 0x6c0(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x4a0(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x6e0(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdx), %rax
vmovups (%rax,%r9,4), %ymm2
vmovups 0x484(%rax,%r9,4), %ymm13
vmovaps 0x700(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r9,4), %ymm14
vmovups 0xd8c(%rax,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm26, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm26) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c208db
vmovaps %ymm23, %ymm16
vmovaps 0x620(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x640(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x660(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x680(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r9,4), %ymm12
vmovups 0x1694(%rcx,%r9,4), %ymm13
vmovups 0x1b18(%rcx,%r9,4), %ymm14
vmovups 0x1f9c(%rcx,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm26, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm26) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm26, %ymm25
vmovaps %ymm19, %ymm26
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r9,4), %ymm13
vmovups 0x1b18(%rax,%r9,4), %ymm14
vmovups 0x1f9c(%rax,%r9,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r9,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x4a0(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x300c0a(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x2c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x300b96(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm27, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm27
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2cc3cd(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2cc3ab(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm27, %ymm14, %ymm13
vfmadd213ps %ymm27, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm27
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm27, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm13) + ymm6
vcmpleps %ymm27, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c20908
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2cc1ba(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x240(%rsp), %ymm2, %k1
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl (%rsp), %ecx
je 0x1c20922
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
vmovaps 0xe0(%rsp), %ymm17
je 0x1c20944
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2cc148(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x460(%rsp)
movzbl %al, %r11d
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
testw %r11w, %r11w
je 0x1c208f6
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0x98(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %r11b
je 0x1c208f6
movl %r11d, %eax
vbroadcastss 0x2d0384(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x460(%rsp), %ymm1
vfmadd132ps 0x2d099d(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x300(%rsp)
vmovaps %ymm1, 0x460(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm21, 0x340(%rsp)
movl %r9d, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm7, 0x370(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %al, 0x3b0(%rsp)
movl %r11d, %r13d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x28(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x120(%r12,%r15,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c208cf
vaddps 0x30083d(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r9d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x220(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm21, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2cb2d4(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movq %rcx, 0x80(%rsp)
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r13d
movq 0x80(%rsp), %rcx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c2094c
cmpq $0x0, 0x40(%rcx)
jne 0x1c2094c
vmovss 0x3c0(%rsp,%r13,4), %xmm0
vmovss 0x3e0(%rsp,%r13,4), %xmm1
vmovss 0x2cbf43(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2d080b(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2d01ee(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2d07fd(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2d07e9(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x110(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x120(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x130(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovss 0x400(%rsp,%r13,4), %xmm3
vmovss %xmm3, 0x100(%r12,%r15,4)
vmovss %xmm2, 0x180(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x1a0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x1c0(%r12,%r15,4)
vmovss %xmm0, 0x1e0(%r12,%r15,4)
vmovss %xmm1, 0x200(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x220(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x240(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%r15,4)
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
leaq 0x506a0e(%rip), %r13 # 0x21272e4
movl (%rsp), %ecx
jmp 0x1c208f6
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
movl (%rsp), %ecx
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
addq $0x8, %r9
cmpl %r9d, %ecx
jg 0x1c1ffde
jmp 0x1c1ff33
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
movl (%rsp), %ecx
jmp 0x1c20937
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
vmovaps 0xe0(%rsp), %ymm17
jmp 0x1c20601
xorl %r11d, %r11d
jmp 0x1c205f5
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x4d0(%rsp)
vmovaps %ymm20, 0x1a0(%rsp)
vmovaps %ymm21, 0x180(%rsp)
movq %r9, 0xd0(%rsp)
movl %r11d, %eax
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovaps %ymm28, 0x160(%rsp)
vmovaps %ymm29, 0x140(%rsp)
movl %eax, 0x420(%rsp)
vmovss 0x100(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x210(%rsp)
vmovss 0x400(%rsp,%r13,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r13,4), %ymm1
vbroadcastss 0x3e0(%rsp,%r13,4), %ymm2
vmovss %xmm0, 0x100(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2cbd05(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2d05cd(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cffb0(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2d05bf(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2d05ab(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x4d0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1f0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x200(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2f1c73(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x30043d(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm4, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vmovaps %ymm2, 0x580(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovdqa 0x2e0(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
movq %rcx, %r11
movq 0xd8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
leaq 0x1c0(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movq 0x18(%r11), %rcx
movq %rcx, 0xa8(%rsp)
movq %rax, 0xb0(%rsp)
movq %r12, 0xb8(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl $0x8, 0xc8(%rsp)
movq 0x40(%r11), %rax
testq %rax, %rax
je 0x1c20c00
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd0(%rsp), %r9
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x508b18(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c20d6e
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c20cc8
testb $0x2, (%rcx)
jne 0x1c20c48
movq 0x80(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c20cc8
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd0(%rsp), %r9
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x508a50(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c20d6e
vptestmd %ymm0, %ymm0, %k1
movq 0xb8(%rsp), %rax
movq 0xc0(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c20d81
vmovd 0x210(%rsp), %xmm0
vmovd %xmm0, 0x100(%r12,%r15,4)
movl $0x1, %eax
shlxl %r13d, %eax, %eax
kmovd %eax, %k0
movzbl 0x420(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %eax
ktestb %k1, %k0
je 0x1c20e08
kmovd %eax, %k1
vbroadcastss 0x2cac5d(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
movl %eax, %r11d
kmovd %k0, %eax
andb %r11b, %al
movzbl %al, %eax
movzbl %r11b, %ecx
cmovnel %eax, %ecx
movl %r11d, %eax
tzcntl %ecx, %r13d
testb %al, %al
movq 0x80(%rsp), %rcx
jne 0x1c209c1
jmp 0x1c208cf
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x220(%rsp), %xmm8
jmp 0x1c1fc1d
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
jmp 0x1c1fc1d
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
jmp 0x1c1fc1d
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x420(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovss 0x100(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x2c0(%rsp)
vmovss 0x400(%rsp,%r11,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r11,4), %ymm1
vbroadcastss 0x3e0(%rsp,%r11,4), %ymm2
vmovss %xmm0, 0x100(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2cb7de(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2d00a6(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cfa89(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2d0098(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2d0084(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x200(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2f174c(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2fff16(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm4, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vmovaps %ymm2, 0x580(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovdqa 0x440(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
movq 0xd8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
leaq 0x1c0(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movq 0x240(%rsp), %r9
movq 0x18(%r9), %rcx
movq %rcx, 0xa8(%rsp)
movq %rax, 0xb0(%rsp)
movq %r12, 0xb8(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl $0x8, 0xc8(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movq %r11, 0x260(%rsp)
je 0x1c21149
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm26
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x5085cf(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c212c8
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c21222
testb $0x2, (%rcx)
jne 0x1c21191
movq 0x240(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c21222
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm26
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x5084f6(%rip), %rdx # 0x2129704
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c212c8
vptestmd %ymm0, %ymm0, %k1
movq 0xb8(%rsp), %rax
movq 0xc0(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c212db
vmovd 0x2c0(%rsp), %xmm0
vmovd %xmm0, 0x100(%r12,%r15,4)
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
movzbl %r13b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r13d
ktestb %k1, %k0
je 0x1c21359
kmovd %r13d, %k1
vbroadcastss 0x2ca706(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
testb %r13b, %r13b
movl (%rsp), %ecx
jne 0x1c20eef
jmp 0x1c1feeb
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 8>::occluded_t<embree::avx512::RibbonCurve1IntersectorK<embree::BezierCurveT, 8, 8>, embree::avx512::Occluded1KEpilogMU<8, 8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
      // Occlusion (shadow-ray) test of ray lane k against a block of up to 8 curve
      // primitives stored in quantized form (CurveNv<8>). Returns true on the FIRST
      // curve segment that occludes the ray; false if none do.
      static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
      {
        vfloat<M> tNear;
        // Conservative bounding-box test of all M curves at once; 'valid' marks the
        // candidate curves whose bounds the ray enters, 'tNear' their entry distances.
        vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
        const size_t N = prim.N;
        size_t mask = movemask(valid);
        // Process candidates one bit at a time until a hit is found or none remain.
        while (mask)
        {
          // bscf pops the lowest set bit: i is the candidate's index and the bit is
          // cleared from 'mask' so the loop advances.
          const size_t i = bscf(mask);
          STAT3(shadow.trav_prims,1,1,1);
          const unsigned int geomID = prim.geomID(N);
          const unsigned int primID = prim.primID(N)[i];
          const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
          // Load the four control points of candidate i (Vec3ff: xyz position + radius in w).
          const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
          const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
          const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
          const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
          // Peek at the next one or two remaining candidates and prefetch their
          // control-point data so it is in cache if the current test misses.
          size_t mask1 = mask;
          const size_t i1 = bscf(mask1);
          if (mask) {
            // Next candidate i1: prefetch into L1 (likely needed on the next iteration).
            prefetchL1(&prim.vertices(i1,N)[0]);
            prefetchL1(&prim.vertices(i1,N)[4]);
            if (mask1) {
              // Candidate after that: prefetch only into L2 (further away in time).
              const size_t i2 = bsf(mask1);
              prefetchL2(&prim.vertices(i2,N)[0]);
              prefetchL2(&prim.vertices(i2,N)[4]);
            }
          }
          // Exact ribbon-curve intersection; the epilog runs filters and commits the
          // occlusion. Any accepted hit terminates the shadow query immediately.
          if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
            return true;
          // ray.tfar may have been shortened by the intersection attempt above; drop
          // remaining candidates whose box entry distance now lies beyond it.
          mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
        }
        return false;
      }
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x800, %rsp # imm = 0x800
movq %r8, %r9
movq %rdx, %r14
movq %rsi, %r15
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rsi
leaq (%rsi,%rsi,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%r15,%r14,4), %xmm1
vmovss 0x80(%r15,%r14,4), %xmm2
vinsertps $0x10, 0x20(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x8(%rsp)
vinsertps $0x20, 0xc0(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rdx,%rdx,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rdx,%rsi,2), %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %r10
leal (%r10,%r10), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2f1277(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2ffa40(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2ff9af(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm4, %ymm5
vbroadcastss 0x2cfac6(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2cb1b4(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r8
subq %rdx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%r10), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rdx,%rdx), %r10
addq %rdx, %rsi
shlq $0x3, %rcx
subq %rdx, %rcx
movl %edx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rsi), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%r15,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2fe8c4(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x100(%r15,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2fe89f(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %edx, %ymm1
vpcmpgtd 0x33929b(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x7c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %sil
je 0x1c233e8
leaq (%r9,%rax), %r12
addq $0x6, %r12
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
addq $0x10, %r12
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r10
addq $0x20, %r10
leaq 0x580(%rsp), %rax
leaq 0xe0(%rax), %rax
movq %rax, 0x88(%rsp)
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x560(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
vmovaps %ymm21, 0x240(%rsp)
vmovaps %ymm20, 0x260(%rsp)
tzcntq %r13, %rax
blsrq %r13, %r13
movl 0x6(%r9,%rax,4), %r11d
shll $0x6, %eax
movq %r13, %rcx
movl 0x2(%r9), %r8d
movq 0x8(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq %r8, 0x80(%rsp)
movq (%rdx,%r8,8), %r8
vmovups (%r12,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c2178a
andq %r13, %rcx
tzcntq %r13, %rdx
shll $0x6, %edx
prefetcht0 (%r12,%rdx)
prefetcht0 0x40(%r12,%rdx)
testq %rcx, %rcx
je 0x1c2178a
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r12,%rcx)
prefetcht1 0x40(%r12,%rcx)
vmovups 0x10(%r12,%rax), %xmm13
vmovups 0x20(%r12,%rax), %xmm27
vmovups 0x30(%r12,%rax), %xmm23
movq %r8, 0x220(%rsp)
movl 0x248(%r8), %r8d
vmovss (%r15,%r14,4), %xmm0
vinsertps $0x1c, 0x20(%r15,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r15,%r14,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r10), %xmm4
vmovaps 0x10(%r10), %xmm5
vmovaps 0x20(%r10), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r8d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
leaq 0x505a67(%rip), %rdx # 0x21272e4
vmovups (%rdx,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2f0e72(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%rdx,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%rdx,%rbx), %ymm15
vbroadcastss %xmm10, %ymm29
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%rdx,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm26
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm26, %ymm4
vfmadd231ps %ymm29, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm29) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x507df6(%rip), %rcx # 0x2129704
vmovups (%rcx,%rbx), %ymm2
vmovups 0x484(%rcx,%rbx), %ymm17
vmovups 0x908(%rcx,%rbx), %ymm18
vmovups 0xd8c(%rcx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm26, %ymm6
vfmadd231ps %ymm29, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm29) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0x140(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x520(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x700(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x720(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x740(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x760(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0xc0(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x120(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x10(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2ff44e(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x2f0(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x2e0(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x2d0(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r8d, %xmm23, %xmm12
vmovaps %xmm12, 0x380(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2ff43d(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2cf4d9(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x2ff3c8(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0x60(%r15,%r14,4), %xmm9
vmovaps %ymm29, 0x7a0(%rsp)
vmovaps %ymm30, 0x780(%rsp)
vmovaps %ymm20, 0x6e0(%rsp)
vmovaps %ymm21, 0x6c0(%rsp)
vmovaps %ymm22, 0x6a0(%rsp)
je 0x1c22623
vmovaps %xmm9, 0x1c0(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%rdx,%rbx), %ymm3
vmovups 0x1694(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%rdx,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm26, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm29, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm29) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x140(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x520(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm23) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rcx,%rbx), %ymm10
vmovups 0x1b18(%rcx,%rbx), %ymm11
vmovups 0x1f9c(%rcx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x200(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm26, 0x1e0(%rsp)
vmulps %ymm13, %ymm26, %ymm14
vmovaps %ymm15, 0x160(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm29, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm29) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rcx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm23, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm23) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2ff204(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm26
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x120(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0xc0(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2ff184(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm24, %xmm24, %xmm24
vfmadd213ps %ymm24, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm24
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2ca9b7(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2ca995(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm24, %ymm9, %ymm13
vfmadd213ps %ymm24, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm24
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm24, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm13) + ymm3
vcmpleps %ymm24, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x240(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm26, %xmm14
je 0x1c23367
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2ca78b(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x100(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x200(%rsp), %ymm28
vmovaps 0x1e0(%rsp), %ymm29
je 0x1c233a2
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm8
je 0x1c233c4
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2ca6ff(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x500(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c2266c
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c22670
movl %r11d, 0xc0(%rsp)
vbroadcastss 0x2ce956(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x500(%rsp), %ymm1
vfmadd132ps 0x2cef6f(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x3a0(%rsp)
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm2, 0x3e0(%rsp)
movl $0x0, 0x400(%rsp)
movl %r8d, 0x404(%rsp)
vmovaps %xmm7, 0x410(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x2e0(%rsp), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vmovdqa 0x2d0(%rsp), %xmm0
vmovdqa %xmm0, 0x440(%rsp)
movb %al, 0x450(%rsp)
movl 0x120(%r15,%r14,4), %ecx
movq 0x220(%rsp), %r11
testl %ecx, 0x34(%r11)
je 0x1c2267b
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c22146
movb $0x1, %r11b
movq 0x220(%rsp), %rcx
cmpq $0x0, 0x48(%rcx)
je 0x1c2267e
vaddps 0x2fedf2(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2ca5be(%rip), %xmm1 # 0x1eec714
vdivss 0x380(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovaps %ymm2, 0x4a0(%rsp)
movzbl %al, %ecx
tzcntq %rcx, %r11
movq 0x80(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x320(%rsp)
vmovaps 0x420(%rsp), %xmm0
vmovaps %xmm0, 0x110(%rsp)
movl 0xc0(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x300(%rsp)
movq %r11, %rax
vmovaps 0x430(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps 0x440(%rsp), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
movb $0x1, %r11b
vmovaps %xmm14, 0xf0(%rsp)
movq %r9, 0x38(%rsp)
movq %rdi, 0x30(%rsp)
movb %sil, 0x3(%rsp)
movq %r10, 0x28(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movq %rcx, 0x360(%rsp)
movl %r11d, 0x120(%rsp)
vmovss 0x100(%r15,%r14,4), %xmm10
vmovss 0x4a0(%rsp,%rax,4), %xmm0
vbroadcastss 0x460(%rsp,%rax,4), %ymm1
movq %rax, 0x4c0(%rsp)
vbroadcastss 0x480(%rsp,%rax,4), %ymm2
vmovss %xmm0, 0x100(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2ca48f(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2ced57(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2ce73a(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2ced49(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2ced35(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1a0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x100(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x110(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2f03fd(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2febc7(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x580(%rsp)
vmovaps %ymm4, 0x5a0(%rsp)
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps %ymm1, 0x5e0(%rsp)
vmovaps %ymm2, 0x600(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmovdqa 0x320(%rsp), %ymm0
vmovdqa %ymm0, 0x640(%rsp)
movq 0x88(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x660(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x680(%rsp)
vmovaps 0x560(%rsp), %ymm0
vmovaps %ymm0, 0x180(%rsp)
leaq 0x180(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x220(%rsp), %r11
movq 0x18(%r11), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x8, 0xb8(%rsp)
movq 0x48(%r11), %rax
testq %rax, %rax
movq %rdi, %r11
vmovss %xmm10, 0x340(%rsp)
je 0x1c224ac
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x340(%rsp), %xmm10
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x160(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm29
vmovaps 0x200(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x240(%rsp), %ymm21
vmovaps 0x260(%rsp), %ymm20
vxorps %xmm24, %xmm24, %xmm24
leaq 0x504e4c(%rip), %rdx # 0x21272e4
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
vmovdqa 0x180(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c225e2
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
je 0x1c2259a
testb $0x2, (%rcx)
jne 0x1c224f9
movq 0x220(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c2259a
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x340(%rsp), %xmm10
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x160(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm29
vmovaps 0x200(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x240(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm24, %xmm24, %xmm24
leaq 0x504d5e(%rip), %rdx # 0x21272e4
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
movq %r11, %rdi
vmovdqa 0x180(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0xa8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2ca5be(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
movq 0x360(%rsp), %rcx
je 0x1c225f9
jmp 0x1c233d7
movq %r11, %rdi
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
movq 0x360(%rsp), %rcx
vmovss %xmm10, 0x100(%r15,%r14,4)
movq 0x4c0(%rsp), %rax
btcq %rax, %rcx
tzcntq %rcx, %rax
setae %r11b
jae 0x1c2222b
jmp 0x1c233df
xorl %eax, %eax
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x240(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm17
vmovaps %ymm26, %ymm29
vmovaps %xmm12, %xmm14
jmp 0x1c22690
xorl %eax, %eax
jmp 0x1c22690
xorl %eax, %eax
leaq 0x50708b(%rip), %rcx # 0x2129704
jmp 0x1c22690
xorl %r11d, %r11d
leaq 0x50707f(%rip), %rcx # 0x2129704
movl %r11d, %eax
movl 0xc0(%rsp), %r11d
cmpl $0x9, %r8d
jge 0x1c226c6
testb $0x1, %al
jne 0x1c233e8
vmovaps 0x7c0(%rsp), %ymm0
vcmpleps 0x100(%r15,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne %sil
jne 0x1c216ff
jmp 0x1c233e8
vmovaps %ymm8, 0x160(%rsp)
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x220(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x360(%rsp)
vmovss 0x2ca012(%rip), %xmm0 # 0x1eec714
vdivss 0x380(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x340(%rsp)
movl %r11d, 0xc0(%rsp)
movl %eax, %r11d
movq 0x80(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
movl %r11d, %eax
movl 0xc0(%rsp), %r11d
vpbroadcastd %r11d, %ymm0
vmovdqa %ymm0, 0x4c0(%rsp)
movl $0x8, %r11d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movl %eax, 0x120(%rsp)
vpbroadcastd %r11d, %ymm0
vpor 0x33819f(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x220(%rsp), %ymm0, %k1
movq %rcx, %rax
leaq (%rbx,%rdx), %rcx
vmovups (%rcx,%r11,4), %ymm3
vmovups 0x484(%rcx,%r11,4), %ymm10
vmovups 0x908(%rcx,%r11,4), %ymm11
vmovups 0xd8c(%rcx,%r11,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x700(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x7a0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x780(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x720(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x520(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x740(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rax), %rax
vmovups (%rax,%r11,4), %ymm2
vmovups 0x484(%rax,%r11,4), %ymm13
vmovaps 0x760(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r11,4), %ymm14
vmovups 0xd8c(%rax,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm26
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c232d8
vmovaps %ymm23, %ymm16
vmovaps 0x160(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x6a0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x6c0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x6e0(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r11,4), %ymm12
vmovups 0x1694(%rcx,%r11,4), %ymm13
vmovups 0x1b18(%rcx,%r11,4), %ymm14
vmovups 0x1f9c(%rcx,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm26, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm26) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm27, %ymm25
vmovaps %ymm19, %ymm27
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r11,4), %ymm13
vmovups 0x1b18(%rax,%r11,4), %ymm14
vmovups 0x1f9c(%rax,%r11,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r11,4), %ymm14
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps 0x520(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2fe472(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x1c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2fe3fe(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm24, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm24
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2c9c35(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2c9c13(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm24, %ymm14, %ymm13
vfmadd213ps %ymm24, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm24
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm24, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm13) + ymm6
vcmpleps %ymm24, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c23334
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2c9a22(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x360(%rsp), %ymm2, %k1
vcmpleps 0x100(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
leaq 0x5069da(%rip), %rcx # 0x2129704
je 0x1c2333f
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm17
je 0x1c23360
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2c99ac(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4e0(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
testw %ax, %ax
je 0x1c23300
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c232f9
vbroadcastss 0x2cdbf6(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4e0(%rsp), %ymm1
vfmadd132ps 0x2ce20f(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x3a0(%rsp)
vmovaps %ymm1, 0x4e0(%rsp)
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
movl %r11d, 0x400(%rsp)
movl %r8d, 0x404(%rsp)
vmovaps %xmm7, 0x410(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x2e0(%rsp), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vmovdqa 0x2d0(%rsp), %xmm0
vmovdqa %xmm0, 0x440(%rsp)
movb %al, 0x450(%rsp)
movq 0x8(%rsp), %rcx
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0x80(%rsp), %r8
movq (%rcx,%r8,8), %r8
movl 0x120(%r15,%r14,4), %ecx
movq %r8, 0xc0(%rsp)
testl %ecx, 0x34(%r8)
je 0x1c23309
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
leaq 0x506859(%rip), %rcx # 0x2129704
jne 0x1c22ec3
movq 0xc0(%rsp), %r8
cmpq $0x0, 0x48(%r8)
movb $0x1, %r8b
je 0x1c23313
vmovaps %ymm29, 0x1e0(%rsp)
vmovaps %ymm28, 0x200(%rsp)
movq %r10, 0x28(%rsp)
movb %sil, 0x3(%rsp)
movq %rdi, 0x30(%rsp)
movq %r9, 0x38(%rsp)
vmovaps %ymm20, 0x260(%rsp)
vaddps 0x2fe047(%rip), %ymm20, %ymm0 # 0x1f20f40
movq %r11, 0x1b8(%rsp)
vcvtsi2ss %r11d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x340(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovaps %ymm21, 0x240(%rsp)
vmovaps %ymm21, 0x4a0(%rsp)
movzbl %al, %edi
vmovaps 0x420(%rsp), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
tzcntq %rdi, %r8
vmovaps 0x430(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x440(%rsp), %xmm0
vmovaps %xmm0, 0x550(%rsp)
movb $0x1, %dl
vmovss 0x100(%r15,%r14,4), %xmm8
vmovss 0x4a0(%rsp,%r8,4), %xmm0
vbroadcastss 0x460(%rsp,%r8,4), %ymm1
vbroadcastss 0x480(%rsp,%r8,4), %ymm2
vmovss %xmm0, 0x100(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2c974d(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2ce015(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cd9f8(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2ce007(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2cdff3(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x550(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0xf0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x1a0(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2ef6bb(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2fde85(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x580(%rsp)
vmovaps %ymm4, 0x5a0(%rsp)
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps %ymm1, 0x5e0(%rsp)
vmovaps %ymm2, 0x600(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x640(%rsp)
movq 0x88(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x660(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x680(%rsp)
vmovaps 0x560(%rsp), %ymm0
vmovaps %ymm0, 0x180(%rsp)
leaq 0x180(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0xc0(%rsp), %rsi
movq 0x18(%rsi), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x8, 0xb8(%rsp)
movq 0x48(%rsi), %rax
testq %rax, %rax
movl %edx, 0x320(%rsp)
movq %rdi, 0x110(%rsp)
movq %r8, 0x300(%rsp)
vmovss %xmm8, 0x100(%rsp)
je 0x1c2319c
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x100(%rsp), %xmm8
movq 0x300(%rsp), %r8
movq 0x110(%rsp), %rdi
movl 0x320(%rsp), %edx
vmovaps 0x10(%rsp), %xmm7
vmovdqa 0x180(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c23240
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c23208
testb $0x2, (%rcx)
jne 0x1c231d5
movq 0xc0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c23208
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x100(%rsp), %xmm8
movq 0x300(%rsp), %r8
movq 0x110(%rsp), %rdi
movl 0x320(%rsp), %edx
vmovaps 0x10(%rsp), %xmm7
vmovdqa 0x180(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0xa8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2c9953(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
jne 0x1c2325c
vmovss %xmm8, 0x100(%r15,%r14,4)
btcq %r8, %rdi
tzcntq %rdi, %r8
setae %dl
jae 0x1c22f84
andb $0x1, %dl
movq 0x38(%rsp), %r9
movq 0x30(%rsp), %rdi
movb 0x3(%rsp), %sil
movq 0x28(%rsp), %r10
movl %edx, %r8d
leaq 0x504067(%rip), %rdx # 0x21272e4
leaq 0x506480(%rip), %rcx # 0x2129704
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x240(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x200(%rsp), %ymm28
vmovaps 0x1e0(%rsp), %ymm29
movq 0x1b8(%rsp), %r11
jmp 0x1c23313
leaq 0x506425(%rip), %rcx # 0x2129704
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm26, %ymm17
jmp 0x1c23300
leaq 0x506404(%rip), %rcx # 0x2129704
movl 0x120(%rsp), %eax
jmp 0x1c23322
xorl %r8d, %r8d
leaq 0x5063f1(%rip), %rcx # 0x2129704
movl 0x120(%rsp), %eax
orb %r8b, %al
movl 0x4(%rsp), %r8d
addq $0x8, %r11
cmpl %r11d, %r8d
jg 0x1c2276c
jmp 0x1c22696
xorl %eax, %eax
leaq 0x5063c7(%rip), %rcx # 0x2129704
jmp 0x1c23341
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm17
jmp 0x1c22d9c
xorl %eax, %eax
jmp 0x1c22d90
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x200(%rsp), %ymm28
vmovaps 0x1e0(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %xmm9
jmp 0x1c22035
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm8
jmp 0x1c22035
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
jmp 0x1c22035
movl 0x120(%rsp), %r11d
andb $0x1, %r11b
jmp 0x1c2267e
andb $0x1, %sil
movl %esi, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 16>::intersect_t<embree::avx512::RibbonCurve1IntersectorK<embree::BezierCurveT, 16, 8>, embree::avx512::Intersect1KEpilogMU<8, 16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayHitK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Intersect ray lane k of a K-wide ray packet against a leaf holding up to M
// quantized curves (CurveNv layout), testing each surviving candidate with the
// supplied curve Intersector and committing hits through Epilog.
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Coarse bounds test of all M curve slots at once; 'valid' marks slots whose
// quantized bounds the ray enters, with per-slot entry distance in tNear.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Iterate over the set bits of 'mask' (the surviving candidate slots).
while (mask)
{
// bscf: returns the index of the lowest set bit and clears it from 'mask'
// (NOTE(review): embree bit-scan-and-clear helper — behavior inferred from
// this loop's use; confirm against embree's bit utilities).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
// All curves in the leaf share one geomID; primID is per-slot.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i (Vec3ff = position + radius in w).
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch of the next one/two candidates' vertex data so their
// loads overlap with the (expensive) intersection test below: the very next
// candidate goes to L1, the one after to L2. 'mask1' is a scratch copy so
// the main loop's 'mask' is not disturbed.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Precise curve intersection; Epilog records the hit into 'ray' on success.
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// Re-cull the remaining candidates: a committed hit may have shortened
// ray.tfar, so drop slots whose bounds entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x9c0, %rsp # imm = 0x9C0
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0xb8(%rsp)
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x100(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x40(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2ef1ee(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2fd9bc(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2fd931(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x2cda43(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2c9131(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc0(%r12,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2fc83d(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x200(%r12,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2fc815(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x337217(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x6e0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c25865
leaq (%r8,%rax), %rsi
addq $0x6, %rsi
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %rsi
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0xb8(%rsp), %rcx
leaq (%rcx,%rax), %rdi
addq $0x40, %rdi
leaq 0x740(%rsp), %rax
leaq 0x1c0(%rax), %rax
movq %rax, 0xf8(%rsp)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x700(%rsp)
leaq 0x503b57(%rip), %r13 # 0x21272e4
vmovaps %ymm6, 0x80(%rsp)
vmovaps %ymm3, 0x60(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
vmovaps %ymm20, 0x1c0(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x24(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r8), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x48(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%rsi,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c23813
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%rsi,%rdx)
prefetcht0 0x40(%rsi,%rdx)
testq %rcx, %rcx
je 0x1c23813
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%rsi,%rcx)
prefetcht1 0x40(%rsi,%rcx)
vmovups 0x10(%rsi,%rax), %xmm13
vmovups 0x20(%rsi,%rax), %xmm26
vmovups 0x30(%rsi,%rax), %xmm23
movq %rbx, 0x240(%rsp)
movl 0x248(%rbx), %edx
vmovss (%r12,%r15,4), %xmm0
vinsertps $0x1c, 0x40(%r12,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r12,%r15,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm26, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
movl %edx, %ecx
vmovups (%r13,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2eedef(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r13,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r13,%rbx), %ymm15
vbroadcastss %xmm10, %ymm31
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%r13,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm29
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm29, %ymm4
vfmadd231ps %ymm31, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm31) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x505d71(%rip), %rdx # 0x2129704
vmovups (%rdx,%rbx), %ymm2
vmovups 0x484(%rdx,%rbx), %ymm17
vmovups 0x908(%rdx,%rbx), %ymm18
vmovups 0xd8c(%rdx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm29, %ymm6
vfmadd231ps %ymm31, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm31) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0x100(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x480(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm27
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm27, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm26, %xmm26, %xmm0 # xmm0 = xmm26[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x620(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x640(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x660(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x680(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x260(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm27, 0x2c0(%rsp)
vfmadd231ps %ymm27, %ymm27, %ymm21 # ymm21 = (ymm27 * ymm27) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x50(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2fd3c9(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x150(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm26, %xmm3
vmovaps %xmm26, 0x140(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x130(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %ecx, %xmm23, %xmm12
vmovaps %xmm12, 0x4c0(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2fd3b8(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2cd452(%rip), %xmm3, %xmm26 # 0x1ef0fe4
vbroadcastss 0x2fd341(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0xc0(%r12,%r15,4), %xmm8
vmovaps %ymm31, 0x6c0(%rsp)
vmovaps %ymm30, 0x6a0(%rsp)
vmovaps %ymm20, 0x600(%rsp)
vmovaps %ymm21, 0x5e0(%rsp)
vmovaps %ymm22, 0x5c0(%rsp)
vmovaps %ymm3, 0x5a0(%rsp)
je 0x1c243a1
vmovaps %xmm8, 0x220(%rsp)
vmovaps %xmm26, 0xa0(%rsp)
vmovaps %ymm3, %ymm26
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm26, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r13,%rbx), %ymm3
vmovups 0x1694(%r13,%rbx), %ymm10
vmovups 0x1b18(%r13,%rbx), %ymm11
vmovups 0x1f9c(%r13,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm29, %ymm8
vmulps %ymm12, %ymm26, %ymm12
vfmadd231ps %ymm31, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm31) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x100(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x480(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovups 0x1f9c(%rdx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x180(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm29, 0x160(%rsp)
vmulps %ymm13, %ymm29, %ymm14
vmulps %ymm13, %ymm26, %ymm13
vmovaps 0xa0(%rsp), %xmm26
vfmadd231ps %ymm31, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm31) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2fd16a(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm26, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x2c0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x260(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2fd0f0(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm27, %xmm27, %xmm27
vfmadd213ps %ymm27, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm27
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2c8923(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2c8901(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm27, %ymm9, %ymm13
vfmadd213ps %ymm27, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm27
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm27, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm13) + ymm3
vcmpleps %ymm27, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x1a0(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
je 0x1c252fc
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2c86fd(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x220(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm29
je 0x1c25332
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x50(%rsp), %xmm7
je 0x1c2534f
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2c8677(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x460(%rsp)
movzbl %al, %r13d
vmovaps %ymm2, %ymm3
testw %r13w, %r13w
je 0x1c24398
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xb8(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r13b
je 0x1c24398
vbroadcastss 0x2cc8c7(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x460(%rsp), %ymm1
vfmadd132ps 0x2ccee0(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x2e0(%rsp)
vmovaps %ymm1, 0x460(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm3, 0x320(%rsp)
movl $0x0, 0x340(%rsp)
movl %ecx, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x150(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x380(%rsp)
movb %r13b, 0x390(%rsp)
movl 0x240(%r12,%r15,4), %eax
movq 0x240(%rsp), %r9
testl %eax, 0x34(%r9)
je 0x1c24398
movl %ecx, 0x20(%rsp)
vaddps 0x2fcd85(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2c8551(%rip), %xmm1 # 0x1eec714
vdivss 0x4c0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm3, 0x3e0(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2c7819(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm6, 0x80(%rsp)
vmovaps %ymm3, 0x60(%rsp)
jne 0x1c25366
movq 0x240(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c25366
vmovss 0x3a0(%rsp,%r11,4), %xmm0
vmovss 0x3c0(%rsp,%r11,4), %xmm1
vmovss 0x2c8481(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2ccd49(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cc72c(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2ccd3b(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2ccd27(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x130(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x140(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vmovaps 0x80(%rsp), %ymm6
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x150(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovss 0x3e0(%rsp,%r11,4), %xmm3
vmovss %xmm3, 0x200(%r12,%r15,4)
vmovaps 0x60(%rsp), %ymm3
vmovss %xmm2, 0x300(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x340(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x380(%r12,%r15,4)
vmovss %xmm0, 0x3c0(%r12,%r15,4)
vmovss %xmm1, 0x400(%r12,%r15,4)
movl 0x24(%rsp), %eax
movl %eax, 0x440(%r12,%r15,4)
movq 0x48(%rsp), %rax
movl %eax, 0x480(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%r15,4)
movl 0x20(%rsp), %ecx
leaq 0x502f45(%rip), %r13 # 0x21272e4
jmp 0x1c243de
vxorps %xmm27, %xmm27, %xmm27
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x100(%rsp), %ymm17
cmpl $0x9, %ecx
jge 0x1c2440a
vmovaps 0x6e0(%rsp), %ymm0
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c2378d
jmp 0x1c25865
vpbroadcastd %ecx, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss %xmm26, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovss 0x2c82d6(%rip), %xmm0 # 0x1eec714
vdivss 0x4c0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
movq 0x48(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x4c0(%rsp)
movl 0x24(%rsp), %eax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x540(%rsp)
movl $0x8, %r9d
vmovaps %ymm6, 0x80(%rsp)
vmovaps %ymm3, 0x60(%rsp)
movl %ecx, 0x20(%rsp)
vpbroadcastd %r9d, %ymm0
vpor 0x33647f(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x260(%rsp), %ymm0, %k1
leaq (%rbx,%r13), %rcx
vmovups (%rcx,%r9,4), %ymm3
vmovups 0x484(%rcx,%r9,4), %ymm10
vmovups 0x908(%rcx,%r9,4), %ymm11
vmovups 0xd8c(%rcx,%r9,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x620(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x6c0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x6a0(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm26) + ymm4
vmovaps 0x640(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x480(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x660(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdx), %rax
vmovups (%rax,%r9,4), %ymm2
vmovups 0x484(%rax,%r9,4), %ymm13
vmovaps 0x680(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r9,4), %ymm14
vmovups 0xd8c(%rax,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm26, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm26) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c24d9b
vmovaps %ymm23, %ymm16
vmovaps 0x5a0(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x5c0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x5e0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x600(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r9,4), %ymm12
vmovups 0x1694(%rcx,%r9,4), %ymm13
vmovups 0x1b18(%rcx,%r9,4), %ymm14
vmovups 0x1f9c(%rcx,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm26, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm26) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm26, %ymm25
vmovaps %ymm19, %ymm26
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r9,4), %ymm13
vmovups 0x1b18(%rax,%r9,4), %ymm14
vmovups 0x1f9c(%rax,%r9,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r9,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x480(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2fc755(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x2c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2fc6e1(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm27, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm27
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2c7f18(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2c7ef6(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm27, %ymm14, %ymm13
vfmadd213ps %ymm27, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm27
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm27, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm13) + ymm6
vcmpleps %ymm27, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c24dcc
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2c7d05(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x240(%rsp), %ymm2, %k1
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl 0x20(%rsp), %ecx
je 0x1c24dea
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x100(%rsp), %ymm17
je 0x1c24e0f
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2c7c8f(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x440(%rsp)
movzbl %al, %r11d
vmovaps %ymm2, %ymm21
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
testw %r11w, %r11w
je 0x1c24dba
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xb8(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %r11b
je 0x1c24dba
movl %r11d, %eax
vbroadcastss 0x2cbec8(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x440(%rsp), %ymm1
vfmadd132ps 0x2cc4e1(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x2e0(%rsp)
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm21, 0x320(%rsp)
movl %r9d, 0x340(%rsp)
movl %ecx, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x150(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x380(%rsp)
movb %al, 0x390(%rsp)
movl %r11d, %r13d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x48(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x240(%r12,%r15,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c24d8e
vaddps 0x2fc381(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r9d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x220(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2c6e18(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movq %rcx, 0xa0(%rsp)
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r13d
movq 0xa0(%rsp), %rcx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c24e17
cmpq $0x0, 0x40(%rcx)
jne 0x1c24e17
vmovss 0x3a0(%rsp,%r13,4), %xmm0
vmovss 0x3c0(%rsp,%r13,4), %xmm1
vmovss 0x2c7a87(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2cc34f(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cbd32(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2cc341(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x2cc32d(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x130(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x140(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x150(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd213ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm3
vmovss 0x3e0(%rsp,%r13,4), %xmm3
vmovss %xmm3, 0x200(%r12,%r15,4)
vmovss %xmm2, 0x300(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x340(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x380(%r12,%r15,4)
vmovss %xmm0, 0x3c0(%r12,%r15,4)
vmovss %xmm1, 0x400(%r12,%r15,4)
movl 0x24(%rsp), %eax
movl %eax, 0x440(%r12,%r15,4)
movq 0x48(%rsp), %rax
movl %eax, 0x480(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%r15,4)
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
leaq 0x50254f(%rip), %r13 # 0x21272e4
movl 0x20(%rsp), %ecx
jmp 0x1c24dba
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
movl 0x20(%rsp), %ecx
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
addq $0x8, %r9
cmpl %r9d, %ecx
jg 0x1c24493
jmp 0x1c243e3
xorl %r11d, %r11d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm7
movl 0x20(%rsp), %ecx
jmp 0x1c24e02
xorl %r11d, %r11d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x100(%rsp), %ymm17
jmp 0x1c24abd
xorl %r11d, %r11d
jmp 0x1c24aae
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovaps %ymm20, 0x1c0(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
movq %r9, 0xf0(%rsp)
movl %r11d, %eax
movq %r8, 0x40(%rsp)
movq %r10, 0x38(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
vmovaps %ymm28, 0x180(%rsp)
vmovaps %ymm29, 0x160(%rsp)
movl %eax, 0x500(%rsp)
vmovss 0x200(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x210(%rsp)
vmovss 0x3e0(%rsp,%r13,4), %xmm0
vbroadcastss 0x3a0(%rsp,%r13,4), %zmm1
vbroadcastss 0x3c0(%rsp,%r13,4), %zmm2
vmovss %xmm0, 0x200(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2c7838(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2cc100(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cbae3(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2cc0f2(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2cc0de(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x4b0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1f0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x200(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2ed7a4(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm4
vbroadcastss 0x2fbf6c(%rip), %zmm5 # 0x1f20edc
vpermps %zmm0, %zmm5, %zmm0
vmovaps %zmm3, 0x740(%rsp)
vmovaps %zmm4, 0x780(%rsp)
vmovaps %zmm0, 0x7c0(%rsp)
vmovaps %zmm1, 0x800(%rsp)
vmovaps %zmm2, 0x840(%rsp)
vmovaps 0x540(%rsp), %zmm0
vmovaps %zmm0, 0x880(%rsp)
vmovdqa64 0x4c0(%rsp), %zmm0
vmovdqa64 %zmm0, 0x8c0(%rsp)
movq %rcx, %r11
movq 0xf8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x900(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x940(%rsp)
vmovaps 0x700(%rsp), %zmm0
vmovaps %zmm0, 0x400(%rsp)
leaq 0x400(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x18(%r11), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %r12, 0xd8(%rsp)
leaq 0x740(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x10, 0xe8(%rsp)
movq 0x40(%r11), %rax
testq %rax, %rax
je 0x1c250d1
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %r9
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x504647(%rip), %rdx # 0x2129704
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
je 0x1c2524a
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c251a3
testb $0x2, (%rcx)
jne 0x1c25120
movq 0xa0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c251a3
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %r9
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x504575(%rip), %rdx # 0x2129704
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c2524a
movq 0xd8(%rsp), %rax
movq 0xe0(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c2525d
vmovd 0x210(%rsp), %xmm0
vmovd %xmm0, 0x200(%r12,%r15,4)
movl $0x1, %eax
shlxl %r13d, %eax, %eax
kmovd %eax, %k0
movzbl 0x500(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %eax
ktestb %k1, %k0
je 0x1c252e7
kmovd %eax, %k1
vbroadcastss 0x2c677e(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
movl %eax, %r11d
kmovd %k0, %eax
andb %r11b, %al
movzbl %al, %eax
movzbl %r11b, %ecx
cmovnel %eax, %ecx
movl %r11d, %eax
tzcntl %ecx, %r13d
testb %al, %al
movq 0xa0(%rsp), %rcx
jne 0x1c24e8c
jmp 0x1c24d8e
xorl %r13d, %r13d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x220(%rsp), %xmm8
jmp 0x1c240c2
xorl %r13d, %r13d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm7
jmp 0x1c240c2
xorl %r13d, %r13d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
jmp 0x1c240c2
movq 0x48(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x540(%rsp)
movl 0x24(%rsp), %eax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x500(%rsp)
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r8, 0x40(%rsp)
movq %r10, 0x38(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
vmovss 0x200(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x2c0(%rsp)
vmovss 0x3e0(%rsp,%r11,4), %xmm0
vbroadcastss 0x3a0(%rsp,%r11,4), %zmm1
vbroadcastss 0x3c0(%rsp,%r11,4), %zmm2
vmovss %xmm0, 0x200(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2c72f6(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2cbbbe(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2cb5a1(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2cbbb0(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2cbb9c(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x200(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2ed262(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm4
vbroadcastss 0x2fba2a(%rip), %zmm5 # 0x1f20edc
vpermps %zmm0, %zmm5, %zmm0
vmovaps %zmm3, 0x740(%rsp)
vmovaps %zmm4, 0x780(%rsp)
vmovaps %zmm0, 0x7c0(%rsp)
vmovaps %zmm1, 0x800(%rsp)
vmovaps %zmm2, 0x840(%rsp)
vmovaps 0x500(%rsp), %zmm0
vmovaps %zmm0, 0x880(%rsp)
vmovdqa64 0x540(%rsp), %zmm0
vmovdqa64 %zmm0, 0x8c0(%rsp)
movq 0xf8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x900(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x940(%rsp)
vmovaps 0x700(%rsp), %zmm0
vmovaps %zmm0, 0x400(%rsp)
leaq 0x400(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x240(%rsp), %r9
movq 0x18(%r9), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %r12, 0xd8(%rsp)
leaq 0x740(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x10, 0xe8(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movq %r11, 0x260(%rsp)
je 0x1c25635
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm26
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x5040e3(%rip), %rdx # 0x2129704
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
je 0x1c257bf
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c25718
testb $0x2, (%rcx)
jne 0x1c25684
movq 0x240(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c25718
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm26
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x504000(%rip), %rdx # 0x2129704
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c257bf
movq 0xd8(%rsp), %rax
movq 0xe0(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c257d2
vmovd 0x2c0(%rsp), %xmm0
vmovd %xmm0, 0x200(%r12,%r15,4)
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
movzbl %r13b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r13d
ktestb %k1, %k0
je 0x1c25853
kmovd %r13d, %k1
vbroadcastss 0x2c620c(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
testb %r13b, %r13b
movl 0x20(%rsp), %ecx
jne 0x1c253d5
jmp 0x1c24398
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 16>::occluded_t<embree::avx512::RibbonCurve1IntersectorK<embree::BezierCurveT, 16, 8>, embree::avx512::Occluded1KEpilogMU<8, 16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Occlusion (shadow-ray) test of ray k from a K-wide ray packet against one
// CurveNv<M> primitive block. Returns true as soon as any curve in the block
// is hit (early-out); returns false when no curve occludes the ray.
//
// pre     - per-ray precomputed data forwarded to the per-curve intersector
// ray     - K-wide ray packet; only lane k is tested here
// k       - lane index of the active ray within the packet
// context - query context; provides the scene used to resolve geomID
// prim    - the curve primitive block (up to M curves)
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
  vfloat<M> tNear;
  // Broad-phase test of ray k against all M curve bounds at once; 'valid'
  // marks candidate curves and tNear receives their entry distances.
  vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
  const size_t N = prim.N;
  // Iterate over the set bits of the candidate mask, one curve at a time.
  size_t mask = movemask(valid);
  while (mask)
  {
    // bscf: extract index of lowest set bit and clear it from 'mask'.
    const size_t i = bscf(mask);
    STAT3(shadow.trav_prims,1,1,1);
    const unsigned int geomID = prim.geomID(N);
    const unsigned int primID = prim.primID(N)[i];
    const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
    // Load the four control points of curve i.
    const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
    const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
    const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
    const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
    // Software prefetch of the next candidate's control points (L1) and the
    // one after that (L2) to hide memory latency while intersecting curve i.
    size_t mask1 = mask;
    const size_t i1 = bscf(mask1);
    if (mask) {
      prefetchL1(&prim.vertices(i1,N)[0]);
      prefetchL1(&prim.vertices(i1,N)[4]);
      if (mask1) {
        const size_t i2 = bsf(mask1);
        prefetchL2(&prim.vertices(i2,N)[0]);
        prefetchL2(&prim.vertices(i2,N)[4]);
      }
    }
    // Exact curve intersection; for occlusion queries any hit terminates.
    if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
      return true;
    // The epilog may have shortened ray.tfar[k]; drop remaining candidates
    // whose bounds-entry distance now lies beyond the ray extent.
    mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
  }
  return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xa00, %rsp # imm = 0xA00
movq %r8, %r9
movq %rdx, %r14
movq %rsi, %r15
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rsi
leaq (%rsi,%rsi,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%r15,%r14,4), %xmm1
vmovss 0x100(%r15,%r14,4), %xmm2
vinsertps $0x10, 0x40(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x8(%rsp)
vinsertps $0x20, 0x180(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rdx,%rdx,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rdx,%rsi,2), %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %r10
leal (%r10,%r10), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2ecd78(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2fb541(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2fb4b0(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm4, %ymm5
vbroadcastss 0x2cb5c7(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2c6cb5(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r8
subq %rdx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%r10), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rdx,%rdx), %r10
addq %rdx, %rsi
shlq $0x3, %rcx
subq %rdx, %rcx
movl %edx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rsi), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc0(%r15,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2fa3c5(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x200(%r15,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2fa39d(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %edx, %ymm1
vpcmpgtd 0x334d99(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x720(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %sil
je 0x1c27900
leaq (%r9,%rax), %r12
addq $0x6, %r12
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
addq $0x10, %r12
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r10
addq $0x40, %r10
leaq 0x780(%rsp), %rax
leaq 0x1c0(%rax), %rax
movq %rax, 0x88(%rsp)
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x740(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
vmovaps %ymm21, 0x220(%rsp)
vmovaps %ymm20, 0x240(%rsp)
tzcntq %r13, %rax
blsrq %r13, %r13
movl 0x6(%r9,%rax,4), %r11d
shll $0x6, %eax
movq %r13, %rcx
movl 0x2(%r9), %r8d
movq 0x8(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq %r8, 0x80(%rsp)
movq (%rdx,%r8,8), %r8
vmovups (%r12,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c25c8b
andq %r13, %rcx
tzcntq %r13, %rdx
shll $0x6, %edx
prefetcht0 (%r12,%rdx)
prefetcht0 0x40(%r12,%rdx)
testq %rcx, %rcx
je 0x1c25c8b
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r12,%rcx)
prefetcht1 0x40(%r12,%rcx)
vmovups 0x10(%r12,%rax), %xmm13
vmovups 0x20(%r12,%rax), %xmm27
vmovups 0x30(%r12,%rax), %xmm23
movq %r8, 0x200(%rsp)
movl 0x248(%r8), %r8d
vmovss (%r15,%r14,4), %xmm0
vinsertps $0x1c, 0x40(%r15,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r15,%r14,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r10), %xmm4
vmovaps 0x10(%r10), %xmm5
vmovaps 0x20(%r10), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r8d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
leaq 0x501563(%rip), %rdx # 0x21272e4
vmovups (%rdx,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2ec96e(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%rdx,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%rdx,%rbx), %ymm15
vbroadcastss %xmm10, %ymm29
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%rdx,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm26
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm26, %ymm4
vfmadd231ps %ymm29, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm29) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x5038f2(%rip), %rcx # 0x2129704
vmovups (%rcx,%rbx), %ymm2
vmovups 0x484(%rcx,%rbx), %ymm17
vmovups 0x908(%rcx,%rbx), %ymm18
vmovups 0xd8c(%rcx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm26, %ymm6
vfmadd231ps %ymm29, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm29) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0x140(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x4c0(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x280(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x260(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x660(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x680(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x6a0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x6c0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0xc0(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x120(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x10(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2faf4a(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x2d0(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x2c0(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x2b0(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r8d, %xmm23, %xmm12
vmovaps %xmm12, 0x580(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2faf39(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2cafd5(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x2faec4(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0xc0(%r15,%r14,4), %xmm9
vmovaps %ymm29, 0x700(%rsp)
vmovaps %ymm30, 0x6e0(%rsp)
vmovaps %ymm20, 0x640(%rsp)
vmovaps %ymm21, 0x620(%rsp)
vmovaps %ymm22, 0x600(%rsp)
je 0x1c26b31
vmovaps %xmm9, 0x1a0(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%rdx,%rbx), %ymm3
vmovups 0x1694(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%rdx,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm26, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm29, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm29) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x140(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x4c0(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm23) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x280(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x260(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rcx,%rbx), %ymm10
vmovups 0x1b18(%rcx,%rbx), %ymm11
vmovups 0x1f9c(%rcx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x1e0(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm26, 0x1c0(%rsp)
vmulps %ymm13, %ymm26, %ymm14
vmovaps %ymm15, 0x160(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm29, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm29) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rcx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm23, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm23) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2facfd(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm26
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x120(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0xc0(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2fac7d(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm24, %xmm24, %xmm24
vfmadd213ps %ymm24, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm24
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2c64b0(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2c648e(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm24, %ymm9, %ymm13
vfmadd213ps %ymm24, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm24
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm24, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm13) + ymm3
vcmpleps %ymm24, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x220(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm26, %xmm14
je 0x1c2787f
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2c6284(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x1a0(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x200(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x1c0(%rsp), %ymm29
je 0x1c278ba
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm8
je 0x1c278dc
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2c61f5(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4a0(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c26b7a
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c26b7e
movl %r11d, 0xc0(%rsp)
vbroadcastss 0x2ca44c(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4a0(%rsp), %ymm1
vfmadd132ps 0x2caa65(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x320(%rsp)
vmovaps %ymm1, 0x4a0(%rsp)
vmovaps %ymm1, 0x340(%rsp)
vmovaps %ymm2, 0x360(%rsp)
movl $0x0, 0x380(%rsp)
movl %r8d, 0x384(%rsp)
vmovaps %xmm7, 0x390(%rsp)
vmovaps 0x2d0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps 0x2c0(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovdqa 0x2b0(%rsp), %xmm0
vmovdqa %xmm0, 0x3c0(%rsp)
movb %al, 0x3d0(%rsp)
movl 0x240(%r15,%r14,4), %ecx
movq 0x200(%rsp), %r11
testl %ecx, 0x34(%r11)
je 0x1c26b89
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c26650
movb $0x1, %r11b
movq 0x200(%rsp), %rcx
cmpq $0x0, 0x48(%rcx)
je 0x1c26b8c
vaddps 0x2fa8e8(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2c60b4(%rip), %xmm1 # 0x1eec714
vdivss 0x580(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps %ymm2, 0x420(%rsp)
movzbl %al, %ecx
tzcntq %rcx, %r11
movq 0x80(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x540(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x110(%rsp)
movl 0xc0(%rsp), %eax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x500(%rsp)
movq %r11, %rax
vmovaps 0x3b0(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x180(%rsp)
movb $0x1, %r11b
vmovaps %xmm14, 0xf0(%rsp)
movq %r9, 0x38(%rsp)
movq %rdi, 0x30(%rsp)
movb %sil, 0x3(%rsp)
movq %r10, 0x28(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movq %rcx, 0x300(%rsp)
movl %r11d, 0x120(%rsp)
vmovss 0x200(%r15,%r14,4), %xmm10
vmovss 0x420(%rsp,%rax,4), %xmm0
vbroadcastss 0x3e0(%rsp,%rax,4), %zmm1
movq %rax, 0x5c0(%rsp)
vbroadcastss 0x400(%rsp,%rax,4), %zmm2
vmovss %xmm0, 0x200(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2c5f85(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2ca84d(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2ca230(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2ca83f(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2ca82b(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x180(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x100(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x110(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2ebef1(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm4
vbroadcastss 0x2fa6b9(%rip), %zmm5 # 0x1f20edc
vpermps %zmm0, %zmm5, %zmm0
vmovaps %zmm3, 0x780(%rsp)
vmovaps %zmm4, 0x7c0(%rsp)
vmovaps %zmm0, 0x800(%rsp)
vmovaps %zmm1, 0x840(%rsp)
vmovaps %zmm2, 0x880(%rsp)
vmovaps 0x500(%rsp), %zmm0
vmovaps %zmm0, 0x8c0(%rsp)
vmovdqa64 0x540(%rsp), %zmm0
vmovdqa64 %zmm0, 0x900(%rsp)
movq 0x88(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x940(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x740(%rsp), %zmm0
vmovaps %zmm0, 0x440(%rsp)
leaq 0x440(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x200(%rsp), %r11
movq 0x18(%r11), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x780(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x10, 0xb8(%rsp)
movq 0x48(%r11), %rax
testq %rax, %rax
movq %rdi, %r11
vmovss %xmm10, 0x2e0(%rsp)
je 0x1c269ba
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x2e0(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x160(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %ymm29
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x260(%rsp), %ymm18
vmovaps 0x280(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x240(%rsp), %ymm20
vxorps %xmm24, %xmm24, %xmm24
leaq 0x50093e(%rip), %rdx # 0x21272e4
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
vmovdqa64 0x440(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c26af0
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
je 0x1c26aac
testb $0x2, (%rcx)
jne 0x1c26a0b
movq 0x200(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c26aac
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x2e0(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x160(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %ymm29
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x260(%rsp), %ymm18
vmovaps 0x280(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm24, %xmm24, %xmm24
leaq 0x50084c(%rip), %rdx # 0x21272e4
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
movq %r11, %rdi
vmovdqa64 0x440(%rsp), %zmm0
movq 0xa8(%rsp), %rax
vmovaps 0x200(%rax), %zmm1
vptestmd %zmm0, %zmm0, %k1
vbroadcastss 0x2c60ae(%rip), %zmm1 {%k1} # 0x1eecb84
vmovaps %zmm1, 0x200(%rax)
kortestw %k1, %k1
movq 0x300(%rsp), %rcx
je 0x1c26b07
jmp 0x1c278ef
movq %r11, %rdi
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
movq 0x300(%rsp), %rcx
vmovss %xmm10, 0x200(%r15,%r14,4)
movq 0x5c0(%rsp), %rax
btcq %rax, %rcx
tzcntq %rcx, %rax
setae %r11b
jae 0x1c26733
jmp 0x1c278f7
xorl %eax, %eax
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm17
vmovaps %ymm26, %ymm29
vmovaps %xmm12, %xmm14
jmp 0x1c26b9e
xorl %eax, %eax
jmp 0x1c26b9e
xorl %eax, %eax
leaq 0x502b7d(%rip), %rcx # 0x2129704
jmp 0x1c26b9e
xorl %r11d, %r11d
leaq 0x502b71(%rip), %rcx # 0x2129704
movl %r11d, %eax
movl 0xc0(%rsp), %r11d
cmpl $0x9, %r8d
jge 0x1c26bd7
testb $0x1, %al
jne 0x1c27900
vmovaps 0x720(%rsp), %ymm0
vcmpleps 0x200(%r15,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne %sil
jne 0x1c25c00
jmp 0x1c27900
vmovaps %ymm8, 0x160(%rsp)
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x200(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x300(%rsp)
vmovss 0x2c5b01(%rip), %xmm0 # 0x1eec714
vdivss 0x580(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
movl %r11d, 0xc0(%rsp)
movl %eax, %r11d
movq 0x80(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x580(%rsp)
movl %r11d, %eax
movl 0xc0(%rsp), %r11d
vpbroadcastd %r11d, %zmm0
vmovdqa64 %zmm0, 0x5c0(%rsp)
movl $0x8, %r11d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movl %eax, 0x120(%rsp)
vpbroadcastd %r11d, %ymm0
vpor 0x333c90(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x200(%rsp), %ymm0, %k1
movq %rcx, %rax
leaq (%rbx,%rdx), %rcx
vmovups (%rcx,%r11,4), %ymm3
vmovups 0x484(%rcx,%r11,4), %ymm10
vmovups 0x908(%rcx,%r11,4), %ymm11
vmovups 0xd8c(%rcx,%r11,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x660(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x700(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x6e0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x680(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x4c0(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x6a0(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rax), %rax
vmovups (%rax,%r11,4), %ymm2
vmovups 0x484(%rax,%r11,4), %ymm13
vmovaps 0x6c0(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r11,4), %ymm14
vmovups 0xd8c(%rax,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm26
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c277f0
vmovaps %ymm23, %ymm16
vmovaps 0x160(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x600(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x620(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x640(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r11,4), %ymm12
vmovups 0x1694(%rcx,%r11,4), %ymm13
vmovups 0x1b18(%rcx,%r11,4), %ymm14
vmovups 0x1f9c(%rcx,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm26, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm26) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm27, %ymm25
vmovaps %ymm19, %ymm27
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r11,4), %ymm13
vmovups 0x1b18(%rax,%r11,4), %ymm14
vmovups 0x1f9c(%rax,%r11,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r11,4), %ymm14
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps 0x4c0(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2f9f63(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x1a0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2f9eef(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm24, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm24
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2c5726(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2c5704(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm24, %ymm14, %ymm13
vfmadd213ps %ymm24, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm24
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm24, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm13) + ymm6
vcmpleps %ymm24, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c2784c
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2c5513(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x300(%rsp), %ymm2, %k1
vcmpleps 0x200(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
leaq 0x5024c8(%rip), %rcx # 0x2129704
je 0x1c27857
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm17
je 0x1c27878
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2c549a(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x480(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
testw %ax, %ax
je 0x1c27818
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c27811
vbroadcastss 0x2c96e4(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x480(%rsp), %ymm1
vfmadd132ps 0x2c9cfd(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x320(%rsp)
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm1, 0x340(%rsp)
vmovaps %ymm21, 0x360(%rsp)
movl %r11d, 0x380(%rsp)
movl %r8d, 0x384(%rsp)
vmovaps %xmm7, 0x390(%rsp)
vmovaps 0x2d0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps 0x2c0(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovdqa 0x2b0(%rsp), %xmm0
vmovdqa %xmm0, 0x3c0(%rsp)
movb %al, 0x3d0(%rsp)
movq 0x8(%rsp), %rcx
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0x80(%rsp), %r8
movq (%rcx,%r8,8), %r8
movl 0x240(%r15,%r14,4), %ecx
movq %r8, 0xc0(%rsp)
testl %ecx, 0x34(%r8)
je 0x1c27821
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
leaq 0x502347(%rip), %rcx # 0x2129704
jne 0x1c273d5
movq 0xc0(%rsp), %r8
cmpq $0x0, 0x48(%r8)
movb $0x1, %r8b
je 0x1c2782b
vmovaps %ymm29, 0x1c0(%rsp)
vmovaps %ymm28, 0x1e0(%rsp)
movq %r10, 0x28(%rsp)
movb %sil, 0x3(%rsp)
movq %rdi, 0x30(%rsp)
movq %r9, 0x38(%rsp)
vmovaps %ymm20, 0x240(%rsp)
vaddps 0x2f9b35(%rip), %ymm20, %ymm0 # 0x1f20f40
movq %r11, 0x198(%rsp)
vcvtsi2ss %r11d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x2e0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps %ymm21, 0x220(%rsp)
vmovaps %ymm21, 0x420(%rsp)
movzbl %al, %edi
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x180(%rsp)
tzcntq %rdi, %r8
vmovaps 0x3b0(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x4f0(%rsp)
movb $0x1, %dl
vmovss 0x200(%r15,%r14,4), %xmm8
vmovss 0x420(%rsp,%r8,4), %xmm0
vbroadcastss 0x3e0(%rsp,%r8,4), %zmm1
vbroadcastss 0x400(%rsp,%r8,4), %zmm2
vmovss %xmm0, 0x200(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2c5239(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x2c9b01(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2c94e4(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2c9af3(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2c9adf(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x4f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0xf0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x180(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd213ps %xmm3, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm3
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2eb1a5(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm4
vbroadcastss 0x2f996d(%rip), %zmm5 # 0x1f20edc
vpermps %zmm0, %zmm5, %zmm0
vmovaps %zmm3, 0x780(%rsp)
vmovaps %zmm4, 0x7c0(%rsp)
vmovaps %zmm0, 0x800(%rsp)
vmovaps %zmm1, 0x840(%rsp)
vmovaps %zmm2, 0x880(%rsp)
vmovaps 0x5c0(%rsp), %zmm0
vmovaps %zmm0, 0x8c0(%rsp)
vmovdqa64 0x580(%rsp), %zmm0
vmovdqa64 %zmm0, 0x900(%rsp)
movq 0x88(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x940(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x740(%rsp), %zmm0
vmovaps %zmm0, 0x440(%rsp)
leaq 0x440(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0xc0(%rsp), %rsi
movq 0x18(%rsi), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x780(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x10, 0xb8(%rsp)
movq 0x48(%rsi), %rax
testq %rax, %rax
movl %edx, 0x540(%rsp)
movq %rdi, 0x110(%rsp)
movq %r8, 0x500(%rsp)
vmovss %xmm8, 0x100(%rsp)
je 0x1c276b4
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x100(%rsp), %xmm8
movq 0x500(%rsp), %r8
movq 0x110(%rsp), %rdi
movl 0x540(%rsp), %edx
vmovaps 0x10(%rsp), %xmm7
vmovdqa64 0x440(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c27758
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c27724
testb $0x2, (%rcx)
jne 0x1c276f1
movq 0xc0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c27724
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x100(%rsp), %xmm8
movq 0x500(%rsp), %r8
movq 0x110(%rsp), %rdi
movl 0x540(%rsp), %edx
vmovaps 0x10(%rsp), %xmm7
vmovdqa64 0x440(%rsp), %zmm0
movq 0xa8(%rsp), %rax
vmovaps 0x200(%rax), %zmm1
vptestmd %zmm0, %zmm0, %k1
vbroadcastss 0x2c5439(%rip), %zmm1 {%k1} # 0x1eecb84
vmovaps %zmm1, 0x200(%rax)
kortestw %k1, %k1
jne 0x1c27774
vmovss %xmm8, 0x200(%r15,%r14,4)
btcq %r8, %rdi
tzcntq %rdi, %r8
setae %dl
jae 0x1c27496
andb $0x1, %dl
movq 0x38(%rsp), %r9
movq 0x30(%rsp), %rdi
movb 0x3(%rsp), %sil
movq 0x28(%rsp), %r10
movl %edx, %r8d
leaq 0x4ffb4f(%rip), %rdx # 0x21272e4
leaq 0x501f68(%rip), %rcx # 0x2129704
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x1c0(%rsp), %ymm29
movq 0x198(%rsp), %r11
jmp 0x1c2782b
leaq 0x501f0d(%rip), %rcx # 0x2129704
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm26, %ymm17
jmp 0x1c27818
leaq 0x501eec(%rip), %rcx # 0x2129704
movl 0x120(%rsp), %eax
jmp 0x1c2783a
xorl %r8d, %r8d
leaq 0x501ed9(%rip), %rcx # 0x2129704
movl 0x120(%rsp), %eax
orb %r8b, %al
movl 0x4(%rsp), %r8d
addq $0x8, %r11
cmpl %r11d, %r8d
jg 0x1c26c7b
jmp 0x1c26ba4
xorl %eax, %eax
leaq 0x501eaf(%rip), %rcx # 0x2129704
jmp 0x1c27859
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm17
jmp 0x1c272ae
xorl %eax, %eax
jmp 0x1c272a2
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x1c0(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm8
vmovaps 0x1a0(%rsp), %xmm9
jmp 0x1c2653f
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm8
jmp 0x1c2653f
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
jmp 0x1c2653f
movl 0x120(%rsp), %r11d
andb $0x1, %r11b
jmp 0x1c26b8c
andb $0x1, %sil
movl %esi, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNiIntersector1<8>::occluded_n<embree::avx512::OrientedCurve1Intersector1<embree::BezierCurveT, 7, 8>, embree::avx512::Occluded1Epilog1<true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Occlusion (shadow-ray) test of one ray against a block of up to M quantized
// curve primitives (CurveNi<8>). Returns true on the FIRST hit found — unlike
// a closest-hit query, occlusion can terminate early.
static __forceinline bool occluded_n(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
// Broad phase: intersect the ray against the primitives' quantized bounds;
// 'valid' flags candidate lanes and 'tNear' holds their entry distances.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,prim,tNear);
const size_t N = prim.N;
// Scalar bitmask of surviving lanes; iterate candidates one at a time.
size_t mask = movemask(valid);
while (mask)
{
// bscf pops (returns and clears) the lowest set bit of 'mask',
// giving the lane index of the next candidate primitive.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// First control-vertex index of this curve segment.
unsigned int vertexID = geom->curve(primID);
// Fetch the 4 position (a*) and 4 normal (n*) control points used by
// the oriented-curve intersector.
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Software prefetch of the NEXT one or two candidates' vertex data,
// so their loads overlap with the (expensive) intersection below.
// 'mask' was already advanced by the bscf above, so testing it asks
// "is there at least one more candidate after the current one?".
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
// A second candidate after that gets a weaker L2 prefetch.
if (mask1) {
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Narrow phase: exact curve intersection. Any hit occludes — return
// immediately without visiting the remaining candidates.
if (Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,context,geomID,primID)))
return true;
// The epilog may have shortened ray.tfar; drop candidates whose
// bounds-entry distance now lies beyond the ray extent.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
// No candidate occluded the ray.
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x348, %rsp # imm = 0x348
movq %rdx, %r9
movq %rsi, %r15
movq %rdi, %r12
movzbl 0x1(%rcx), %eax
leaq (%rax,%rax,4), %r8
leaq (%r8,%r8,4), %rdx
vbroadcastss 0x12(%rcx,%rdx), %xmm0
vmovaps (%rsi), %xmm1
vsubps 0x6(%rcx,%rdx), %xmm1, %xmm1
vmulps 0x10(%rsi), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%rcx,%rax,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%rcx,%r8), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rax,%rax,2), %rdx
vpmovsxbd 0x6(%rcx,%rdx,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rax,%r8,2), %rsi
vpmovsxbd 0x6(%rcx,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%rcx,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rax, %rsi
vpmovsxbd 0x6(%rcx,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%rcx,%rdi), %ymm0
addq %rax, %rdi
vpmovsxbd 0x6(%rcx,%rdi), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %r8d
vpmovsxbd 0x6(%rcx,%r8), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x2e8e06(%rip), %ymm16 # 0x1f12704
vpermps %ymm2, %ymm16, %ymm15
vbroadcastss 0x2f75ce(%rip), %ymm27 # 0x1f20edc
vpermps %ymm2, %ymm27, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vpermps %ymm3, %ymm27, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x2f7542(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x2c7659(%rip), %ymm8 # 0x1ef0fe8
vcmpltps %ymm8, %ymm6, %k1
vmovaps %ymm8, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm8, %ymm6, %k1
vmovaps %ymm8, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm8, %ymm5, %k1
vmovaps %ymm8, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2c2d47(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%rcx,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%rcx,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %r8
shlq $0x3, %rdx
subq %rax, %rdx
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%rcx,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%rcx,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%rcx,%r8), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%rcx,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%r15){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2f645e(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%r15){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2f643a(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x330e36(%rip), %ymm1, %k0 # 0x1f5a920
vmovups %ymm6, 0x2c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0xb(%rsp)
je 0x1c2b69b
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
vbroadcastss 0x2c74cd(%rip), %xmm30 # 0x1ef0fec
vbroadcastss 0x2f739b(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x2f738d(%rip), %xmm18 # 0x1f20ec0
movq %rcx, 0xa8(%rsp)
movq %r9, 0x20(%rsp)
tzcntq %r13, %rax
movl 0x2(%rcx), %edx
movl 0x6(%rcx,%rax,4), %edi
movq (%r9), %rax
movq 0x1e8(%rax), %rax
movq %rdx, 0x58(%rsp)
movq (%rax,%rdx,8), %rsi
movq 0x58(%rsi), %rax
movq 0x68(%rsi), %rbp
movq %rbp, %rdx
movq %rdi, 0xa0(%rsp)
imulq %rdi, %rdx
movl (%rax,%rdx), %r9d
movq 0xa0(%rsi), %rdx
movq %rdx, %rdi
imulq %r9, %rdi
leaq 0x1(%r9), %r11
leaq 0x2(%r9), %r10
leaq 0x3(%r9), %r8
movq 0xd8(%rsi), %rbx
imulq %rbx, %r9
movq 0xc8(%rsi), %r14
vmovups (%r14,%r9), %xmm4
movq %rdx, %r9
imulq %r11, %r9
imulq %rbx, %r11
vmovups (%r14,%r11), %xmm5
movq %rdx, %r11
imulq %r10, %r11
imulq %rbx, %r10
vmovups (%r14,%r10), %xmm6
imulq %r8, %rbx
vmovups (%r14,%rbx), %xmm7
movq %rdx, %r10
imulq %r8, %r10
movq 0x90(%rsi), %rsi
vmovaps (%rsi,%rdi), %xmm8
vmovaps (%rsi,%r9), %xmm9
vmovaps (%rsi,%r11), %xmm10
blsrq %r13, %r13
vmovaps (%rsi,%r10), %xmm3
movq %r13, %rdi
subq $0x1, %rdi
jb 0x1c29c51
andq %r13, %rdi
tzcntq %r13, %r8
movl 0x6(%rcx,%r8,4), %r8d
imulq %rbp, %r8
movl (%rax,%r8), %r8d
imulq %rdx, %r8
prefetcht0 (%rsi,%r8)
prefetcht0 0x40(%rsi,%r8)
testq %rdi, %rdi
je 0x1c29c51
tzcntq %rdi, %rdi
movl 0x6(%rcx,%rdi,4), %edi
imulq %rdi, %rbp
movl (%rax,%rbp), %eax
imulq %rax, %rdx
prefetcht1 (%rsi,%rdx)
prefetcht1 0x40(%rsi,%rdx)
vxorps %xmm16, %xmm16, %xmm16
vmulps %xmm16, %xmm3, %xmm0
vfmadd231ps %xmm16, %xmm10, %xmm0 # xmm0 = (xmm10 * xmm16) + xmm0
vpxor %xmm1, %xmm1, %xmm1
vfmadd213ps %xmm0, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm1) + xmm0
vaddps %xmm1, %xmm8, %xmm1
vfmadd231ps %xmm30, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm30) + xmm0
vfnmadd231ps %xmm30, %xmm8, %xmm0 # xmm0 = -(xmm8 * xmm30) + xmm0
vmulps %xmm16, %xmm7, %xmm11
vfmadd231ps %xmm16, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm16) + xmm11
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm11, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm11
vaddps %xmm2, %xmm4, %xmm12
vfmadd231ps %xmm30, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm30) + xmm11
vfnmadd231ps %xmm30, %xmm4, %xmm11 # xmm11 = -(xmm4 * xmm30) + xmm11
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm3, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm2) + xmm3
vfmadd231ps %xmm16, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm16) + xmm2
vfmadd231ps %xmm16, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm16) + xmm2
vmulps %xmm30, %xmm3, %xmm3
vfnmadd231ps %xmm10, %xmm30, %xmm3 # xmm3 = -(xmm30 * xmm10) + xmm3
vfmadd231ps %xmm9, %xmm16, %xmm3 # xmm3 = (xmm16 * xmm9) + xmm3
vfnmadd231ps %xmm8, %xmm16, %xmm3 # xmm3 = -(xmm16 * xmm8) + xmm3
vxorps %xmm8, %xmm8, %xmm8
vfmadd213ps %xmm7, %xmm6, %xmm8 # xmm8 = (xmm6 * xmm8) + xmm7
vfmadd231ps %xmm16, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm16) + xmm8
vfmadd231ps %xmm16, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm16) + xmm8
vmulps %xmm30, %xmm7, %xmm7
vfnmadd231ps %xmm6, %xmm30, %xmm7 # xmm7 = -(xmm30 * xmm6) + xmm7
vfmadd231ps %xmm5, %xmm16, %xmm7 # xmm7 = (xmm16 * xmm5) + xmm7
vfnmadd231ps %xmm4, %xmm16, %xmm7 # xmm7 = -(xmm16 * xmm4) + xmm7
vshufps $0xc9, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[1,2,0,3]
vmulps %xmm5, %xmm0, %xmm5
vfmsub231ps %xmm12, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm12) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm5 # xmm5 = xmm11[1,2,0,3]
vmulps %xmm5, %xmm0, %xmm5
vfmsub231ps %xmm11, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm11) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm3, %xmm4
vfmsub231ps %xmm8, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm8) - xmm4
vshufps $0xc9, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3]
vmulps %xmm3, %xmm8, %xmm8
vfmsub231ps %xmm7, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm7) - xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[1,2,0,3]
vdpps $0x7f, %xmm6, %xmm6, %xmm7
vmovss %xmm7, %xmm16, %xmm8 # xmm8 = xmm7[0],xmm16[1,2,3]
vrsqrt14ss %xmm8, %xmm16, %xmm10
vmovss 0x2c29a6(%rip), %xmm19 # 0x1eec718
vmulss %xmm19, %xmm10, %xmm11
vmovss 0x2c2e00(%rip), %xmm14 # 0x1eecb80
vmulss %xmm7, %xmm14, %xmm12
vmulss %xmm10, %xmm12, %xmm12
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm12, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vdpps $0x7f, %xmm9, %xmm6, %xmm11
vbroadcastss %xmm10, %xmm10
vmulps %xmm6, %xmm10, %xmm12
vbroadcastss %xmm7, %xmm13
vmulps %xmm13, %xmm9, %xmm9
vbroadcastss %xmm11, %xmm11
vmulps %xmm6, %xmm11, %xmm6
vsubps %xmm6, %xmm9, %xmm6
vrcp14ss %xmm8, %xmm16, %xmm8
vmovss 0x2c722c(%rip), %xmm15 # 0x1ef0ff8
vfnmadd213ss %xmm15, %xmm8, %xmm7 # xmm7 = -(xmm8 * xmm7) + xmm15
vmulss %xmm7, %xmm8, %xmm7
vbroadcastss %xmm7, %xmm7
vmulps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm10, %xmm6
vdpps $0x7f, %xmm4, %xmm4, %xmm7
vmovss %xmm7, %xmm16, %xmm8 # xmm8 = xmm7[0],xmm16[1,2,3]
vrsqrt14ss %xmm8, %xmm16, %xmm9
vmulss %xmm19, %xmm9, %xmm10
vmulss %xmm7, %xmm14, %xmm11
vmulss %xmm9, %xmm11, %xmm11
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm10, %xmm9
vbroadcastss %xmm9, %xmm9
vdpps $0x7f, %xmm5, %xmm4, %xmm10
vmulps %xmm4, %xmm9, %xmm11
vbroadcastss %xmm7, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vbroadcastss %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vrcp14ss %xmm8, %xmm16, %xmm5
vfnmadd213ss %xmm15, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm15
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vmulps %xmm4, %xmm9, %xmm4
vshufps $0xff, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[3,3,3,3]
vmulps %xmm5, %xmm12, %xmm7
vsubps %xmm7, %xmm1, %xmm13
vshufps $0xff, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[3,3,3,3]
vmulps %xmm12, %xmm8, %xmm8
vmulps %xmm6, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vsubps %xmm5, %xmm0, %xmm6
vaddps %xmm7, %xmm1, %xmm14
vaddps %xmm5, %xmm0, %xmm0
vshufps $0xff, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[3,3,3,3]
vmulps %xmm1, %xmm11, %xmm5
vsubps %xmm5, %xmm2, %xmm15
vshufps $0xff, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[3,3,3,3]
vmulps %xmm7, %xmm11, %xmm7
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm7, %xmm1
vsubps %xmm1, %xmm3, %xmm4
vaddps %xmm5, %xmm2, %xmm16
vaddps %xmm1, %xmm3, %xmm1
vbroadcastss 0x2c8009(%rip), %xmm3 # 0x1ef1ebc
vmulps %xmm3, %xmm6, %xmm2
vaddps %xmm2, %xmm13, %xmm19
vmulps %xmm3, %xmm4, %xmm2
vsubps %xmm2, %xmm15, %xmm20
vmulps %xmm3, %xmm0, %xmm0
vaddps %xmm0, %xmm14, %xmm21
vmulps %xmm3, %xmm1, %xmm0
vsubps %xmm0, %xmm16, %xmm22
vmovaps (%r15), %xmm4
vsubps %xmm4, %xmm13, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%r12), %xmm3
vmovaps 0x20(%r12), %xmm5
vmovaps 0x30(%r12), %xmm6
vmulps %xmm0, %xmm6, %xmm0
vfmadd231ps %xmm2, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm2) + xmm0
vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0
vsubps %xmm4, %xmm19, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm1
vfmadd231ps %xmm7, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm7) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vsubps %xmm4, %xmm20, %xmm2
vbroadcastss %xmm2, %xmm7
vshufps $0x55, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm2
vfmadd231ps %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ps %xmm7, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm7) + xmm2
vsubps %xmm4, %xmm15, %xmm7
vbroadcastss %xmm7, %xmm8
vshufps $0x55, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[1,1,1,1]
vshufps $0xaa, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2]
vmulps %xmm7, %xmm6, %xmm7
vfmadd231ps %xmm9, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm9) + xmm7
vfmadd231ps %xmm8, %xmm3, %xmm7 # xmm7 = (xmm3 * xmm8) + xmm7
vsubps %xmm4, %xmm14, %xmm8
vbroadcastss %xmm8, %xmm9
vshufps $0x55, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,1,1,1]
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm6, %xmm8, %xmm8
vfmadd231ps %xmm10, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm10) + xmm8
vfmadd231ps %xmm9, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm9) + xmm8
vsubps %xmm4, %xmm21, %xmm9
vbroadcastss %xmm9, %xmm10
vshufps $0x55, %xmm9, %xmm9, %xmm11 # xmm11 = xmm9[1,1,1,1]
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm6, %xmm9, %xmm9
vfmadd231ps %xmm11, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm11) + xmm9
vfmadd231ps %xmm10, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm10) + xmm9
vsubps %xmm4, %xmm22, %xmm10
vbroadcastss %xmm10, %xmm11
vshufps $0x55, %xmm10, %xmm10, %xmm12 # xmm12 = xmm10[1,1,1,1]
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm6, %xmm10, %xmm10
vfmadd231ps %xmm12, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm12) + xmm10
vfmadd231ps %xmm11, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm11) + xmm10
vsubps %xmm4, %xmm16, %xmm4
vbroadcastss %xmm4, %xmm11
vshufps $0x55, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmulps %xmm4, %xmm6, %xmm4
vfmadd231ps %xmm12, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm12) + xmm4
vfmadd231ps %xmm11, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm11) + xmm4
vmovlhps %xmm8, %xmm0, %xmm29 # xmm29 = xmm0[0],xmm8[0]
vmovlhps %xmm9, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm9[0]
vmovlhps %xmm10, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm10[0]
vmovlhps %xmm4, %xmm7, %xmm24 # xmm24 = xmm7[0],xmm4[0]
vminps %xmm12, %xmm29, %xmm3
vmaxps %xmm12, %xmm29, %xmm5
vminps %xmm24, %xmm23, %xmm6
vminps %xmm6, %xmm3, %xmm3
vmaxps %xmm24, %xmm23, %xmm6
vmaxps %xmm6, %xmm5, %xmm5
vshufpd $0x3, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[1,1]
vminps %xmm6, %xmm3, %xmm3
vmaxps %xmm11, %xmm5, %xmm5
vandps %xmm17, %xmm3, %xmm3
vandps %xmm17, %xmm5, %xmm5
vmaxps %xmm5, %xmm3, %xmm3
vmovshdup %xmm3, %xmm5 # xmm5 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm5, %xmm3
vmulss 0x2c7e41(%rip), %xmm3, %xmm3 # 0x1ef1eb8
vmovddup %xmm0, %xmm6 # xmm6 = xmm0[0,0]
vmovddup %xmm1, %xmm11 # xmm11 = xmm1[0,0]
vmovddup %xmm2, %xmm17 # xmm17 = xmm2[0,0]
vmovddup %xmm7, %xmm7 # xmm7 = xmm7[0,0]
vmovddup %xmm8, %xmm5 # xmm5 = xmm8[0,0]
vmovddup %xmm9, %xmm8 # xmm8 = xmm9[0,0]
vmovddup %xmm10, %xmm9 # xmm9 = xmm10[0,0]
vmovddup %xmm4, %xmm10 # xmm10 = xmm4[0,0]
vmovaps %xmm3, 0x170(%rsp)
vbroadcastss %xmm3, %ymm31
vxorps %xmm18, %xmm31, %xmm0
vbroadcastss %xmm0, %ymm28
vsubps %xmm29, %xmm12, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps %xmm12, 0x90(%rsp)
vsubps %xmm12, %xmm23, %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps %xmm23, 0x80(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x290(%rsp)
vmovaps %xmm13, 0x160(%rsp)
vmovaps %xmm14, 0x150(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x280(%rsp)
vmovaps %xmm19, 0x120(%rsp)
vmovaps %xmm21, 0x100(%rsp)
vsubps %xmm19, %xmm21, %xmm0
vmovaps %xmm0, 0x270(%rsp)
vmovaps %xmm20, 0x110(%rsp)
vmovaps %xmm22, 0xf0(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x260(%rsp)
vmovaps %xmm15, 0x140(%rsp)
vmovaps %xmm16, 0x130(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x250(%rsp)
xorl %edi, %edi
vmovsd 0x2c2571(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm14
xorl %ebp, %ebp
movq 0x20(%rsp), %r9
vmovaps %xmm29, 0x30(%rsp)
vmovaps %xmm6, 0x200(%rsp)
vmovaps %xmm11, 0x1f0(%rsp)
vmovaps %xmm17, 0x1e0(%rsp)
vmovaps %xmm7, 0x1d0(%rsp)
vmovaps %xmm5, 0x1c0(%rsp)
vmovaps %xmm8, 0x1b0(%rsp)
vmovaps %xmm9, 0x1a0(%rsp)
vmovaps %xmm10, 0x190(%rsp)
vmovups %ymm31, 0x300(%rsp)
vmovups %ymm28, 0x2e0(%rsp)
vmovaps %xmm14, %xmm26
vshufps $0x50, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[0,0,1,1]
vbroadcastss 0x2c2516(%rip), %ymm13 # 0x1eec714
vsubps %xmm1, %xmm13, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm8, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm6, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm6) + xmm3
vfmadd231ps %xmm11, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm11) + xmm4
vfmadd231ps %xmm17, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm17) + xmm5
vfmadd231ps %xmm2, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2f6c99(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2e84b1(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x2f6c48(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2f6c5f(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm13, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2c6c97(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x3359a7(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm31, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c2a661
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm31, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c2a681
movl %ebp, %eax
movl %ecx, 0x210(%rsp,%rax,4)
vmovlps %xmm0, 0x2a0(%rsp,%rax,8)
vmovlps %xmm26, 0x320(%rsp,%rax,8)
incl %ebp
vbroadcastss 0x2f6839(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x2f682b(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x2c694d(%rip), %ymm19 # 0x1ef0fec
vmovss 0x2f6837(%rip), %xmm20 # 0x1f20ee0
vmovss 0x2c2061(%rip), %xmm21 # 0x1eec714
vmovss 0x2c6943(%rip), %xmm22 # 0x1ef1000
vmovss 0x2c7385(%rip), %xmm23 # 0x1ef1a4c
vbroadcastss 0x2c2043(%rip), %xmm24 # 0x1eec714
vmovss 0x2c77e1(%rip), %xmm25 # 0x1ef1ebc
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x70(%rsp), %xmm28
vmovaps 0x60(%rsp), %xmm31
testl %ebp, %ebp
je 0x1c2b66a
leal -0x1(%rbp), %eax
vmovss 0x2a0(%rsp,%rax,8), %xmm0
vmovss 0x2a4(%rsp,%rax,8), %xmm1
movl 0x210(%rsp,%rax,4), %ecx
vmovsd 0x320(%rsp,%rax,8), %xmm14
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x210(%rsp,%rax,4)
cmovel %eax, %ebp
vpxord %xmm26, %xmm26, %xmm26
vcvtsi2ss %rdx, %xmm26, %xmm2
vmulss %xmm20, %xmm2, %xmm2
incq %rdx
vpxord %xmm26, %xmm26, %xmm26
vcvtsi2ss %rdx, %xmm26, %xmm3
vmulss %xmm20, %xmm3, %xmm3
vsubss %xmm2, %xmm21, %xmm4
vmulss %xmm2, %xmm1, %xmm15
vfmadd231ss %xmm4, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm4) + xmm15
vsubss %xmm3, %xmm21, %xmm2
vmulss %xmm3, %xmm1, %xmm13
vfmadd231ss %xmm2, %xmm0, %xmm13 # xmm13 = (xmm0 * xmm2) + xmm13
vsubss %xmm15, %xmm13, %xmm0
vucomiss %xmm0, %xmm22
jbe 0x1c2b608
vmovaps %xmm14, %xmm26
vshufps $0x50, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[0,0,1,1]
vucomiss %xmm0, %xmm23
seta %cl
cmpl $0x4, %ebp
setae %al
vsubps %xmm1, %xmm24, %xmm2
vmulps 0x1c0(%rsp), %xmm1, %xmm3
vmulps 0x1b0(%rsp), %xmm1, %xmm4
vmulps 0x1a0(%rsp), %xmm1, %xmm5
vmulps 0x190(%rsp), %xmm1, %xmm1
vfmadd231ps 0x200(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x1f0(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps 0x1e0(%rsp), %xmm2, %xmm5 # xmm5 = (xmm2 * mem) + xmm5
vfmadd231ps 0x1d0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm15, 0x40(%rsp)
vbroadcastss %xmm15, %xmm6
vmovaps %xmm13, 0x10(%rsp)
vbroadcastss %xmm13, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm19, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss %xmm25, %xmm0, %xmm5
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x2c609a(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c2a953
vmovss 0x2c7573(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c2a9b4
vmovss 0x2c7565(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2c606e(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c2a9b4
vmovss 0x2c7533(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2c603c(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c2b5dc
vcmpltss %xmm16, %xmm13, %k1
vmovaps %xmm21, %xmm15
vmovss 0x2c6001(%rip), %xmm16 # 0x1ef09cc
vxorps %xmm11, %xmm11, %xmm11
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm11, %xmm9, %k1
vmovaps %xmm21, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x2c101c(%rip), %xmm7 # 0x1eeba20
vmovss %xmm11, %xmm7, %xmm7 {%k1}
vmovss 0x2c2172(%rip), %xmm8 # 0x1eecb84
vmovss %xmm11, %xmm8, %xmm8 {%k1}
vcmpltss %xmm11, %xmm14, %k1
vmovaps %xmm21, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c2aa34
jnp 0x1c2aa79
vucomiss %xmm13, %xmm14
jne 0x1c2aa81
jp 0x1c2aa81
vxorps %xmm16, %xmm16, %xmm16
vucomiss %xmm16, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2c0fc1(%rip), %xmm13 # 0x1eeba20
vmovss %xmm16, %xmm13, %xmm13 {%k1}
vmovss 0x2c2117(%rip), %xmm14 # 0x1eecb84
vmovss 0x2c1c9d(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c2aaa8
vxorps %xmm16, %xmm16, %xmm16
jmp 0x1c2aab2
vxorps %xmm18, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm21, %xmm13
vxorps %xmm16, %xmm16, %xmm16
vfmadd213ss %xmm14, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vmovaps 0x90(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm15
vcmpltss %xmm16, %xmm10, %k1
vmovaps %xmm21, %xmm13
vmovss 0x2c5ef1(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
jne 0x1c2aae4
jnp 0x1c2ab4e
vucomiss %xmm9, %xmm10
jne 0x1c2ab23
jp 0x1c2ab23
vucomiss %xmm16, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2c0f17(%rip), %xmm9 # 0x1eeba20
vmovss %xmm16, %xmm9, %xmm9 {%k1}
vmovss 0x2c206d(%rip), %xmm10 # 0x1eecb84
vmovss 0x2c1bf3(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c2ab44
vxorps %xmm18, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm21, %xmm9
vfmadd213ss %xmm10, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm21, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm21, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm21, %xmm8, %xmm8
movb $0x1, %bl
vucomiss %xmm8, %xmm7
ja 0x1c2b5de
vaddss 0x3328ce(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2c1ffa(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm21, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm24, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm21, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm21, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm21, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm30, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm30, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm30, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm21, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0x40(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vmovaps 0x10(%rsp), %xmm0
vinsertps $0x10, %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm4[0],xmm0[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2c1e8b(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm29, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm29
vmovaps %xmm31, %xmm12
vfmadd213ps %xmm14, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm14
vmovaps 0x290(%rsp), %xmm13
vfmadd213ps %xmm15, %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + xmm15
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm30, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2f6141(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x334f5f(%rip), %xmm29 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm29, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x32fbbf(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2f60b0(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2f6049(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm14 # xmm14 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c2b5c2
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps %xmm26, %xmm15
jbe 0x1c2afbf
testb %sil, %sil
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x2c607c(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x30(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
je 0x1c2aff9
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c2aff9
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x2c601f(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x30(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c2b5d8
movl $0xc8, %eax
vsubss %xmm0, %xmm21, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm1, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm29, %xmm6 # xmm6 = (xmm29 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm17, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm13
ja 0x1c2b0ae
decq %rax
jne 0x1c2b00a
jmp 0x1c2b5ea
vucomiss %xmm16, %xmm0
jb 0x1c2b5ea
vucomiss %xmm0, %xmm21
vmovaps 0x10(%rsp), %xmm13
jb 0x1c2b5f0
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm16, %xmm1
jb 0x1c2b5f0
vucomiss %xmm1, %xmm21
jb 0x1c2b5f0
vmovss 0x18(%r12), %xmm2
vinsertps $0x1c, 0x28(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x38(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vmovaps (%r15), %xmm3
vmovaps 0x160(%rsp), %xmm4
vsubps %xmm3, %xmm4, %xmm4
vdpps $0x7f, %xmm2, %xmm4, %xmm4
vmovaps 0x120(%rsp), %xmm5
vsubps %xmm3, %xmm5, %xmm5
vdpps $0x7f, %xmm2, %xmm5, %xmm5
vmovaps 0x110(%rsp), %xmm6
vsubps %xmm3, %xmm6, %xmm6
vdpps $0x7f, %xmm2, %xmm6, %xmm6
vmovaps 0x140(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm7
vdpps $0x7f, %xmm2, %xmm7, %xmm7
vmovaps 0x150(%rsp), %xmm8
vsubps %xmm3, %xmm8, %xmm8
vdpps $0x7f, %xmm2, %xmm8, %xmm8
vmovaps 0x100(%rsp), %xmm9
vsubps %xmm3, %xmm9, %xmm9
vdpps $0x7f, %xmm2, %xmm9, %xmm9
vmovaps 0xf0(%rsp), %xmm10
vsubps %xmm3, %xmm10, %xmm10
vdpps $0x7f, %xmm2, %xmm10, %xmm10
vmovaps 0x130(%rsp), %xmm12
vsubps %xmm3, %xmm12, %xmm3
vdpps $0x7f, %xmm2, %xmm3, %xmm2
vsubss %xmm1, %xmm21, %xmm3
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm1, %xmm10, %xmm10
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm4, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm6) + xmm10
vfmadd231ss %xmm7, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm7) + xmm1
vsubss %xmm0, %xmm21, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm10, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm10) + xmm1
vfmadd231ss %xmm9, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm8) + xmm1
vucomiss 0xc(%r15), %xmm1
jb 0x1c2b5f0
vmovss 0x20(%r15), %xmm12
vucomiss %xmm1, %xmm12
jb 0x1c2b5f0
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm24, %xmm8
vmulps 0x150(%rsp), %xmm7, %xmm9
vmulps 0x100(%rsp), %xmm7, %xmm10
vmulps 0xf0(%rsp), %xmm7, %xmm11
vmulps 0x130(%rsp), %xmm7, %xmm7
vfmadd231ps 0x160(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x120(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x110(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x140(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps %xmm30, %xmm7, %xmm6
movq (%r9), %rax
movq 0x1e8(%rax), %rax
movq 0x58(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movl 0x24(%r15), %eax
testl %eax, 0x34(%r14)
je 0x1c2b5bb
movq 0x10(%r9), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c2b2e6
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c2b5bd
vbroadcastss %xmm5, %xmm5
vmulps 0x250(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x260(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x270(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x280(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%r9), %rax
vshufps $0xe9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,2,3]
vmovlps %xmm3, 0x220(%rsp)
vmovss %xmm2, 0x228(%rsp)
vmovlps %xmm0, 0x22c(%rsp)
movq 0xa0(%rsp), %rcx
movl %ecx, 0x234(%rsp)
movq 0x58(%rsp), %rcx
movl %ecx, 0x238(%rsp)
movl (%rax), %ecx
movl %ecx, 0x23c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x240(%rsp)
vmovss %xmm1, 0x20(%r15)
movl $0xffffffff, 0x2c(%rsp) # imm = 0xFFFFFFFF
leaq 0x2c(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r9), %rax
movq %rax, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
leaq 0x220(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm14, 0xe0(%rsp)
vmovss %xmm12, 0xc(%rsp)
je 0x1c2b4ca
movl %edi, 0x28(%rsp)
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0xc(%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm14
movl 0x28(%rsp), %edi
vmovaps 0x60(%rsp), %xmm31
vmovaps 0x70(%rsp), %xmm28
vmovaps 0x30(%rsp), %xmm29
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x2c6a6c(%rip), %xmm25 # 0x1ef1ebc
vbroadcastss 0x2c12ba(%rip), %xmm24 # 0x1eec714
vmovss 0x2c65e8(%rip), %xmm23 # 0x1ef1a4c
vmovss 0x2c5b92(%rip), %xmm22 # 0x1ef1000
vmovss 0x2c129c(%rip), %xmm21 # 0x1eec714
vmovss 0x2f5a5e(%rip), %xmm20 # 0x1f20ee0
vbroadcastss 0x2c5b60(%rip), %ymm19 # 0x1ef0fec
vbroadcastss 0x2f5a2a(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x2f5a24(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x2c5b42(%rip), %xmm30 # 0x1ef0fec
vbroadcastss 0x2f5a28(%rip), %ymm27 # 0x1f20edc
movq 0x20(%rsp), %r9
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c2b5fa
movq 0x10(%r9), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c2b5b7
testb $0x2, (%rcx)
jne 0x1c2b4eb
testb $0x40, 0x3e(%r14)
je 0x1c2b5aa
movl %edi, %r14d
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0xc(%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm14
movl %r14d, %edi
vmovaps 0x60(%rsp), %xmm31
vmovaps 0x70(%rsp), %xmm28
vmovaps 0x30(%rsp), %xmm29
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x2c697b(%rip), %xmm25 # 0x1ef1ebc
vbroadcastss 0x2c11c9(%rip), %xmm24 # 0x1eec714
vmovss 0x2c64f7(%rip), %xmm23 # 0x1ef1a4c
vmovss 0x2c5aa1(%rip), %xmm22 # 0x1ef1000
vmovss 0x2c11ab(%rip), %xmm21 # 0x1eec714
vmovss 0x2f596d(%rip), %xmm20 # 0x1f20ee0
vbroadcastss 0x2c5a6f(%rip), %ymm19 # 0x1ef0fec
vbroadcastss 0x2f5939(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x2f5933(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x2c5a51(%rip), %xmm30 # 0x1ef0fec
vbroadcastss 0x2f5937(%rip), %ymm27 # 0x1f20edc
movq 0x20(%rsp), %r9
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c2b5fa
movb $0x1, %al
jmp 0x1c2b5fc
xorl %eax, %eax
orb %al, %dil
jmp 0x1c2b5f0
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x30(%rsp), %xmm29
vmovaps %xmm26, %xmm15
jmp 0x1c2b5ea
xorl %ebx, %ebx
jmp 0x1c2b5ea
movb $0x1, %bl
vmovaps %xmm26, %xmm14
vmovaps 0x40(%rsp), %xmm15
vmovaps 0x10(%rsp), %xmm13
testb %bl, %bl
jne 0x1c2a6f1
jmp 0x1c2b608
xorl %eax, %eax
testb %al, %al
jne 0x1c2b5bd
vmovss %xmm12, 0x20(%r15)
jmp 0x1c2b5bd
vinsertps $0x10, %xmm13, %xmm15, %xmm0 # xmm0 = xmm15[0],xmm13[0],xmm15[2,3]
vmovaps 0x200(%rsp), %xmm6
vmovaps 0x1f0(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm17
vmovaps 0x1d0(%rsp), %xmm7
vmovaps 0x1c0(%rsp), %xmm5
vmovaps 0x1b0(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x190(%rsp), %xmm10
vmovups 0x300(%rsp), %ymm31
vmovups 0x2e0(%rsp), %ymm28
jmp 0x1c2a1e9
testb $0x1, %dil
movq 0xa8(%rsp), %rcx
jne 0x1c2b69b
vmovups 0x2c0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne 0xb(%rsp)
jne 0x1c29b40
movb 0xb(%rsp), %al
andb $0x1, %al
addq $0x348, %rsp # imm = 0x348
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 4>::intersect_n<embree::avx512::OrientedCurve1IntersectorK<embree::BezierCurveT, 4>, embree::avx512::Intersect1KEpilog1<4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersects ray lane k against every curve candidate in a CurveNi<8> leaf.
// A cheap vectorized broad-phase test (intersect) yields a lane mask of
// candidates; each surviving candidate is then handed to the exact per-curve
// Intersector. Vertex data of the next one or two candidates is prefetched
// while the current one is processed, to hide gather latency.
static __forceinline void intersect_n(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// tNear receives the per-candidate entry distances from the broad-phase test.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// One set bit per candidate curve that passed the broad-phase test.
size_t mask = movemask(valid);
while (mask)
{
// bscf: bit-scan-forward that also clears the bit in mask (mask is passed
// by reference and shrinks each iteration, so the loop terminates).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Index of the curve's first control vertex in the geometry's vertex arrays.
unsigned int vertexID = geom->curve(primID);
// Gather the four control points (a0..a3) and four normals (n0..n3) of the
// current candidate curve.
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Peek at the next candidate(s) without consuming them from the real mask,
// so their vertex data can be prefetched before the expensive intersection
// below. mask1/i1 refer to the candidate after i (mask was already cleared
// of bit i by bscf above).
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
// Next candidate exists: pull its vertices toward L1.
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
// A candidate after that one too: stage its vertices in L2.
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Run the exact curve intersector; Epilog commits any hit into ray (which
// may shorten ray.tfar[k]).
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID));
// Drop remaining candidates whose broad-phase entry distance now lies
// beyond the (possibly updated) ray far clip.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x478, %rsp # imm = 0x478
movq %rcx, %r10
movq %rsi, %r11
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%rdx,4), %xmm1
vmovss 0x40(%r11,%rdx,4), %xmm2
vinsertps $0x10, 0x10(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rbx
vpmovsxbd 0x6(%r8,%rbx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
leal (,%rbx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2e6f56(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2f5724(%rip), %ymm27 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm27, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm27, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2f5699(%rip), %ymm5 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm5, %ymm2, %ymm6
vbroadcastss 0x2c57ab(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2c0e99(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r9
subq %rcx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %rbx
subq %rcx, %rbx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rbx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x30(%r11,%rdx,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2f45a5(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x80(%r11,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2f4580(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x32ef82(%rip), %ymm7, %k0 # 0x1f5a920
vmovups %ymm3, 0x3f0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c2d7e4
movq %rdx, %r9
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r15d
leaq (%rdx,%rdx,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r13
addq $0x10, %r13
movl $0x1, %eax
shlxl %r9d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x1c0(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x2c55e8(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2f54b6(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2f54a8(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq %r10, 0x98(%rsp)
movq %r11, 0x90(%rsp)
movq %rdx, 0x88(%rsp)
tzcntq %r15, %rax
movl 0x2(%r8), %ecx
movl 0x6(%r8,%rax,4), %edi
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq %rcx, 0xa0(%rsp)
movq (%rax,%rcx,8), %rsi
movq 0x58(%rsi), %rax
movq 0x68(%rsi), %rcx
movq %rcx, %rdx
movq %rdi, 0xe8(%rsp)
imulq %rdi, %rdx
movl (%rax,%rdx), %r9d
movq 0xa0(%rsi), %rdx
movq %rdx, %rdi
imulq %r9, %rdi
leaq 0x1(%r9), %r11
leaq 0x2(%r9), %r10
leaq 0x3(%r9), %r12
movq 0xd8(%rsi), %rbx
imulq %rbx, %r9
movq 0xc8(%rsi), %r14
vmovups (%r14,%r9), %xmm5
movq %rdx, %r9
imulq %r11, %r9
imulq %rbx, %r11
vmovups (%r14,%r11), %xmm6
movq %rdx, %r11
imulq %r10, %r11
imulq %rbx, %r10
vmovups (%r14,%r10), %xmm7
imulq %r12, %rbx
vmovups (%r14,%rbx), %xmm8
movq %rdx, %r10
imulq %r12, %r10
movq 0x90(%rsi), %rsi
vmovaps (%rsi,%rdi), %xmm9
vmovaps (%rsi,%r9), %xmm10
vmovaps (%rsi,%r11), %xmm11
blsrq %r15, %r15
vmovaps (%rsi,%r10), %xmm4
movq %r15, %rdi
subq $0x1, %rdi
jb 0x1c2bb4d
andq %r15, %rdi
tzcntq %r15, %r9
movl 0x6(%r8,%r9,4), %r9d
imulq %rcx, %r9
movl (%rax,%r9), %r9d
imulq %rdx, %r9
prefetcht0 (%rsi,%r9)
prefetcht0 0x40(%rsi,%r9)
testq %rdi, %rdi
je 0x1c2bb4d
tzcntq %rdi, %rdi
movl 0x6(%r8,%rdi,4), %edi
imulq %rdi, %rcx
movl (%rax,%rcx), %eax
imulq %rax, %rdx
prefetcht1 (%rsi,%rdx)
prefetcht1 0x40(%rsi,%rdx)
movq 0x90(%rsp), %r11
movq 0x88(%rsp), %r9
vmovss (%r11,%r9,4), %xmm0
vinsertps $0x1c, 0x10(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovss 0x30(%r11,%r9,4), %xmm1
vmovss %xmm1, 0xac(%rsp)
vmulps %xmm31, %xmm4, %xmm1
vfmadd231ps %xmm31, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm31) + xmm1
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm1, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm2) + xmm1
vaddps %xmm2, %xmm9, %xmm2
vfmadd231ps %xmm17, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm17) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vfmadd231ps %xmm31, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm31) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm12, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) + xmm12
vaddps %xmm3, %xmm5, %xmm13
vfmadd231ps %xmm17, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm4, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) + xmm4
vfmadd231ps %xmm31, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm31) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfnmadd231ps %xmm11, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm11) + xmm4
vfmadd231ps %xmm10, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm8, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm9) + xmm8
vfmadd231ps %xmm31, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm31) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfnmadd231ps %xmm7, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm7) + xmm8
vfmadd231ps %xmm6, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x2c0a7b(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x2c0ed4(%rip), %xmm16 # 0x1eecb80
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vsubss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x2c52fc(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vdpps $0x7f, %xmm5, %xmm5, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vdpps $0x7f, %xmm6, %xmm5, %xmm11
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm11, %xmm11
vmulps %xmm5, %xmm11, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm9, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm8 # xmm8 = -(xmm6 * xmm8) + xmm17
vmulss %xmm6, %xmm8, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm14
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm15
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm13, %xmm6
vsubps %xmm6, %xmm3, %xmm16
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm13, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm13
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x2c60d3(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm14, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm16, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm15, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm13, %xmm22
vsubps %xmm0, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps (%r13), %xmm4
vmovaps 0x10(%r13), %xmm5
vmovaps %xmm1, 0x320(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps 0x20(%r13), %xmm7
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x310(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm6, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm6) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm6
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x300(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm7, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm6) + xmm3
vsubps %xmm0, %xmm16, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vmovaps %xmm6, 0x2f0(%rsp)
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm8) + xmm6
vsubps %xmm0, %xmm15, %xmm10
vbroadcastss %xmm10, %xmm8
vshufps $0x55, %xmm10, %xmm10, %xmm9 # xmm9 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2e0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm9, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm9) + xmm10
vfmadd231ps %xmm8, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm8) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm8
vshufps $0x55, %xmm11, %xmm11, %xmm9 # xmm9 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2d0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm11, %xmm11
vfmadd231ps %xmm9, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm9) + xmm11
vfmadd231ps %xmm8, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm8) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm8
vshufps $0x55, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2c0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm7, %xmm12, %xmm12
vfmadd231ps %xmm9, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm9) + xmm12
vfmadd231ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm8) + xmm12
vsubps %xmm0, %xmm13, %xmm9
vbroadcastss %xmm9, %xmm0
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2b0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm7
vfmadd231ps %xmm8, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm8) + xmm7
vfmadd231ps %xmm0, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm0) + xmm7
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm23 # xmm23 = xmm3[0],xmm12[0]
vmovlhps %xmm7, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm7[0]
vminps %xmm9, %xmm8, %xmm0
vmaxps %xmm9, %xmm8, %xmm4
vminps %xmm24, %xmm23, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm24, %xmm23, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vminps %xmm5, %xmm0, %xmm0
vshufpd $0x3, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1]
vmaxps %xmm5, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x2c5ecd(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm18 # xmm18 = xmm1[0,0]
vmovddup %xmm2, %xmm25 # xmm25 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm10, %xmm5 # xmm5 = xmm10[0,0]
vmovddup %xmm11, %xmm2 # xmm2 = xmm11[0,0]
vmovddup %xmm12, %xmm3 # xmm3 = xmm12[0,0]
vmovddup %xmm7, %xmm10 # xmm10 = xmm7[0,0]
vmovaps %xmm0, 0x170(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
xorl %r14d, %r14d
vmovaps %xmm8, 0xd0(%rsp)
vsubps %xmm8, %xmm9, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm9, 0xc0(%rsp)
vsubps %xmm9, %xmm23, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm23, 0x110(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x330(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vmovaps %xmm15, 0x290(%rsp)
vsubps %xmm14, %xmm15, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm17, 0x260(%rsp)
vmovaps %xmm21, 0x240(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm20, 0x250(%rsp)
vmovaps %xmm22, 0x230(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm16, 0x280(%rsp)
vmovaps %xmm13, 0x270(%rsp)
vsubps %xmm16, %xmm13, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm3, %xmm9
vmovaps %xmm2, %xmm7
vmovaps %xmm1, %xmm16
movq 0xa0(%rsp), %rdi
vpbroadcastd %edi, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
movq 0xe8(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x1d0(%rsp)
vmovsd 0x2c05b0(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0x98(%rsp), %r10
vmovaps %xmm18, 0x1a0(%rsp)
vmovaps %xmm25, 0x190(%rsp)
vmovaps %xmm1, 0x70(%rsp)
vmovaps %xmm6, 0x40(%rsp)
vmovaps %xmm5, 0x30(%rsp)
vmovaps %xmm2, 0x20(%rsp)
vmovaps %xmm3, 0x10(%rsp)
vmovaps %xmm10, (%rsp)
vmovups %ymm29, 0x430(%rsp)
vmovups %ymm28, 0x410(%rsp)
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x2c056a(%rip), %ymm13 # 0x1eec714
vsubps %xmm1, %xmm13, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm7, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm18, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm18) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2f4ceb(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2e6503(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x2f4c9a(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2f4cb1(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm13, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2c4ce9(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x3339f9(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c2c60f
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c2c634
movl %r14d, %eax
movl %ecx, 0x1b0(%rsp,%rax,4)
vmovlps %xmm0, 0x340(%rsp,%rax,8)
vmovlps %xmm26, 0x450(%rsp,%rax,8)
incl %r14d
vbroadcastss 0x2c49ae(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2f487c(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2f486e(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2c4990(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2f487a(%rip), %xmm21 # 0x1f20ee0
vmovss 0x2c00a4(%rip), %xmm22 # 0x1eec714
vmovss 0x2c4986(%rip), %xmm23 # 0x1ef1000
vmovss 0x2c53c8(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x2c0086(%rip), %xmm25 # 0x1eec714
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps 0x100(%rsp), %xmm28
vmovaps 0xf0(%rsp), %xmm29
testl %r14d, %r14d
je 0x1c2d7c5
leal -0x1(%r14), %eax
vmovss 0x340(%rsp,%rax,8), %xmm0
vmovss 0x344(%rsp,%rax,8), %xmm1
movl 0x1b0(%rsp,%rax,4), %ecx
vmovsd 0x450(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1b0(%rsp,%rax,4)
cmovel %eax, %r14d
vxorps %xmm13, %xmm13, %xmm13
vcvtsi2ss %rdx, %xmm13, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm13, %xmm13, %xmm13
vcvtsi2ss %rdx, %xmm13, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm26
vfmadd231ss %xmm4, %xmm0, %xmm26 # xmm26 = (xmm0 * xmm4) + xmm26
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm11
vfmadd231ss %xmm2, %xmm0, %xmm11 # xmm11 = (xmm0 * xmm2) + xmm11
vsubss %xmm26, %xmm11, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c2d793
vmovaps %xmm26, %xmm8
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %r14d
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm7, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps 0x1a0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x190(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm8, 0xb0(%rsp)
vbroadcastss %xmm8, %xmm6
vmovaps %xmm11, 0x50(%rsp)
vbroadcastss %xmm11, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2c5681(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x2c40d6(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c2c917
vmovss 0x2c55af(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c2c978
vmovss 0x2c55a1(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2c40aa(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c2c978
vmovss 0x2c556f(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2c4078(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c2d3a5
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x2c403d(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bf05d(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x2c01b3(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c2c9f3
jnp 0x1c2ca32
vucomiss %xmm13, %xmm14
jne 0x1c2ca45
jp 0x1c2ca45
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bf008(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x2c015e(%rip), %xmm14 # 0x1eecb84
vmovss 0x2bfce4(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c2ca66
vmovaps 0xd0(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm16
jmp 0x1c2ca81
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xd0(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x2c3f34(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
jne 0x1c2caa1
jnp 0x1c2cb0b
vucomiss %xmm9, %xmm10
jne 0x1c2cae0
jp 0x1c2cae0
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bef5a(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x2c00b0(%rip), %xmm10 # 0x1eecb84
vmovss 0x2bfc36(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c2cb01
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %bl
vucomiss %xmm8, %xmm7
ja 0x1c2d397
vaddss 0x330911(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2c003d(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0xb0(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vmovaps 0x50(%rsp), %xmm0
vinsertps $0x10, %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm4[0],xmm0[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2bfece(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm29, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps 0x330(%rsp), %xmm13
vfmadd213ps 0x110(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2f417f(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x332f9d(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x32dbfd(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2f40ee(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2f4087(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c2d387
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps 0x70(%rsp), %xmm16
jbe 0x1c2cf84
testb %sil, %sil
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c40b8(%rip), %xmm13 # 0x1ef0fec
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm14
je 0x1c2cfbf
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c2cfbf
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c405a(%rip), %xmm13 # 0x1ef0fec
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c2d3a1
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm13, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm13, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c2d073
decq %rax
jne 0x1c2cfd0
jmp 0x1c2d357
vucomiss %xmm30, %xmm0
jb 0x1c2d357
vucomiss %xmm0, %xmm22
jb 0x1c2d357
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c2d357
vucomiss %xmm1, %xmm22
jb 0x1c2d357
vmovss 0x8(%r13), %xmm2
vinsertps $0x1c, 0x18(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm12
vmulss %xmm2, %xmm1, %xmm2
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm5) + xmm12
vfmadd231ss %xmm6, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm6) + xmm2
vsubss %xmm0, %xmm22, %xmm9
vmulss %xmm9, %xmm9, %xmm3
vmulss %xmm3, %xmm9, %xmm4
vmulss %xmm0, %xmm13, %xmm5
vmulss %xmm3, %xmm5, %xmm5
vmulps %xmm0, %xmm0, %xmm3
vmulss %xmm3, %xmm13, %xmm6
vmulss %xmm6, %xmm9, %xmm6
vmulps %xmm3, %xmm0, %xmm7
vmulss %xmm2, %xmm7, %xmm2
vfmadd231ss %xmm12, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm12) + xmm2
vfmadd231ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ss %xmm11, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm11) + xmm2
vucomiss 0xac(%rsp), %xmm2
jb 0x1c2d357
vmovss 0x80(%r11,%r9,4), %xmm14
vucomiss %xmm2, %xmm14
jb 0x1c2d357
movq %r8, %r12
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vsubps %xmm3, %xmm25, %xmm8
vmulps 0x290(%rsp), %xmm3, %xmm10
vmulps 0x240(%rsp), %xmm3, %xmm11
vmulps 0x230(%rsp), %xmm3, %xmm12
vmulps 0x270(%rsp), %xmm3, %xmm13
vfmadd231ps 0x2a0(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x260(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x250(%rsp), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12
vfmadd231ps 0x280(%rsp), %xmm8, %xmm13 # xmm13 = (xmm8 * mem) + xmm13
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm11, %xmm12, %xmm11
vsubps %xmm12, %xmm13, %xmm12
vbroadcastss %xmm0, %xmm8
vmulps %xmm11, %xmm8, %xmm13
vbroadcastss %xmm9, %xmm9
vfmadd231ps %xmm10, %xmm9, %xmm13 # xmm13 = (xmm9 * xmm10) + xmm13
vmulps %xmm12, %xmm8, %xmm10
vfmadd231ps %xmm11, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm11) + xmm10
vmulps %xmm10, %xmm8, %xmm10
vfmadd231ps %xmm13, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm13) + xmm10
vmulps %xmm17, %xmm10, %xmm9
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %rbp
movl 0x90(%r11,%r9,4), %eax
testl %eax, 0x34(%rbp)
je 0x1c2d354
vbroadcastss %xmm7, %xmm7
vmulps 0x1f0(%rsp), %xmm7, %xmm7
vbroadcastss %xmm6, %xmm6
vfmadd132ps 0x200(%rsp), %xmm7, %xmm6 # xmm6 = (xmm6 * mem) + xmm7
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x210(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x220(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vshufps $0xc9, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vmulps %xmm6, %xmm4, %xmm4
vfmsub231ps %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) - xmm4
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm11
jne 0x1c2d3d4
cmpq $0x0, 0x40(%rbp)
jne 0x1c2d3d4
vmovss %xmm2, 0x80(%r11,%r9,4)
vextractps $0x1, %xmm4, 0xc0(%r11,%r9,4)
vextractps $0x2, %xmm4, 0xd0(%r11,%r9,4)
vmovss %xmm4, 0xe0(%r11,%r9,4)
vmovss %xmm0, 0xf0(%r11,%r9,4)
vmovss %xmm1, 0x100(%r11,%r9,4)
movq 0xe8(%rsp), %rax
movl %eax, 0x110(%r11,%r9,4)
movl %edi, 0x120(%r11,%r9,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r11,%r9,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r11,%r9,4)
movq %r12, %r8
jmp 0x1c2d37a
movq %r12, %r8
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm11
testb %bl, %bl
jne 0x1c2c6c3
jmp 0x1c2d793
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c2d357
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c2d3a7
xorl %ebx, %ebx
jmp 0x1c2d357
movb $0x1, %bl
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps %xmm26, %xmm15
vmovaps 0xb0(%rsp), %xmm26
jmp 0x1c2d374
movq 0x8(%r10), %rax
vshufps $0x55, %xmm4, %xmm4, %xmm0 # xmm0 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[2,2,2,2]
vbroadcastss %xmm4, %xmm4
vmovaps %xmm0, 0x360(%rsp)
vmovaps %xmm1, 0x370(%rsp)
vmovaps %xmm4, 0x380(%rsp)
vmovaps %xmm8, 0x390(%rsp)
vmovaps %xmm3, 0x3a0(%rsp)
vmovaps 0x1d0(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovdqa 0x1e0(%rsp), %xmm0
vmovdqa %xmm0, 0x3c0(%rsp)
leaq 0x3d0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
vmovss %xmm2, 0x80(%r11,%r9,4)
vmovaps 0x1c0(%rsp), %xmm0
vmovaps %xmm0, 0x120(%rsp)
leaq 0x120(%rsp), %rax
movq %rax, 0x130(%rsp)
movq 0x18(%rbp), %rax
movq %rax, 0x138(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x140(%rsp)
movq %r11, 0x148(%rsp)
leaq 0x360(%rsp), %rax
movq %rax, 0x150(%rsp)
movl $0x4, 0x158(%rsp)
movq 0x40(%rbp), %rax
testq %rax, %rax
vmovaps %xmm15, 0x160(%rsp)
vmovss %xmm14, 0x6c(%rsp)
je 0x1c2d5c3
leaq 0x130(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x6c(%rsp), %xmm14
vmovaps 0x50(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm26
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps (%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm16
movq 0xa0(%rsp), %rdi
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c44ed(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2c3a97(%rip), %xmm23 # 0x1ef1000
vmovss 0x2bf1a1(%rip), %xmm22 # 0x1eec714
vmovss 0x2f3963(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2c3a65(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2f392f(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2f3929(%rip), %xmm18 # 0x1f20ec4
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %r9
vbroadcastss 0x2f3929(%rip), %ymm27 # 0x1f20edc
movq 0x90(%rsp), %r11
movq 0x98(%rsp), %r10
vmovdqa 0x120(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c2d770
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c2d6d7
testb $0x2, (%rcx)
jne 0x1c2d5fc
testb $0x40, 0x3e(%rbp)
je 0x1c2d6d7
leaq 0x130(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x6c(%rsp), %xmm14
vmovaps 0x50(%rsp), %xmm11
vmovaps 0xb0(%rsp), %xmm26
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps (%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm16
movq 0xa0(%rsp), %rdi
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c43d9(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2c3983(%rip), %xmm23 # 0x1ef1000
vmovss 0x2bf08d(%rip), %xmm22 # 0x1eec714
vmovss 0x2f384f(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2c3951(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2f381b(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2f3815(%rip), %xmm18 # 0x1f20ec4
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %r9
vbroadcastss 0x2f3815(%rip), %ymm27 # 0x1f20edc
movq 0x90(%rsp), %r11
movq 0x98(%rsp), %r10
vmovdqa 0x120(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c2d770
movq 0x148(%rsp), %rax
movq 0x150(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c2d77a
vmovss %xmm14, 0x80(%r11,%r9,4)
vbroadcastss 0x2c3868(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2bef86(%rip), %xmm25 # 0x1eec714
jmp 0x1c2d34f
vinsertps $0x10, %xmm11, %xmm26, %xmm0 # xmm0 = xmm26[0],xmm11[0],xmm26[2,3]
vmovaps 0x1a0(%rsp), %xmm18
vmovaps 0x190(%rsp), %xmm25
vmovups 0x430(%rsp), %ymm29
vmovups 0x410(%rsp), %ymm28
jmp 0x1c2c195
vmovups 0x3f0(%rsp), %ymm0
vcmpleps 0x80(%r11,%r9,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r15d
jne 0x1c2ba36
addq $0x478, %rsp # imm = 0x478
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 4>::occluded_n<embree::avx512::OrientedCurve1IntersectorK<embree::BezierCurveT, 4>, embree::avx512::Occluded1KEpilog1<4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
/// Occlusion (shadow-ray / any-hit) query for lane k of a K-wide ray packet
/// against one CurveNi<8> primitive block. Returns true as soon as any curve
/// in the block occludes the ray; returns false if no curve does.
static __forceinline bool occluded_n(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Broad phase: test ray k against the quantized per-curve bounds of the
// whole block at once; 'valid' marks candidate curves, tNear their entry
// distances along the ray.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
// bscf: scan lowest set bit and clear it from mask, yielding the next
// candidate curve index (presumably "bit-scan + clear-first" — confirm
// against embree's common/math bit utilities).
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Gather the curve's four control points (a0..a3) and normals (n0..n3)
// starting at vertexID.
unsigned int vertexID = geom->curve(primID);
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Latency hiding: while the current curve is intersected below, prefetch
// the vertex data of the next candidate into L1, and of the one after
// that into L2. mask1/i1 peek at the remaining bits without consuming
// them from 'mask'.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Narrow phase: exact curve intersection. For an occlusion query any
// confirmed hit terminates traversal immediately.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID)))
return true;
// Re-cull the remaining candidates: keep only those whose broad-phase
// entry distance is still within the ray's current tfar[k].
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x468, %rsp # imm = 0x468
movq %r8, %r10
movq %rdx, %r14
movq %rsi, %r9
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %rdx
leaq (%rdx,%rdx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r9,%r14,4), %xmm1
vmovss 0x40(%r9,%r14,4), %xmm2
vinsertps $0x10, 0x10(%r9,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%r9,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%r9,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%r9,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %r11
vpmovsxbd 0x6(%r8,%r11,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%r11,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r10,%r8), %ymm1
addq %rax, %r8
vpmovsxbd 0x6(%r10,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %edx
vpmovsxbd 0x6(%r10,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2e4e0e(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2f35d7(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2f3546(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x2c365d(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2bed4b(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %r8
subq %rax, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r10,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rdx
shlq $0x3, %r11
subq %rax, %r11
movl %eax, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r10,%r8), %ymm6
subq %rsi, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r10,%rdx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r10,%r11), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x30(%r9,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2f245b(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x80(%r9,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2f2436(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x32ce32(%rip), %ymm1, %k0 # 0x1f5a920
vmovups %ymm6, 0x3e0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0xf(%rsp)
je 0x1c2f8f0
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r12
addq $0x10, %r12
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x1c0(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x2c3496(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2f3364(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2f3356(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq %r10, 0x90(%rsp)
movq %r9, 0x88(%rsp)
movq %rcx, 0x80(%rsp)
tzcntq %r13, %rax
movl 0x2(%r10), %edx
movl 0x6(%r10,%rax,4), %eax
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq %rdx, 0x98(%rsp)
movq (%rcx,%rdx,8), %rdi
movq 0x58(%rdi), %rcx
movq 0x68(%rdi), %rdx
movq %rdx, %rsi
imulq %rax, %rsi
movl (%rcx,%rsi), %r10d
movq 0xa0(%rdi), %rsi
movq %rsi, %r8
imulq %r10, %r8
leaq 0x1(%r10), %rbx
leaq 0x2(%r10), %r11
leaq 0x3(%r10), %r9
movq 0xd8(%rdi), %r15
imulq %r15, %r10
movq 0xc8(%rdi), %rbp
vmovups (%rbp,%r10), %xmm5
movq %rsi, %r10
imulq %rbx, %r10
imulq %r15, %rbx
vmovups (%rbp,%rbx), %xmm6
movq %rsi, %rbx
imulq %r11, %rbx
imulq %r15, %r11
vmovups (%rbp,%r11), %xmm7
imulq %r9, %r15
vmovups (%rbp,%r15), %xmm8
movq %rsi, %r11
imulq %r9, %r11
movq 0x90(%rdi), %rdi
vmovaps (%rdi,%r8), %xmm9
vmovaps (%rdi,%r10), %xmm10
movq 0x90(%rsp), %r10
vmovaps (%rdi,%rbx), %xmm11
blsrq %r13, %r13
vmovaps (%rdi,%r11), %xmm4
movq %r13, %r8
subq $0x1, %r8
jb 0x1c2dca2
andq %r13, %r8
tzcntq %r13, %r9
movl 0x6(%r10,%r9,4), %r9d
imulq %rdx, %r9
movl (%rcx,%r9), %r9d
imulq %rsi, %r9
prefetcht0 (%rdi,%r9)
prefetcht0 0x40(%rdi,%r9)
testq %r8, %r8
je 0x1c2dca2
tzcntq %r8, %r8
movl 0x6(%r10,%r8,4), %r8d
imulq %r8, %rdx
movl (%rcx,%rdx), %ecx
imulq %rcx, %rsi
prefetcht1 (%rdi,%rsi)
prefetcht1 0x40(%rdi,%rsi)
movq 0x88(%rsp), %r9
vmovss (%r9,%r14,4), %xmm0
vinsertps $0x1c, 0x10(%r9,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r9,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmulps %xmm31, %xmm4, %xmm1
vfmadd231ps %xmm31, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm31) + xmm1
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm1, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm2) + xmm1
vaddps %xmm2, %xmm9, %xmm2
vfmadd231ps %xmm17, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm17) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vfmadd231ps %xmm31, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm31) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm12, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) + xmm12
vaddps %xmm3, %xmm5, %xmm13
vfmadd231ps %xmm17, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm4, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) + xmm4
vfmadd231ps %xmm31, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm31) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfnmadd231ps %xmm11, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm11) + xmm4
vfmadd231ps %xmm10, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm8, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm9) + xmm8
vfmadd231ps %xmm31, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm31) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfnmadd231ps %xmm7, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm7) + xmm8
vfmadd231ps %xmm6, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x2be93e(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x2bed97(%rip), %xmm16 # 0x1eecb80
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vsubss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x2c31bf(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vdpps $0x7f, %xmm5, %xmm5, %xmm9
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vmovss %xmm9, %xmm31, %xmm8 # xmm8 = xmm9[0],xmm31[1,2,3]
vrsqrt14ss %xmm8, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm9, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm11
vdpps $0x7f, %xmm6, %xmm5, %xmm13
vbroadcastss %xmm9, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm13, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm8, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm9 # xmm9 = -(xmm6 * xmm9) + xmm17
vmulss %xmm6, %xmm9, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm13
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm11, %xmm6
vsubps %xmm6, %xmm3, %xmm15
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm11, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm16
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x2c3f96(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm13, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm15, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm16, %xmm22
vsubps %xmm0, %xmm13, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x320(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps (%r12), %xmm4
vmovaps 0x10(%r12), %xmm5
vmovaps 0x20(%r12), %xmm6
vmulps %xmm1, %xmm6, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x310(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm2
vfmadd231ps %xmm7, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm7) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm7
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x300(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm6, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm7, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm7) + xmm3
vsubps %xmm0, %xmm15, %xmm9
vbroadcastss %xmm9, %xmm7
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2f0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm6, %xmm9, %xmm9
vfmadd231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) + xmm9
vfmadd231ps %xmm7, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm7) + xmm9
vsubps %xmm0, %xmm14, %xmm10
vbroadcastss %xmm10, %xmm7
vshufps $0x55, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2e0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm6, %xmm10, %xmm10
vfmadd231ps %xmm8, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm8) + xmm10
vfmadd231ps %xmm7, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm7) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm7
vshufps $0x55, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2d0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm11
vfmadd231ps %xmm8, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm8) + xmm11
vfmadd231ps %xmm7, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm7) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm7
vshufps $0x55, %xmm12, %xmm12, %xmm8 # xmm8 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2c0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm6, %xmm12, %xmm12
vfmadd231ps %xmm8, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm8) + xmm12
vfmadd231ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm7) + xmm12
vsubps %xmm0, %xmm16, %xmm8
vbroadcastss %xmm8, %xmm0
vshufps $0x55, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1,1,1]
vmovaps %xmm8, 0x2b0(%rsp)
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm6, %xmm8, %xmm6
vfmadd231ps %xmm7, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm7) + xmm6
vfmadd231ps %xmm0, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm0) + xmm6
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm24 # xmm24 = xmm3[0],xmm12[0]
vmovlhps %xmm6, %xmm9, %xmm25 # xmm25 = xmm9[0],xmm6[0]
vminps %xmm23, %xmm8, %xmm0
vmaxps %xmm23, %xmm8, %xmm4
vminps %xmm25, %xmm24, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm25, %xmm24, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vshufpd $0x3, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,1]
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm7, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x2c3d89(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm7 # xmm7 = xmm1[0,0]
vmovddup %xmm2, %xmm18 # xmm18 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm9, %xmm2 # xmm2 = xmm9[0,0]
vmovddup %xmm10, %xmm9 # xmm9 = xmm10[0,0]
vmovddup %xmm11, %xmm10 # xmm10 = xmm11[0,0]
vmovddup %xmm12, %xmm11 # xmm11 = xmm12[0,0]
vmovddup %xmm6, %xmm12 # xmm12 = xmm6[0,0]
vmovaps %xmm0, 0x140(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x220(%rsp)
xorl %r11d, %r11d
xorl %ebx, %ebx
vmovss 0x30(%r9,%r14,4), %xmm0
vmovss %xmm0, 0xac(%rsp)
vmovaps %xmm8, 0xd0(%rsp)
vsubps %xmm8, %xmm23, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm23, 0xc0(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm24, 0x110(%rsp)
vmovaps %xmm25, 0x150(%rsp)
vsubps %xmm24, %xmm25, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps %xmm13, 0x2a0(%rsp)
vmovaps %xmm14, 0x290(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm2, %xmm13
vmovaps %xmm17, 0x260(%rsp)
vmovaps %xmm21, 0x240(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm20, 0x250(%rsp)
vmovaps %xmm22, 0x230(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm15, 0x280(%rsp)
vmovaps %xmm16, 0x270(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm1, %xmm16
movq 0x98(%rsp), %r8
vpbroadcastd %r8d, %xmm0
vmovdqa %xmm0, 0x1d0(%rsp)
vmovsd 0x2be467(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0x80(%rsp), %rcx
vmovaps %xmm7, 0x170(%rsp)
vmovaps %xmm18, 0x160(%rsp)
vmovaps %xmm1, 0x70(%rsp)
vmovaps %xmm2, 0x50(%rsp)
vmovaps %xmm9, 0x40(%rsp)
vmovaps %xmm10, 0x30(%rsp)
vmovaps %xmm11, 0x20(%rsp)
vmovaps %xmm12, 0x10(%rsp)
vmovups %ymm29, 0x420(%rsp)
vmovups %ymm28, 0x400(%rsp)
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x2be424(%rip), %ymm26 # 0x1eec714
vsubps %xmm1, %xmm26, %xmm2
vmulps %xmm1, %xmm9, %xmm3
vmulps %xmm1, %xmm10, %xmm4
vmulps %xmm1, %xmm11, %xmm5
vmulps %xmm1, %xmm12, %xmm1
vfmadd231ps %xmm7, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm7) + xmm3
vfmadd231ps %xmm18, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm18) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2f2ba4(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2e43bc(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2f2b5d(%rip), %ymm13 # 0x1f20edc
vpermps %ymm3, %ymm13, %ymm19
vbroadcastss 0x2f2b4a(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm13, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm13, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm13, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2f2b63(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm26, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2c2b9a(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x3318aa(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %edx
andb $0x7f, %al
je 0x1c2e75e
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %edx
andb %dl, %al
movzbl %al, %edx
testl %edx, %edx
je 0x1c2e781
movl %ebx, %eax
movl %edx, 0x180(%rsp,%rax,4)
vmovlps %xmm0, 0x330(%rsp,%rax,8)
vmovlps %xmm27, 0x440(%rsp,%rax,8)
incl %ebx
vbroadcastss 0x2c2861(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2f272f(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2f2721(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2c2843(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2f272d(%rip), %xmm21 # 0x1f20ee0
vmovss 0x2bdf57(%rip), %xmm22 # 0x1eec714
vmovss 0x2c2839(%rip), %xmm23 # 0x1ef1000
vmovss 0x2c327b(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x2bdf39(%rip), %xmm25 # 0x1eec714
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x100(%rsp), %xmm26
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0xe0(%rsp), %xmm29
testl %ebx, %ebx
je 0x1c2f8c6
leal -0x1(%rbx), %eax
vmovss 0x330(%rsp,%rax,8), %xmm0
vmovss 0x334(%rsp,%rax,8), %xmm1
movl 0x180(%rsp,%rax,4), %esi
vmovsd 0x440(%rsp,%rax,8), %xmm15
tzcntq %rsi, %rdx
blsrl %esi, %esi
movl %esi, 0x180(%rsp,%rax,4)
cmovel %eax, %ebx
vxorps %xmm2, %xmm2, %xmm2
vcvtsi2ss %rdx, %xmm2, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm27
vfmadd231ss %xmm4, %xmm0, %xmm27 # xmm27 = (xmm0 * xmm4) + xmm27
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm27, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c2f899
vmovaps %xmm27, %xmm6
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %dil
cmpl $0x4, %ebx
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm9, %xmm3
vmulps %xmm1, %xmm10, %xmm4
vmulps %xmm1, %xmm11, %xmm5
vmulps %xmm1, %xmm12, %xmm1
vfmadd231ps 0x170(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x160(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm6, 0xb0(%rsp)
vbroadcastss %xmm6, %xmm6
vmovaps %xmm14, 0x60(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2c3530(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x2c1f85(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c2ea68
vmovss 0x2c345e(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c2eac9
vmovss 0x2c3450(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2c1f59(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c2eac9
vmovss 0x2c341e(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2c1f27(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c2f846
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x2c1eec(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bcf0c(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x2be062(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c2eb44
jnp 0x1c2eb83
vucomiss %xmm13, %xmm14
jne 0x1c2eb96
jp 0x1c2eb96
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bceb7(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x2be00d(%rip), %xmm14 # 0x1eecb84
vmovss 0x2bdb93(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c2ebb7
vmovaps 0xd0(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm16
jmp 0x1c2ebd2
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xd0(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x2c1de3(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x60(%rsp), %xmm14
jne 0x1c2ebf8
jnp 0x1c2ec62
vucomiss %xmm9, %xmm10
jne 0x1c2ec37
jp 0x1c2ec37
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bce03(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x2bdf59(%rip), %xmm10 # 0x1eecb84
vmovss 0x2bdadf(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c2ec58
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %bpl
vucomiss %xmm8, %xmm7
ja 0x1c2f7df
vaddss 0x32e7b9(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2bdee5(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm27, %xmm4 # xmm4 = xmm27[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm27, %xmm1 # xmm1 = (xmm27 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm27, %xmm4 # xmm4 = (xmm27 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0xb0(%rsp), %xmm27
vinsertps $0x10, %xmm1, %xmm27, %xmm6 # xmm6 = xmm27[0],xmm1[0],xmm27[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2bdd7c(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm26, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm28, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps %xmm29, %xmm13
vfmadd213ps 0x110(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2f2030(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x330e4e(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x32baae(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2f1f9f(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2f1f38(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c2f7cf
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm27, %xmm7
vmovaps 0x70(%rsp), %xmm16
jbe 0x1c2f0d3
testb %sil, %sil
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c1f69(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x150(%rsp), %xmm13
vmovaps 0x140(%rsp), %xmm14
je 0x1c2f10e
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c2f10e
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c1f0b(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x150(%rsp), %xmm13
vmovaps 0x140(%rsp), %xmm14
orb %dil, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c2f815
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm12, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c2f1c3
decq %rax
jne 0x1c2f120
jmp 0x1c2f817
vucomiss %xmm30, %xmm0
jb 0x1c2f817
vucomiss %xmm0, %xmm22
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm14
jb 0x1c2f2f0
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c2f2f0
vucomiss %xmm1, %xmm22
jb 0x1c2f2f0
vmovss 0x8(%r12), %xmm2
vinsertps $0x1c, 0x18(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vsubss %xmm0, %xmm22, %xmm7
vmulss %xmm7, %xmm7, %xmm3
vmulss %xmm3, %xmm7, %xmm2
vmulss %xmm0, %xmm12, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm4
vmulss %xmm4, %xmm12, %xmm5
vmulss %xmm5, %xmm7, %xmm5
vmulps %xmm4, %xmm0, %xmm6
vmulss %xmm1, %xmm6, %xmm1
vfmadd231ss %xmm9, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm8) + xmm1
vfmadd231ss %xmm11, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm11) + xmm1
vucomiss 0xac(%rsp), %xmm1
jb 0x1c2f2f0
vmovss 0x80(%r9,%r14,4), %xmm4
vucomiss %xmm1, %xmm4
jae 0x1c2f30d
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
jmp 0x1c2f83b
vmovss %xmm4, 0xa8(%rsp)
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vsubps %xmm4, %xmm25, %xmm8
vmulps 0x290(%rsp), %xmm4, %xmm9
vmulps 0x240(%rsp), %xmm4, %xmm10
vmulps 0x230(%rsp), %xmm4, %xmm11
vmulps 0x270(%rsp), %xmm4, %xmm12
vfmadd231ps 0x2a0(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x260(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x250(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x280(%rsp), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm12, %xmm10
vbroadcastss %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm11
vbroadcastss %xmm7, %xmm7
vfmadd231ps %xmm8, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm8) + xmm11
vmulps %xmm0, %xmm10, %xmm8
vfmadd231ps %xmm9, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm9) + xmm8
vmulps %xmm0, %xmm8, %xmm8
vfmadd231ps %xmm11, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm11) + xmm8
vmulps %xmm17, %xmm8, %xmm7
movq (%rcx), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %r15
movl 0x90(%r9,%r14,4), %eax
testl %eax, 0x34(%r15)
je 0x1c2f7b0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
jne 0x1c2f3f7
movb $0x1, %al
cmpq $0x0, 0x48(%r15)
je 0x1c2f7ca
vbroadcastss %xmm6, %xmm6
vmulps 0x1e0(%rsp), %xmm6, %xmm6
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x1f0(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x200(%rsp), %xmm5, %xmm3 # xmm3 = (xmm3 * mem) + xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x210(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[1,2,0,3]
vmulps %xmm5, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm3) - xmm2
movq 0x8(%rcx), %rax
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[2,2,2,2]
vbroadcastss %xmm2, %xmm2
vmovaps %xmm3, 0x350(%rsp)
vmovaps %xmm5, 0x360(%rsp)
vmovaps %xmm2, 0x370(%rsp)
vmovaps %xmm0, 0x380(%rsp)
vmovaps %xmm4, 0x390(%rsp)
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovdqa 0x1d0(%rsp), %xmm0
vmovdqa %xmm0, 0x3b0(%rsp)
leaq 0x3c0(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rdx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vmovss %xmm1, 0x80(%r9,%r14,4)
vmovaps 0x1c0(%rsp), %xmm0
vmovaps %xmm0, 0x120(%rsp)
leaq 0x120(%rsp), %rax
movq %rax, 0x190(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x198(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x1a0(%rsp)
movq %r9, 0x1a8(%rsp)
leaq 0x350(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movl $0x4, 0x1b8(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
vmovaps %xmm15, 0x130(%rsp)
je 0x1c2f64b
leaq 0x190(%rsp), %rdi
movl %r11d, 0xa4(%rsp)
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm14
vmovaps 0xb0(%rsp), %xmm27
vmovaps 0x130(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm29
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0x100(%rsp), %xmm26
movl 0xa4(%rsp), %r11d
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm16
movq 0x98(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x2bd137(%rip), %xmm25 # 0x1eec714
vmovss 0x2c2465(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2c1a0f(%rip), %xmm23 # 0x1ef1000
vmovss 0x2bd119(%rip), %xmm22 # 0x1eec714
vmovss 0x2f18db(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2c19dd(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2f18a7(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2f18a1(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2c19bf(%rip), %xmm17 # 0x1ef0fec
vxorps %xmm31, %xmm31, %xmm31
movq 0x80(%rsp), %rcx
movq 0x88(%rsp), %r9
movq 0x90(%rsp), %r10
vmovdqa 0x120(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c2f877
movq 0x10(%rcx), %rdx
movq 0x10(%rdx), %rax
testq %rax, %rax
je 0x1c2f773
testb $0x2, (%rdx)
jne 0x1c2f685
testb $0x40, 0x3e(%r15)
je 0x1c2f773
leaq 0x190(%rsp), %rdi
movl %r11d, %r15d
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm14
vmovaps 0xb0(%rsp), %xmm27
vmovaps 0x130(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm29
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0x100(%rsp), %xmm26
movl %r15d, %r11d
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm16
movq 0x98(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x2bd00f(%rip), %xmm25 # 0x1eec714
vmovss 0x2c233d(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2c18e7(%rip), %xmm23 # 0x1ef1000
vmovss 0x2bcff1(%rip), %xmm22 # 0x1eec714
vmovss 0x2f17b3(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2c18b5(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2f177f(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2f1779(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2c1897(%rip), %xmm17 # 0x1ef0fec
vxorps %xmm31, %xmm31, %xmm31
movq 0x80(%rsp), %rcx
movq 0x88(%rsp), %r9
movq 0x90(%rsp), %r10
vmovdqa 0x120(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
movq 0x1a8(%rsp), %rax
vmovaps 0x80(%rax), %xmm0
vbroadcastss 0x2bd3e8(%rip), %xmm0 {%k1} # 0x1eecb84
vmovaps %xmm0, 0x80(%rax)
kortestb %k1, %k1
setne %al
jmp 0x1c2f879
xorl %eax, %eax
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
orb %al, %r11b
jmp 0x1c2f83b
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c2f817
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps %xmm27, %xmm15
vmovaps 0xb0(%rsp), %xmm27
jmp 0x1c2f83b
xorl %ebp, %ebp
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm14
testb %bpl, %bpl
jne 0x1c2e819
jmp 0x1c2f899
movb $0x1, %bpl
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps %xmm27, %xmm15
vmovaps 0xb0(%rsp), %xmm27
jmp 0x1c2f835
xorl %eax, %eax
testb %al, %al
jne 0x1c2f7ca
vmovss 0xa8(%rsp), %xmm0
vmovss %xmm0, 0x80(%r9,%r14,4)
jmp 0x1c2f7ca
vinsertps $0x10, %xmm14, %xmm27, %xmm0 # xmm0 = xmm27[0],xmm14[0],xmm27[2,3]
vmovaps 0x170(%rsp), %xmm7
vmovaps 0x160(%rsp), %xmm18
vmovups 0x420(%rsp), %ymm29
vmovups 0x400(%rsp), %ymm28
jmp 0x1c2e2da
testb $0x1, %r11b
jne 0x1c2f8f0
vmovups 0x3e0(%rsp), %ymm0
vcmpleps 0x80(%r9,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne 0xf(%rsp)
jne 0x1c2db88
movb 0xf(%rsp), %al
andb $0x1, %al
addq $0x468, %rsp # imm = 0x468
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 8>::intersect_n<embree::avx512::OrientedCurve1IntersectorK<embree::BezierCurveT, 8>, embree::avx512::Intersect1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayHitK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersects the curve primitives of one CurveNi<8> leaf against a single
// ray — lane k of the K-wide ray packet — and commits any accepted hit into
// that lane of the ray hit record.
static __forceinline void intersect_n(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Coarse test of the ray against all curves in the leaf at once; 'valid'
// flags the candidate lanes and 'tNear' holds their entry distances.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Bitmask of candidate curve indices still to be tested exactly.
size_t mask = movemask(valid);
while (mask)
{
// Pop (scan and clear) the lowest set bit: index of the next candidate.
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);  // statistics counter (no-op unless stats enabled)
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
unsigned int vertexID = geom->curve(primID);
// Gather the four control points (a0..a3) and four normals (n0..n3) of
// this curve segment.
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Prefetch the vertex data of up to two upcoming candidates so their
// memory loads overlap with the exact intersection test below.
// 'mask1' is a scratch copy: bscf pops the next candidate from it
// without disturbing the loop's own 'mask'.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
// Peek (bsf) at the candidate after next for a farther L2 prefetch.
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Exact oriented-curve intersection; the epilog records an accepted hit
// into ray lane k (which can shorten ray.tfar[k]).
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID));
// Drop remaining candidates whose entry distance now lies beyond the
// (possibly reduced) current hit distance.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x560, %rsp # imm = 0x560
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r11
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rdx
vbroadcastss 0x12(%r8,%rdx), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x80(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rdx), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r8,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2e2ced(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2f14bb(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2f1430(%rip), %ymm5 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm5, %ymm2, %ymm6
vbroadcastss 0x2c1542(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2bcc30(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r9
subq %rcx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x60(%r11,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2f033c(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x100(%r11,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2f0317(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x32ad19(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x3a0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c31ac0
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r9d
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %rbx
addq $0x20, %rbx
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x340(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x2c1382(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2f1250(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2f1242(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq %r8, 0x88(%rsp)
movq %r10, 0x80(%rsp)
movq %r11, 0x78(%rsp)
tzcntq %r9, %rax
movl 0x2(%r8), %ecx
movl 0x6(%r8,%rax,4), %edi
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq %rcx, 0x90(%rsp)
movq (%rax,%rcx,8), %rsi
movq 0x58(%rsi), %rax
movq 0x68(%rsi), %rcx
movq %rcx, %rdx
movq %rdi, 0xd8(%rsp)
imulq %rdi, %rdx
movq %r9, %r13
movl (%rax,%rdx), %r9d
movq 0xa0(%rsi), %rdx
movq %rdx, %rdi
imulq %r9, %rdi
leaq 0x1(%r9), %r11
leaq 0x2(%r9), %r10
leaq 0x3(%r9), %r8
movq 0xd8(%rsi), %r14
imulq %r14, %r9
movq 0xc8(%rsi), %r12
vmovups (%r12,%r9), %xmm5
movq %rdx, %r9
imulq %r11, %r9
imulq %r14, %r11
vmovups (%r12,%r11), %xmm6
movq %rdx, %r11
imulq %r10, %r11
imulq %r14, %r10
vmovups (%r12,%r10), %xmm7
imulq %r8, %r14
vmovups (%r12,%r14), %xmm8
movq %rdx, %r10
imulq %r8, %r10
movq 0x90(%rsi), %rsi
vmovaps (%rsi,%rdi), %xmm9
vmovaps (%rsi,%r9), %xmm10
movq 0x88(%rsp), %r8
vmovaps (%rsi,%r11), %xmm11
blsrq %r13, %r9
vmovaps (%rsi,%r10), %xmm4
movq %r9, %rdi
subq $0x1, %rdi
jb 0x1c2fdc1
andq %r9, %rdi
movq %r9, %r10
tzcntq %r9, %r9
movl 0x6(%r8,%r9,4), %r9d
imulq %rcx, %r9
movl (%rax,%r9), %r9d
imulq %rdx, %r9
prefetcht0 (%rsi,%r9)
prefetcht0 0x40(%rsi,%r9)
movq %r10, %r9
testq %rdi, %rdi
je 0x1c2fdc1
tzcntq %rdi, %rdi
movl 0x6(%r8,%rdi,4), %edi
imulq %rdi, %rcx
movl (%rax,%rcx), %eax
imulq %rax, %rdx
prefetcht1 (%rsi,%rdx)
prefetcht1 0x40(%rsi,%rdx)
movq 0x78(%rsp), %r11
vmovss (%r11,%r15,4), %xmm0
vinsertps $0x1c, 0x20(%r11,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r11,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovss 0x60(%r11,%r15,4), %xmm1
vmovss %xmm1, 0x9c(%rsp)
vmulps %xmm31, %xmm4, %xmm1
vfmadd231ps %xmm31, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm31) + xmm1
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm1, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm2) + xmm1
vaddps %xmm2, %xmm9, %xmm2
vfmadd231ps %xmm17, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm17) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vfmadd231ps %xmm31, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm31) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm12, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) + xmm12
vaddps %xmm3, %xmm5, %xmm13
vfmadd231ps %xmm17, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm4, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) + xmm4
vfmadd231ps %xmm31, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm31) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfnmadd231ps %xmm11, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm11) + xmm4
vfmadd231ps %xmm10, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm8, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm9) + xmm8
vfmadd231ps %xmm31, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm31) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfnmadd231ps %xmm7, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm7) + xmm8
vfmadd231ps %xmm6, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x2bc812(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x2bcc6b(%rip), %xmm16 # 0x1eecb80
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vsubss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x2c1093(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vdpps $0x7f, %xmm5, %xmm5, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vdpps $0x7f, %xmm6, %xmm5, %xmm11
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm11, %xmm11
vmulps %xmm5, %xmm11, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm9, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm8 # xmm8 = -(xmm6 * xmm8) + xmm17
vmulss %xmm6, %xmm8, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm14
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm15
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm13, %xmm6
vsubps %xmm6, %xmm3, %xmm16
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm13, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm13
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x2c1e6a(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm14, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm16, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm15, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm13, %xmm22
vsubps %xmm0, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps (%rbx), %xmm4
vmovaps 0x10(%rbx), %xmm5
vmovaps %xmm1, 0x310(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps 0x20(%rbx), %xmm7
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x300(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm6, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm6) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm6
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x2f0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm7, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm6) + xmm3
vsubps %xmm0, %xmm16, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vmovaps %xmm6, 0x2e0(%rsp)
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm8) + xmm6
vsubps %xmm0, %xmm15, %xmm10
vbroadcastss %xmm10, %xmm8
vshufps $0x55, %xmm10, %xmm10, %xmm9 # xmm9 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2d0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm9, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm9) + xmm10
vfmadd231ps %xmm8, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm8) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm8
vshufps $0x55, %xmm11, %xmm11, %xmm9 # xmm9 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2c0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm11, %xmm11
vfmadd231ps %xmm9, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm9) + xmm11
vfmadd231ps %xmm8, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm8) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm8
vshufps $0x55, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2b0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm7, %xmm12, %xmm12
vfmadd231ps %xmm9, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm9) + xmm12
vfmadd231ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm8) + xmm12
vsubps %xmm0, %xmm13, %xmm9
vbroadcastss %xmm9, %xmm0
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2a0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm7
vfmadd231ps %xmm8, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm8) + xmm7
vfmadd231ps %xmm0, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm0) + xmm7
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm23 # xmm23 = xmm3[0],xmm12[0]
vmovlhps %xmm7, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm7[0]
vminps %xmm9, %xmm8, %xmm0
vmaxps %xmm9, %xmm8, %xmm4
vminps %xmm24, %xmm23, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm24, %xmm23, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vminps %xmm5, %xmm0, %xmm0
vshufpd $0x3, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1]
vmaxps %xmm5, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x2c1c68(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm18 # xmm18 = xmm1[0,0]
vmovddup %xmm2, %xmm25 # xmm25 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm10, %xmm10 # xmm10 = xmm10[0,0]
vmovddup %xmm11, %xmm11 # xmm11 = xmm11[0,0]
vmovddup %xmm12, %xmm12 # xmm12 = xmm12[0,0]
vmovddup %xmm7, %xmm2 # xmm2 = xmm7[0,0]
vmovaps %xmm0, 0x170(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
xorl %r14d, %r14d
vmovaps %xmm8, 0xc0(%rsp)
vsubps %xmm8, %xmm9, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm9, 0xb0(%rsp)
vsubps %xmm9, %xmm23, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm23, 0x110(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps %xmm14, 0x290(%rsp)
vmovaps %xmm15, 0x280(%rsp)
vsubps %xmm14, %xmm15, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm17, 0x250(%rsp)
vmovaps %xmm21, 0x230(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm20, 0x240(%rsp)
vmovaps %xmm22, 0x220(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm16, 0x270(%rsp)
vmovaps %xmm13, 0x260(%rsp)
vsubps %xmm16, %xmm13, %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm2, %xmm13
vmovaps %xmm1, %xmm16
movq 0x90(%rsp), %rdi
vpbroadcastd %edi, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
movq 0xd8(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vmovsd 0x2bc34f(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0x80(%rsp), %r10
vmovaps %xmm18, 0x1a0(%rsp)
vmovaps %xmm25, 0x190(%rsp)
vmovaps %xmm1, 0x60(%rsp)
vmovaps %xmm6, 0x30(%rsp)
vmovaps %xmm10, 0x20(%rsp)
vmovaps %xmm11, 0x10(%rsp)
vmovaps %xmm12, (%rsp)
vmovaps %xmm2, 0x40(%rsp)
vmovaps %ymm29, 0x3e0(%rsp)
vmovaps %ymm28, 0x3c0(%rsp)
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x2bc30e(%rip), %ymm26 # 0x1eec714
vsubps %xmm1, %xmm26, %xmm2
vmulps %xmm1, %xmm10, %xmm3
vmulps %xmm1, %xmm11, %xmm4
vmulps %xmm1, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm1
vfmadd231ps %xmm18, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm18) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2f0a8d(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2e22a5(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2f0a46(%rip), %ymm13 # 0x1f20edc
vpermps %ymm3, %ymm13, %ymm19
vbroadcastss 0x2f0a33(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm13, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm13, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm13, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2f0a4c(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm26, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2c0a83(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x32f793(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c30875
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c3089a
movl %r14d, %eax
movl %ecx, 0x1b0(%rsp,%rax,4)
vmovlps %xmm0, 0x320(%rsp,%rax,8)
vmovlps %xmm27, 0x400(%rsp,%rax,8)
incl %r14d
vbroadcastss 0x2c0748(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2f0616(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2f0608(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2c072a(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2f0614(%rip), %xmm21 # 0x1f20ee0
vmovss 0x2bbe3e(%rip), %xmm22 # 0x1eec714
vmovss 0x2c0720(%rip), %xmm23 # 0x1ef1000
vmovss 0x2c1162(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x2bbe20(%rip), %xmm25 # 0x1eec714
vmovaps 0x60(%rsp), %xmm16
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps (%rsp), %xmm12
vmovaps 0x40(%rsp), %xmm13
vmovaps 0x100(%rsp), %xmm26
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0xe0(%rsp), %xmm29
testl %r14d, %r14d
je 0x1c31aa1
leal -0x1(%r14), %eax
vmovss 0x320(%rsp,%rax,8), %xmm0
vmovss 0x324(%rsp,%rax,8), %xmm1
movl 0x1b0(%rsp,%rax,4), %ecx
vmovsd 0x400(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1b0(%rsp,%rax,4)
cmovel %eax, %r14d
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm27
vfmadd231ss %xmm4, %xmm0, %xmm27 # xmm27 = (xmm0 * xmm4) + xmm27
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm27, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c31a75
vmovaps %xmm27, %xmm7
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %r14d
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm10, %xmm3
vmulps %xmm1, %xmm11, %xmm4
vmulps %xmm1, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm1
vfmadd231ps 0x1a0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x190(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm7, 0xa0(%rsp)
vbroadcastss %xmm7, %xmm6
vmovaps %xmm14, 0x50(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2c1415(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x2bfe6a(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c30b83
vmovss 0x2c1343(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c30be4
vmovss 0x2c1335(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2bfe3e(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c30be4
vmovss 0x2c1303(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2bfe0c(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c31656
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x2bfdd1(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x2badf1(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x2bbf47(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c30c5f
jnp 0x1c30c9e
vucomiss %xmm13, %xmm14
jne 0x1c30cb1
jp 0x1c30cb1
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bad9c(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x2bbef2(%rip), %xmm14 # 0x1eecb84
vmovss 0x2bba78(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c30cd2
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0xb0(%rsp), %xmm16
jmp 0x1c30ced
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xc0(%rsp), %xmm15
vmovaps 0xb0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x2bfcc8(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x50(%rsp), %xmm14
jne 0x1c30d13
jnp 0x1c30d7d
vucomiss %xmm9, %xmm10
jne 0x1c30d52
jp 0x1c30d52
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2bace8(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x2bbe3e(%rip), %xmm10 # 0x1eecb84
vmovss 0x2bb9c4(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c30d73
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r13b
vucomiss %xmm8, %xmm7
ja 0x1c315ed
vaddss 0x32c69e(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2bbdca(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm27, %xmm4 # xmm4 = xmm27[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm27, %xmm1 # xmm1 = (xmm27 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm27, %xmm4 # xmm4 = (xmm27 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0xa0(%rsp), %xmm27
vinsertps $0x10, %xmm1, %xmm27, %xmm6 # xmm6 = xmm27[0],xmm1[0],xmm27[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2bbc61(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm26, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm28, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps %xmm29, %xmm13
vfmadd213ps 0x110(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2eff15(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x32ed33(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x329993(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2efe84(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2efe1d(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c315dd
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm27, %xmm7
vmovaps 0x60(%rsp), %xmm16
jbe 0x1c311ee
testb %sil, %sil
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2bfe4e(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm13
vmovaps 0x170(%rsp), %xmm14
je 0x1c31229
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c31229
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2bfdf0(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xc0(%rsp), %xmm9
vmovaps 0xb0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm13
vmovaps 0x170(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c31622
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm12, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c312dd
decq %rax
jne 0x1c3123a
jmp 0x1c31625
vucomiss %xmm30, %xmm0
jb 0x1c31625
vucomiss %xmm0, %xmm22
vmovaps 0x40(%rsp), %xmm13
vmovaps 0x50(%rsp), %xmm14
jb 0x1c315c4
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c315c4
vucomiss %xmm1, %xmm22
jb 0x1c315c4
vmovss 0x8(%rbx), %xmm2
vinsertps $0x1c, 0x18(%rbx), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rbx), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2a0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm2
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm6) + xmm2
vsubss %xmm0, %xmm22, %xmm7
vmulss %xmm7, %xmm7, %xmm4
vmulss %xmm4, %xmm7, %xmm3
vmulss %xmm0, %xmm12, %xmm5
vmulss %xmm4, %xmm5, %xmm4
vmulps %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm5
vmulss %xmm5, %xmm7, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulss %xmm2, %xmm6, %xmm2
vfmadd231ss %xmm9, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm9) + xmm2
vfmadd231ss %xmm8, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm8) + xmm2
vfmadd231ss %xmm11, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm11) + xmm2
vucomiss 0x9c(%rsp), %xmm2
jb 0x1c315c4
vmovss 0x100(%r11,%r15,4), %xmm8
vucomiss %xmm2, %xmm8
jb 0x1c315c4
vmovss %xmm8, 0x98(%rsp)
vshufps $0x55, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[1,1,1,1]
vsubps %xmm8, %xmm25, %xmm9
vmulps 0x280(%rsp), %xmm8, %xmm10
vmulps 0x230(%rsp), %xmm8, %xmm11
vmulps 0x220(%rsp), %xmm8, %xmm12
vmulps 0x260(%rsp), %xmm8, %xmm8
vfmadd231ps 0x290(%rsp), %xmm9, %xmm10 # xmm10 = (xmm9 * mem) + xmm10
vfmadd231ps 0x250(%rsp), %xmm9, %xmm11 # xmm11 = (xmm9 * mem) + xmm11
vfmadd231ps 0x240(%rsp), %xmm9, %xmm12 # xmm12 = (xmm9 * mem) + xmm12
vfmadd231ps 0x270(%rsp), %xmm9, %xmm8 # xmm8 = (xmm9 * mem) + xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm12, %xmm10
vsubps %xmm12, %xmm8, %xmm8
vbroadcastss %xmm0, %xmm11
vmulps %xmm10, %xmm11, %xmm12
vbroadcastss %xmm7, %xmm7
vfmadd231ps %xmm9, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm9) + xmm12
vmulps %xmm8, %xmm11, %xmm8
vfmadd231ps %xmm10, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm10) + xmm8
vmulps %xmm8, %xmm11, %xmm8
vfmadd231ps %xmm12, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm12) + xmm8
vmulps %xmm17, %xmm8, %xmm7
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %r12
movl 0x120(%r11,%r15,4), %eax
testl %eax, 0x34(%r12)
je 0x1c315c4
vbroadcastss %xmm6, %xmm6
vmulps 0x1e0(%rsp), %xmm6, %xmm6
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x1f0(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x200(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[1,2,0,3]
vmulps %xmm5, %xmm3, %xmm3
vfmsub231ps %xmm4, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm4) - xmm3
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps (%rsp), %xmm12
jne 0x1c31686
cmpq $0x0, 0x40(%r12)
jne 0x1c31686
vmovss %xmm2, 0x100(%r11,%r15,4)
vextractps $0x1, %xmm3, 0x180(%r11,%r15,4)
vextractps $0x2, %xmm3, 0x1a0(%r11,%r15,4)
vmovss %xmm3, 0x1c0(%r11,%r15,4)
vmovss %xmm0, 0x1e0(%r11,%r15,4)
vmovss %xmm1, 0x200(%r11,%r15,4)
movq 0xd8(%rsp), %rax
movl %eax, 0x220(%r11,%r15,4)
movl %edi, 0x240(%r11,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x260(%r11,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r11,%r15,4)
jmp 0x1c31648
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps (%rsp), %xmm12
jmp 0x1c31648
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x60(%rsp), %xmm16
jmp 0x1c31625
vmovaps 0x60(%rsp), %xmm16
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps (%rsp), %xmm12
vmovaps 0x40(%rsp), %xmm13
vmovaps %xmm27, %xmm15
vmovaps 0xa0(%rsp), %xmm27
jmp 0x1c31648
xorl %r13d, %r13d
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps (%rsp), %xmm12
vmovaps 0x40(%rsp), %xmm13
vmovaps 0x50(%rsp), %xmm14
testb %r13b, %r13b
jne 0x1c30931
jmp 0x1c31a75
movb $0x1, %r13b
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps (%rsp), %xmm12
vmovaps 0x40(%rsp), %xmm13
vmovaps %xmm27, %xmm15
vmovaps 0xa0(%rsp), %xmm27
jmp 0x1c31642
movq 0x8(%r10), %rax
vbroadcastss %xmm0, %ymm1
vbroadcastss 0x2e106c(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm0
vpermps %ymm3, %ymm4, %ymm4
vbroadcastss 0x2ef831(%rip), %ymm5 # 0x1f20edc
vpermps %ymm3, %ymm5, %ymm5
vbroadcastss %xmm3, %ymm3
vmovaps %ymm4, 0x420(%rsp)
vmovaps %ymm5, 0x440(%rsp)
vmovaps %ymm3, 0x460(%rsp)
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
leaq 0x500(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x500(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x520(%rsp)
vmovss %xmm2, 0x100(%r11,%r15,4)
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
leaq 0x1c0(%rsp), %rax
movq %rax, 0x130(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x138(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x140(%rsp)
movq %r11, 0x148(%rsp)
leaq 0x420(%rsp), %rax
movq %rax, 0x150(%rsp)
movl $0x8, 0x158(%rsp)
movq 0x40(%r12), %rax
testq %rax, %rax
vmovaps %xmm15, 0x160(%rsp)
je 0x1c31897
leaq 0x130(%rsp), %rdi
movq %r9, 0x128(%rsp)
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm14
vmovaps 0xa0(%rsp), %xmm27
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm29
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0x100(%rsp), %xmm26
vmovaps 0x40(%rsp), %xmm13
vmovaps (%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x60(%rsp), %xmm16
movq 0x90(%rsp), %rdi
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c0214(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2bf7be(%rip), %xmm23 # 0x1ef1000
vmovss 0x2baec8(%rip), %xmm22 # 0x1eec714
vmovss 0x2ef68a(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2bf78c(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2ef656(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2ef650(%rip), %xmm18 # 0x1f20ec4
vxorps %xmm31, %xmm31, %xmm31
movq 0x128(%rsp), %r9
movq 0x78(%rsp), %r11
movq 0x80(%rsp), %r10
movq 0x88(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c31a49
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c319a3
testb $0x2, (%rcx)
jne 0x1c318cd
testb $0x40, 0x3e(%r12)
je 0x1c319a3
leaq 0x130(%rsp), %rdi
movq %r9, %r12
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm14
vmovaps 0xa0(%rsp), %xmm27
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm29
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0x100(%rsp), %xmm26
vmovaps 0x40(%rsp), %xmm13
vmovaps (%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm6
vmovaps 0x60(%rsp), %xmm16
movq 0x90(%rsp), %rdi
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2c0103(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2bf6ad(%rip), %xmm23 # 0x1ef1000
vmovss 0x2badb7(%rip), %xmm22 # 0x1eec714
vmovss 0x2ef579(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2bf67b(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2ef545(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2ef53f(%rip), %xmm18 # 0x1f20ec4
vxorps %xmm31, %xmm31, %xmm31
movq %r12, %r9
movq 0x78(%rsp), %r11
movq 0x80(%rsp), %r10
movq 0x88(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c31a49
vptestmd %ymm0, %ymm0, %k1
movq 0x148(%rsp), %rax
movq 0x150(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c31a5c
vmovd 0x98(%rsp), %xmm0
vmovd %xmm0, 0x100(%r11,%r15,4)
vbroadcastss 0x2bf586(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2baca4(%rip), %xmm25 # 0x1eec714
jmp 0x1c31648
vinsertps $0x10, %xmm14, %xmm27, %xmm0 # xmm0 = xmm27[0],xmm14[0],xmm27[2,3]
vmovaps 0x1a0(%rsp), %xmm18
vmovaps 0x190(%rsp), %xmm25
vmovaps 0x3e0(%rsp), %ymm29
vmovaps 0x3c0(%rsp), %ymm28
jmp 0x1c303f0
vmovaps 0x3a0(%rsp), %ymm0
vcmpleps 0x100(%r11,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r9d
jne 0x1c2fc99
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 8>::occluded_n<embree::avx512::OrientedCurve1IntersectorK<embree::BezierCurveT, 8>, embree::avx512::Occluded1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Occlusion (shadow-ray) query for a packed block of curve primitives, for
// lane k of an 8-wide ray packet. Returns true as soon as ANY curve in the
// block occludes the ray; returns false if none does.
//
// pre:     per-ray precomputed data forwarded to the curve intersector
// ray:     8-wide ray packet; only lane k is tested here
// k:       active lane index within the packet
// context: query context (provides the scene for geometry lookup)
// prim:    quantized curve block (CurveNi<8>) holding up to 8 curves
static __forceinline bool occluded_n(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Coarse test: intersect ray k against the block's per-curve bounds.
// tNear receives the entry distances; valid flags the curves whose
// bounds the ray hits at all.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Bitmask of candidate curves; each loop iteration pops and tests one.
size_t mask = movemask(valid);
while (mask)
{
// bscf pops the lowest set bit from mask and returns its index,
// i.e. the next candidate curve to test exactly.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
unsigned int vertexID = geom->curve(primID);
// Gather control points (a0..a3) and normals (n0..n3) of curve i.
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Software prefetch of the NEXT one or two candidates' vertex data,
// to hide memory latency behind the (expensive) exact intersection
// below. mask1 is a scratch copy so the real iteration order in
// mask is untouched.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
// At least one more candidate: pull its vertices toward L1.
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
// A second pending candidate: stage it in L2 (bsf only reads
// the bit; no need to clear since mask1 is discarded).
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Exact curve/ray test. The Epilog commits the hit; for occlusion
// queries a single confirmed hit suffices, so return immediately.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID)))
return true;
// ray.tfar[k] may have been shortened by the epilog; drop remaining
// candidates whose bounds-entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
// No curve in this block occludes the ray.
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x580, %rsp # imm = 0x580
movq %r8, %r10
movq %rdx, %r9
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %rdx
leaq (%rdx,%rdx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%r9,4), %xmm1
vmovss 0x80(%r11,%r9,4), %xmm2
vinsertps $0x10, 0x20(%r11,%r9,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r11,%r9,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r11,%r9,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x88(%rsp)
vinsertps $0x20, 0xc0(%r11,%r9,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r10,%r8), %ymm1
addq %rax, %r8
vpmovsxbd 0x6(%r10,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %edx
vpmovsxbd 0x6(%r10,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2e0b1f(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2ef2e8(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2ef257(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x2bf36e(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2baa5c(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %r8
subq %rax, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r10,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rdx
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r10,%r8), %ymm6
subq %rsi, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r10,%rdx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r10,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%r11,%r9,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2ee16c(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x100(%r11,%r9,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2ee147(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x328b43(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x3c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0x1f(%rsp)
je 0x1c33c0f
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
leaq (%r9,%r9,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r13
addq $0x20, %r13
movl $0x1, %eax
shlxl %r9d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x2bf1a7(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2ef075(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2ef067(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq 0x88(%rsp), %rdi
movq %r10, 0xb8(%rsp)
movq %r9, 0xb0(%rsp)
movq %r11, 0xa8(%rsp)
tzcntq %r12, %rax
movl 0x2(%r10), %edx
movl 0x6(%r10,%rax,4), %eax
movq (%rdi), %rcx
movq 0x1e8(%rcx), %rcx
movq %rdx, 0xc0(%rsp)
movq (%rcx,%rdx,8), %rdi
movq 0x58(%rdi), %rcx
movq 0x68(%rdi), %rdx
movq %rdx, %rsi
imulq %rax, %rsi
movl (%rcx,%rsi), %r10d
movq 0xa0(%rdi), %rsi
movq %rsi, %r8
imulq %r10, %r8
leaq 0x1(%r10), %rbx
leaq 0x2(%r10), %r11
leaq 0x3(%r10), %r9
movq 0xd8(%rdi), %r14
imulq %r14, %r10
movq 0xc8(%rdi), %r15
vmovups (%r15,%r10), %xmm5
movq %rsi, %r10
imulq %rbx, %r10
imulq %r14, %rbx
vmovups (%r15,%rbx), %xmm6
movq %rsi, %rbx
imulq %r11, %rbx
imulq %r14, %r11
vmovups (%r15,%r11), %xmm7
imulq %r9, %r14
vmovups (%r15,%r14), %xmm8
movq %rsi, %r11
imulq %r9, %r11
movq 0x90(%rdi), %rdi
vmovaps (%rdi,%r8), %xmm9
vmovaps (%rdi,%r10), %xmm10
movq 0xb8(%rsp), %r10
vmovaps (%rdi,%rbx), %xmm11
blsrq %r12, %r12
vmovaps (%rdi,%r11), %xmm4
movq %r12, %r8
subq $0x1, %r8
jb 0x1c31f96
andq %r12, %r8
tzcntq %r12, %r9
movl 0x6(%r10,%r9,4), %r9d
imulq %rdx, %r9
movl (%rcx,%r9), %r9d
imulq %rsi, %r9
prefetcht0 (%rdi,%r9)
prefetcht0 0x40(%rdi,%r9)
testq %r8, %r8
je 0x1c31f96
tzcntq %r8, %r8
movl 0x6(%r10,%r8,4), %r8d
imulq %r8, %rdx
movl (%rcx,%rdx), %ecx
imulq %rcx, %rsi
prefetcht1 (%rdi,%rsi)
prefetcht1 0x40(%rdi,%rsi)
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r11
vmovss (%r11,%r9,4), %xmm0
vinsertps $0x1c, 0x20(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmulps %xmm31, %xmm4, %xmm1
vfmadd231ps %xmm31, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm31) + xmm1
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm1, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm2) + xmm1
vaddps %xmm2, %xmm9, %xmm2
vfmadd231ps %xmm17, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm17) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vfmadd231ps %xmm31, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm31) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm12, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) + xmm12
vaddps %xmm3, %xmm5, %xmm13
vfmadd231ps %xmm17, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm4, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm3) + xmm4
vfmadd231ps %xmm31, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm31) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfnmadd231ps %xmm11, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm11) + xmm4
vfmadd231ps %xmm10, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm8, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm9) + xmm8
vfmadd231ps %xmm31, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm31) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfnmadd231ps %xmm7, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm7) + xmm8
vfmadd231ps %xmm6, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x2ba642(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x2baa9b(%rip), %xmm16 # 0x1eecb80
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vsubss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x2beec3(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vdpps $0x7f, %xmm5, %xmm5, %xmm9
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vmovss %xmm9, %xmm31, %xmm8 # xmm8 = xmm9[0],xmm31[1,2,3]
vrsqrt14ss %xmm8, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm9, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm11
vdpps $0x7f, %xmm6, %xmm5, %xmm13
vbroadcastss %xmm9, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm13, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm8, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm9 # xmm9 = -(xmm6 * xmm9) + xmm17
vmulss %xmm6, %xmm9, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm13
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm11, %xmm6
vsubps %xmm6, %xmm3, %xmm15
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm11, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm16
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x2bfc9a(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm13, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm15, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm16, %xmm22
vsubps %xmm0, %xmm13, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x330(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps (%r13), %xmm4
vmovaps 0x10(%r13), %xmm5
vmovaps 0x20(%r13), %xmm6
vmulps %xmm1, %xmm6, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x320(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm2
vfmadd231ps %xmm7, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm7) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm7
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x310(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm6, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm7, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm7) + xmm3
vsubps %xmm0, %xmm15, %xmm9
vbroadcastss %xmm9, %xmm7
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x300(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm6, %xmm9, %xmm9
vfmadd231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) + xmm9
vfmadd231ps %xmm7, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm7) + xmm9
vsubps %xmm0, %xmm14, %xmm10
vbroadcastss %xmm10, %xmm7
vshufps $0x55, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2f0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm6, %xmm10, %xmm10
vfmadd231ps %xmm8, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm8) + xmm10
vfmadd231ps %xmm7, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm7) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm7
vshufps $0x55, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2e0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm11
vfmadd231ps %xmm8, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm8) + xmm11
vfmadd231ps %xmm7, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm7) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm7
vshufps $0x55, %xmm12, %xmm12, %xmm8 # xmm8 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2d0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm6, %xmm12, %xmm12
vfmadd231ps %xmm8, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm8) + xmm12
vfmadd231ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm7) + xmm12
vsubps %xmm0, %xmm16, %xmm8
vbroadcastss %xmm8, %xmm0
vshufps $0x55, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1,1,1]
vmovaps %xmm8, 0x2c0(%rsp)
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm6, %xmm8, %xmm6
vfmadd231ps %xmm7, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm7) + xmm6
vfmadd231ps %xmm0, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm0) + xmm6
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm24 # xmm24 = xmm3[0],xmm12[0]
vmovlhps %xmm6, %xmm9, %xmm25 # xmm25 = xmm9[0],xmm6[0]
vminps %xmm23, %xmm8, %xmm0
vmaxps %xmm23, %xmm8, %xmm4
vminps %xmm25, %xmm24, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm25, %xmm24, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vshufpd $0x3, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,1]
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm7, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x2bfa8f(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm7 # xmm7 = xmm1[0,0]
vmovddup %xmm2, %xmm18 # xmm18 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm9, %xmm2 # xmm2 = xmm9[0,0]
vmovddup %xmm10, %xmm3 # xmm3 = xmm10[0,0]
vmovddup %xmm11, %xmm9 # xmm9 = xmm11[0,0]
vmovddup %xmm12, %xmm10 # xmm10 = xmm12[0,0]
vmovddup %xmm6, %xmm11 # xmm11 = xmm6[0,0]
vmovaps %xmm0, 0x160(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x3a0(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
vmovss 0x60(%r11,%r9,4), %xmm0
vmovss %xmm0, 0xcc(%rsp)
vmovaps %xmm8, 0xf0(%rsp)
vsubps %xmm8, %xmm23, %xmm0
vmovaps %xmm0, 0x120(%rsp)
vmovaps %xmm23, 0xe0(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm24, 0x130(%rsp)
vmovaps %xmm25, 0x170(%rsp)
vsubps %xmm24, %xmm25, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm13, 0x2b0(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovaps %xmm3, %xmm13
vmovaps %xmm2, %xmm12
vmovaps %xmm17, 0x270(%rsp)
vmovaps %xmm21, 0x250(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm20, 0x260(%rsp)
vmovaps %xmm22, 0x240(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm15, 0x290(%rsp)
vmovaps %xmm16, 0x280(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm1, %xmm16
movq 0xc0(%rsp), %r8
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
vmovsd 0x2ba169(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0x88(%rsp), %rdi
vmovaps %xmm7, 0x190(%rsp)
vmovaps %xmm18, 0x180(%rsp)
vmovaps %xmm1, 0x90(%rsp)
vmovaps %xmm2, 0x60(%rsp)
vmovaps %xmm3, 0x50(%rsp)
vmovaps %xmm9, 0x40(%rsp)
vmovaps %xmm10, 0x30(%rsp)
vmovaps %xmm11, 0x20(%rsp)
vmovaps %ymm29, 0x400(%rsp)
vmovaps %ymm28, 0x3e0(%rsp)
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x2ba123(%rip), %ymm26 # 0x1eec714
vsubps %xmm1, %xmm26, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps %xmm7, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm7) + xmm3
vfmadd231ps %xmm18, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm18) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2ee8a3(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2e00bb(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2ee85c(%rip), %ymm13 # 0x1f20edc
vpermps %ymm3, %ymm13, %ymm19
vbroadcastss 0x2ee849(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm13, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm13, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm13, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2ee862(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm26, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2be899(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x32d5a9(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c32a5f
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c32a82
movl %ebx, %eax
movl %ecx, 0x1a0(%rsp,%rax,4)
vmovlps %xmm0, 0x340(%rsp,%rax,8)
vmovlps %xmm27, 0x420(%rsp,%rax,8)
incl %ebx
vbroadcastss 0x2be560(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2ee42e(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2ee420(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2be542(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2ee42c(%rip), %xmm21 # 0x1f20ee0
vmovss 0x2b9c56(%rip), %xmm22 # 0x1eec714
vmovss 0x2be538(%rip), %xmm23 # 0x1ef1000
vmovss 0x2bef7a(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x2b9c38(%rip), %xmm25 # 0x1eec714
vmovaps 0x90(%rsp), %xmm16
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x120(%rsp), %xmm26
vmovaps 0x110(%rsp), %xmm28
vmovaps 0x100(%rsp), %xmm29
testl %ebx, %ebx
je 0x1c33be5
leal -0x1(%rbx), %eax
vmovss 0x340(%rsp,%rax,8), %xmm0
vmovss 0x344(%rsp,%rax,8), %xmm1
movl 0x1a0(%rsp,%rax,4), %ecx
vmovsd 0x420(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1a0(%rsp,%rax,4)
cmovel %eax, %ebx
vxorps %xmm2, %xmm2, %xmm2
vcvtsi2ss %rdx, %xmm2, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm27
vfmadd231ss %xmm4, %xmm0, %xmm27 # xmm27 = (xmm0 * xmm4) + xmm27
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm27, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c33bb8
vmovaps %xmm27, %xmm6
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %ebx
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps 0x190(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x180(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm6, 0xd0(%rsp)
vbroadcastss %xmm6, %xmm6
vmovaps %xmm14, 0x70(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2bf230(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x2bdc85(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c32d68
vmovss 0x2bf15e(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c32dc9
vmovss 0x2bf150(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2bdc59(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c32dc9
vmovss 0x2bf11e(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2bdc27(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c33b65
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x2bdbec(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x2b8c0c(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x2b9d62(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c32e44
jnp 0x1c32e83
vucomiss %xmm13, %xmm14
jne 0x1c32e96
jp 0x1c32e96
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2b8bb7(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x2b9d0d(%rip), %xmm14 # 0x1eecb84
vmovss 0x2b9893(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c32eb7
vmovaps 0xf0(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm16
jmp 0x1c32ed2
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xf0(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x2bdae3(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x70(%rsp), %xmm14
jne 0x1c32ef8
jnp 0x1c32f62
vucomiss %xmm9, %xmm10
jne 0x1c32f37
jp 0x1c32f37
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2b8b03(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x2b9c59(%rip), %xmm10 # 0x1eecb84
vmovss 0x2b97df(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c32f58
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r15b
vucomiss %xmm8, %xmm7
ja 0x1c33afd
vaddss 0x32a4b9(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2b9be5(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm27, %xmm4 # xmm4 = xmm27[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm27, %xmm1 # xmm1 = (xmm27 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm27, %xmm4 # xmm4 = (xmm27 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0xd0(%rsp), %xmm27
vinsertps $0x10, %xmm1, %xmm27, %xmm6 # xmm6 = xmm27[0],xmm1[0],xmm27[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2b9a7c(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm26, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm28, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps %xmm29, %xmm13
vfmadd213ps 0x130(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2edd30(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x32cb4e(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x3277ae(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2edc9f(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2edc38(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c33aed
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm27, %xmm7
vmovaps 0x90(%rsp), %xmm16
jbe 0x1c333d3
testb %sil, %sil
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2bdc69(%rip), %xmm11 # 0x1ef0fec
vmovaps 0xf0(%rsp), %xmm9
vmovaps 0xe0(%rsp), %xmm10
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
je 0x1c3340e
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c3340e
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2bdc0b(%rip), %xmm11 # 0x1ef0fec
vmovaps 0xf0(%rsp), %xmm9
vmovaps 0xe0(%rsp), %xmm10
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c33b33
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c334c2
decq %rax
jne 0x1c3341f
jmp 0x1c33b36
vucomiss %xmm30, %xmm0
jb 0x1c33b36
vucomiss %xmm0, %xmm22
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
jb 0x1c335f2
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c335f2
vucomiss %xmm1, %xmm22
jb 0x1c335f2
vmovss 0x8(%r13), %xmm2
vinsertps $0x1c, 0x18(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x330(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm7
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm3, %xmm10, %xmm7 # xmm7 = (xmm10 * xmm3) + xmm7
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vsubss %xmm0, %xmm22, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm9, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm8) + xmm1
vfmadd231ss %xmm7, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm7) + xmm1
vucomiss 0xcc(%rsp), %xmm1
jb 0x1c335f2
vmovss 0x100(%r11,%r9,4), %xmm7
vucomiss %xmm1, %xmm7
jae 0x1c33609
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
jmp 0x1c33b5a
vmovss %xmm7, 0xc8(%rsp)
movq %r12, 0x148(%rsp)
movl %r14d, %r12d
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm25, %xmm8
vmulps 0x2a0(%rsp), %xmm7, %xmm9
vmulps 0x250(%rsp), %xmm7, %xmm10
vmulps 0x240(%rsp), %xmm7, %xmm11
vmulps 0x280(%rsp), %xmm7, %xmm7
vfmadd231ps 0x2b0(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x270(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x260(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x290(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps %xmm17, %xmm7, %xmm6
movq (%rdi), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %r14
movl 0x120(%r11,%r9,4), %eax
testl %eax, 0x34(%r14)
je 0x1c33ac9
movq 0x10(%rdi), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
jne 0x1c336f9
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c33add
vbroadcastss %xmm5, %xmm5
vmulps 0x200(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x210(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x220(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x230(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%rdi), %rax
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2defab(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm0
vpermps %ymm2, %ymm4, %ymm4
vbroadcastss 0x2ed770(%rip), %ymm5 # 0x1f20edc
vpermps %ymm2, %ymm5, %ymm5
vbroadcastss %xmm2, %ymm2
vmovaps %ymm4, 0x440(%rsp)
vmovaps %ymm5, 0x460(%rsp)
vmovaps %ymm2, 0x480(%rsp)
vmovaps %ymm3, 0x4a0(%rsp)
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x500(%rsp)
leaq 0x520(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x520(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x540(%rsp)
vmovss %xmm1, 0x100(%r11,%r9,4)
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
leaq 0x1e0(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x1b8(%rsp)
movq 0x8(%rdi), %rax
movq %rax, 0x1c0(%rsp)
movq %r11, 0x1c8(%rsp)
leaq 0x440(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x8, 0x1d8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm15, 0x150(%rsp)
je 0x1c33966
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xd0(%rsp), %xmm27
vmovaps 0x150(%rsp), %xmm15
vmovaps 0x100(%rsp), %xmm29
vmovaps 0x110(%rsp), %xmm28
vmovaps 0x120(%rsp), %xmm26
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm16
movq 0xc0(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x2b8e24(%rip), %xmm25 # 0x1eec714
vmovss 0x2be152(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2bd6fc(%rip), %xmm23 # 0x1ef1000
vmovss 0x2b8e06(%rip), %xmm22 # 0x1eec714
vmovss 0x2ed5c8(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2bd6ca(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2ed594(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2ed58e(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2bd6ac(%rip), %xmm17 # 0x1ef0fec
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %rdi
movq 0xa8(%rsp), %r11
movq 0xb0(%rsp), %r9
movq 0xb8(%rsp), %r10
vmovdqa 0x1e0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c33b96
movq 0x10(%rdi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c33a8b
testb $0x2, (%rcx)
jne 0x1c3399b
testb $0x40, 0x3e(%r14)
je 0x1c33a8b
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xd0(%rsp), %xmm27
vmovaps 0x150(%rsp), %xmm15
vmovaps 0x100(%rsp), %xmm29
vmovaps 0x110(%rsp), %xmm28
vmovaps 0x120(%rsp), %xmm26
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm16
movq 0xc0(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x2b8cff(%rip), %xmm25 # 0x1eec714
vmovss 0x2be02d(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2bd5d7(%rip), %xmm23 # 0x1ef1000
vmovss 0x2b8ce1(%rip), %xmm22 # 0x1eec714
vmovss 0x2ed4a3(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2bd5a5(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2ed46f(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2ed469(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2bd587(%rip), %xmm17 # 0x1ef0fec
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %rdi
movq 0xa8(%rsp), %r11
movq 0xb0(%rsp), %r9
movq 0xb8(%rsp), %r10
vmovdqa 0x1e0(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x1c8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2b90d0(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %al
jmp 0x1c33b98
xorl %eax, %eax
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
movl %r12d, %r14d
orb %al, %r14b
movq 0x148(%rsp), %r12
jmp 0x1c33b5a
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x90(%rsp), %xmm16
jmp 0x1c33b36
vmovaps 0x90(%rsp), %xmm16
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps %xmm27, %xmm15
vmovaps 0xd0(%rsp), %xmm27
jmp 0x1c33b5a
xorl %r15d, %r15d
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x70(%rsp), %xmm14
testb %r15b, %r15b
jne 0x1c32b1a
jmp 0x1c33bb8
movb $0x1, %r15b
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps %xmm27, %xmm15
vmovaps 0xd0(%rsp), %xmm27
jmp 0x1c33b54
xorl %eax, %eax
testb %al, %al
jne 0x1c33add
vmovss 0xc8(%rsp), %xmm0
vmovss %xmm0, 0x100(%r11,%r9,4)
jmp 0x1c33add
vinsertps $0x10, %xmm14, %xmm27, %xmm0 # xmm0 = xmm27[0],xmm14[0],xmm27[2,3]
vmovaps 0x190(%rsp), %xmm7
vmovaps 0x180(%rsp), %xmm18
vmovaps 0x400(%rsp), %ymm29
vmovaps 0x3e0(%rsp), %ymm28
jmp 0x1c325db
testb $0x1, %r14b
jne 0x1c33c0f
vmovaps 0x3c0(%rsp), %ymm0
vcmpleps 0x100(%r11,%r9,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r12d
setne 0x1f(%rsp)
jne 0x1c31e7f
movb 0x1f(%rsp), %al
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNvIntersector1<8>::occluded_t<embree::avx512::SweepCurve1Intersector1<embree::BSplineCurveT>, embree::avx512::Occluded1Epilog1<true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
static __forceinline bool occluded_t(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersector1<M>::intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
if (Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,Epilog(ray,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x8c0, %rsp # imm = 0x8C0
movq %rcx, %r9
movq %rsi, %r15
movzbl 0x1(%rcx), %ecx
leaq (%rcx,%rcx,4), %r10
leaq (%r10,%r10,4), %rax
vbroadcastss 0x12(%r9,%rax), %xmm0
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%rax), %xmm1, %xmm1
vmulps 0x10(%rsi), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%r9,%rcx,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%r9,%r10), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rcx,%rcx,2), %rsi
vpmovsxbd 0x6(%r9,%rsi,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rcx,%r10,2), %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rcx, %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rcx,%rcx,8), %rdi
leal (%rdi,%rdi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm0
addq %rcx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %r10d
vpmovsxbd 0x6(%r9,%r10), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x2d7daa(%rip), %ymm16 # 0x1f12704
vpermps %ymm2, %ymm16, %ymm15
vbroadcastss 0x2e6572(%rip), %ymm17 # 0x1f20edc
vpermps %ymm2, %ymm17, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vpermps %ymm3, %ymm17, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x2e64e5(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x2b65fa(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2b1ce4(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r8
subq %rcx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rdi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rcx,%rcx), %rdi
addq %rcx, %r10
shlq $0x3, %rsi
subq %rcx, %rsi
movl %ecx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %rdi, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%r10), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%r15){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2e53f9(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%r15){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2e53d5(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %ecx, %ymm1
vpcmpgtd 0x31fdd1(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x540(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r8b
je 0x1c3d01d
movq %rdx, 0x38(%rsp)
leaq (%r9,%rax), %r10
addq $0x6, %r10
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r11d
addq $0x10, %r10
movq %r9, 0x1a8(%rsp)
movq %r10, 0x1a0(%rsp)
tzcntq %r11, %rax
blsrq %r11, %r11
movl 0x6(%r9,%rax,4), %ebx
shll $0x6, %eax
movq %r11, %rcx
vmovups (%r10,%rax), %xmm0
subq $0x1, %rcx
jb 0x1c3abe9
andq %r11, %rcx
tzcntq %r11, %rdx
shll $0x6, %edx
prefetcht0 (%r10,%rdx)
prefetcht0 0x40(%r10,%rdx)
testq %rcx, %rcx
je 0x1c3abe9
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r10,%rcx)
prefetcht1 0x40(%r10,%rcx)
vmovups 0x10(%r10,%rax), %xmm2
vmovups 0x20(%r10,%rax), %xmm3
vmovups 0x30(%r10,%rax), %xmm4
vaddps %xmm2, %xmm0, %xmm1
vaddps %xmm3, %xmm1, %xmm1
vaddps %xmm4, %xmm1, %xmm5
vmovaps (%r15), %xmm6
vmovaps 0x10(%r15), %xmm1
vmulps 0x2e26dd(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm6, %xmm5, %xmm5
vdpps $0x7f, %xmm1, %xmm5, %xmm5
movl 0x2(%r9), %eax
movq %rax, 0x98(%rsp)
vdpps $0x7f, %xmm1, %xmm1, %xmm7
vxorps %xmm10, %xmm10, %xmm10
vmovss %xmm7, %xmm10, %xmm8 # xmm8 = xmm7[0],xmm10[1,2,3]
vrcp14ss %xmm8, %xmm10, %xmm8
vmovaps %xmm8, %xmm9
vfnmadd213ss 0x2b63a0(%rip), %xmm7, %xmm9 # xmm9 = -(xmm7 * xmm9) + mem
vmulss %xmm9, %xmm8, %xmm7
vmulss %xmm7, %xmm5, %xmm5
vmovaps %xmm5, 0x2f0(%rsp)
vbroadcastss %xmm5, %ymm5
vmovaps %ymm5, 0x1c0(%rsp)
vfmadd231ps %xmm5, %xmm1, %xmm6 # xmm6 = (xmm1 * xmm5) + xmm6
vblendps $0x8, %xmm10, %xmm6, %xmm5 # xmm5 = xmm6[0,1,2],xmm10[3]
vsubps %xmm5, %xmm0, %xmm6
vsubps %xmm5, %xmm3, %xmm7
vbroadcastss 0x10(%r15), %ymm9
vbroadcastss 0x14(%r15), %ymm10
vbroadcastss 0x18(%r15), %ymm0
vmovaps %ymm0, 0x440(%rsp)
vsubps %xmm5, %xmm2, %xmm8
vsubps %xmm5, %xmm4, %xmm4
vbroadcastss %xmm6, %ymm18
vbroadcastss 0x2d7a47(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm16
vbroadcastss 0x2e6210(%rip), %ymm2 # 0x1f20edc
vpermps %ymm6, %ymm2, %ymm17
vbroadcastss 0x2e61fd(%rip), %ymm3 # 0x1f20ed8
vmovaps %ymm6, 0x280(%rsp)
vpermps %ymm6, %ymm3, %ymm5
vmovaps %ymm5, 0x760(%rsp)
vbroadcastss %xmm8, %ymm19
vpermps %ymm8, %ymm0, %ymm20
vpermps %ymm8, %ymm2, %ymm22
vmovaps %ymm8, 0x240(%rsp)
vpermps %ymm8, %ymm3, %ymm23
vbroadcastss %xmm7, %ymm24
vpermps %ymm7, %ymm0, %ymm25
vpermps %ymm7, %ymm2, %ymm27
vmovaps %ymm7, 0x260(%rsp)
vpermps %ymm7, %ymm3, %ymm29
vbroadcastss %xmm4, %ymm31
vpermps %ymm4, %ymm0, %ymm21
vpermps %ymm4, %ymm2, %ymm6
vmovaps %ymm4, 0x220(%rsp)
vpermps %ymm4, %ymm3, %ymm30
vmulps %xmm1, %xmm1, %xmm0
vpermps %ymm0, %ymm2, %ymm2
vmovaps %ymm10, 0x460(%rsp)
vfmadd231ps %ymm10, %ymm10, %ymm2 # ymm2 = (ymm10 * ymm10) + ymm2
vmovaps %ymm9, 0x480(%rsp)
vfmadd231ps %ymm9, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm9) + ymm2
vandps %ymm28, %ymm2, %ymm0
vmovaps %ymm0, 0x420(%rsp)
movl $0x1, %eax
movq %rax, 0xf8(%rsp)
xorl %r14d, %r14d
xorl %r13d, %r13d
vmovsd 0x2b194d(%rip), %xmm1 # 0x1eec6f0
vbroadcastss 0x2b1968(%rip), %ymm3 # 0x1eec714
vmovaps %ymm2, 0x740(%rsp)
vmovaps %ymm18, 0x720(%rsp)
vmovaps %ymm16, 0x700(%rsp)
vmovaps %ymm17, 0x6e0(%rsp)
vmovaps %ymm19, 0x6c0(%rsp)
vmovaps %ymm20, 0x6a0(%rsp)
vmovaps %ymm22, 0x680(%rsp)
vmovaps %ymm23, 0x660(%rsp)
vmovaps %ymm24, 0x640(%rsp)
vmovaps %ymm25, 0x620(%rsp)
vmovaps %ymm27, 0x600(%rsp)
vmovaps %ymm29, 0x5e0(%rsp)
vmovaps %ymm31, 0x5c0(%rsp)
vmovaps %ymm21, 0x5a0(%rsp)
vmovaps %ymm6, 0x580(%rsp)
vmovaps %ymm30, 0x560(%rsp)
vmovshdup %xmm1, %xmm0 # xmm0 = xmm1[1,1,3,3]
vsubss %xmm1, %xmm0, %xmm0
vmulss 0x2e6092(%rip), %xmm0, %xmm2 # 0x1f20ed0
vmovaps %xmm2, 0x60(%rsp)
vmovaps %xmm1, 0x2d0(%rsp)
vbroadcastss %xmm1, %ymm4
vbroadcastss %xmm0, %ymm0
vmovaps %ymm4, 0x40(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vfmadd231ps 0x2e60b1(%rip), %ymm0, %ymm4 # ymm4 = (ymm0 * mem) + ymm4
vsubps %ymm4, %ymm3, %ymm7
vmulps %ymm7, %ymm7, %ymm8
vmulps %ymm7, %ymm8, %ymm0
vbroadcastss 0x2b1d07(%rip), %ymm26 # 0x1eecb8c
vmulps %ymm26, %ymm0, %ymm1
vmulps %ymm4, %ymm4, %ymm9
vmulps %ymm4, %ymm9, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm7, %ymm4, %ymm10
vmulps %ymm7, %ymm10, %ymm5
vbroadcastss 0x2b6154(%rip), %ymm14 # 0x1ef0ffc
vmulps %ymm5, %ymm14, %ymm11
vmulps %ymm4, %ymm10, %ymm12
vbroadcastss 0x2b613b(%rip), %ymm15 # 0x1ef0ff4
vmulps %ymm15, %ymm12, %ymm13
vaddps %ymm13, %ymm11, %ymm11
vaddps %ymm1, %ymm11, %ymm1
vmulps %ymm26, %ymm2, %ymm11
vaddps %ymm0, %ymm11, %ymm11
vmulps %ymm14, %ymm12, %ymm12
vmulps %ymm5, %ymm15, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vbroadcastss 0x2b6115(%rip), %ymm14 # 0x1ef1000
vmulps %ymm0, %ymm14, %ymm11
vmulps %ymm1, %ymm14, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm2, %ymm14, %ymm5
vmulps %ymm5, %ymm31, %ymm2
vmulps %ymm5, %ymm21, %ymm28
vmulps %ymm5, %ymm6, %ymm1
vmulps %ymm5, %ymm30, %ymm5
vfmadd231ps %ymm24, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm24) + ymm2
vfmadd231ps %ymm25, %ymm13, %ymm28 # ymm28 = (ymm13 * ymm25) + ymm28
vfmadd231ps %ymm27, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm27) + ymm1
vfmadd231ps %ymm13, %ymm29, %ymm5 # ymm5 = (ymm29 * ymm13) + ymm5
vfmadd231ps %ymm19, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm19) + ymm2
vfmadd231ps %ymm20, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm20) + ymm28
vfmadd231ps %ymm22, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm22) + ymm1
vfmadd231ps %ymm12, %ymm23, %ymm5 # ymm5 = (ymm23 * ymm12) + ymm5
vfmadd231ps %ymm18, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm18) + ymm2
vfmadd231ps %ymm16, %ymm11, %ymm28 # ymm28 = (ymm11 * ymm16) + ymm28
vfmadd231ps %ymm17, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm17) + ymm1
vmovaps 0x760(%rsp), %ymm0
vfmadd231ps %ymm11, %ymm0, %ymm5 # ymm5 = (ymm0 * ymm11) + ymm5
vbroadcastss 0x2e5f55(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm7, %ymm11
vmulps %ymm7, %ymm11, %ymm7
vxorps %ymm26, %ymm4, %ymm11
vmulps %ymm4, %ymm11, %ymm4
vmulps 0x2b1c03(%rip){1to8}, %ymm10, %ymm10 # 0x1eecb8c
vsubps %ymm10, %ymm4, %ymm4
vaddps %ymm10, %ymm8, %ymm8
vbroadcastss 0x2b1be4(%rip), %ymm10 # 0x1eecb80
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm4
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vmulps %ymm9, %ymm31, %ymm10
vmulps %ymm9, %ymm21, %ymm12
vmulps %ymm6, %ymm9, %ymm13
vmulps %ymm9, %ymm30, %ymm9
vfmadd231ps %ymm24, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm24) + ymm10
vfmadd231ps %ymm25, %ymm8, %ymm12 # ymm12 = (ymm8 * ymm25) + ymm12
vfmadd231ps %ymm27, %ymm8, %ymm13 # ymm13 = (ymm8 * ymm27) + ymm13
vfmadd231ps %ymm8, %ymm29, %ymm9 # ymm9 = (ymm29 * ymm8) + ymm9
vfmadd231ps %ymm19, %ymm4, %ymm10 # ymm10 = (ymm4 * ymm19) + ymm10
vfmadd231ps %ymm20, %ymm4, %ymm12 # ymm12 = (ymm4 * ymm20) + ymm12
vfmadd231ps %ymm22, %ymm4, %ymm13 # ymm13 = (ymm4 * ymm22) + ymm13
vfmadd231ps %ymm4, %ymm23, %ymm9 # ymm9 = (ymm23 * ymm4) + ymm9
vfmadd231ps %ymm18, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm18) + ymm10
vfmadd231ps %ymm16, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm16) + ymm12
vfmadd231ps %ymm17, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm17) + ymm13
vfmadd231ps %ymm7, %ymm0, %ymm9 # ymm9 = (ymm0 * ymm7) + ymm9
vbroadcastss 0x60(%rsp), %ymm4
vmulps %ymm4, %ymm10, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vmulps %ymm4, %ymm13, %ymm13
vmulps %ymm4, %ymm9, %ymm6
vmovaps %ymm2, %ymm8
vmovaps 0x324cf2(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm28, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm1, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm5, %ymm6, %ymm4
vmaxps %ymm4, %ymm5, %ymm14
vminps %ymm4, %ymm5, %ymm4
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm0
vpermt2ps %ymm31, %ymm7, %ymm0
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm2, %ymm8, %ymm7
vsubps %ymm28, %ymm9, %ymm6
vsubps %ymm1, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm3, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm3
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm0, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm0, %ymm24 # ymm24 = (ymm0 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm4, %ymm4
vsubps %ymm18, %ymm4, %ymm4
vmulps 0x2b57c3(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2b57bd(%rip){1to8}, %ymm4, %ymm4 # 0x1ef0944
vmovaps %ymm4, 0x60(%rsp)
vmulps %ymm14, %ymm14, %ymm4
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2b157a(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2b155b(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm2, %ymm31, %ymm27
vmovaps %ymm28, 0xa0(%rsp)
vsubps %ymm28, %ymm31, %ymm28
vmovaps %ymm1, 0x1e0(%rsp)
vsubps %ymm1, %ymm31, %ymm29
vmovaps 0x440(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vmovaps 0x460(%rsp), %ymm21
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vmovaps 0x480(%rsp), %ymm1
vfmadd231ps %ymm27, %ymm1, %ymm22 # ymm22 = (ymm1 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm1, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm1) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm1
vmovaps 0x740(%rsp), %ymm16
vsubps %ymm1, %ymm16, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm21
vsubps %ymm4, %ymm21, %ymm4
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2b18f3(%rip){1to8}, %ymm15, %ymm24 # 0x1eecb8c
vmulps %ymm4, %ymm24, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
vmovaps %ymm1, 0x2a0(%rsp)
je 0x1c3b3a2
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vmovaps %ymm3, %ymm16
vrcp14ps %ymm31, %ymm3
vfnmadd213ps %ymm16, %ymm3, %ymm31 # ymm31 = -(ymm3 * ymm31) + ymm16
vfmadd132ps %ymm3, %ymm3, %ymm31 # ymm31 = (ymm31 * ymm3) + ymm3
vxorps %ymm26, %ymm22, %ymm3
vsubps %ymm30, %ymm3, %ymm3
vmulps %ymm31, %ymm3, %ymm3
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm30
vmovaps %ymm17, %ymm31
vfmadd213ps %ymm18, %ymm3, %ymm31 # ymm31 = (ymm3 * ymm31) + ymm18
vmulps %ymm31, %ymm14, %ymm31
vmovaps %ymm31, 0x3e0(%rsp)
vmovaps %ymm17, %ymm31
vfmadd213ps %ymm18, %ymm30, %ymm31 # ymm31 = (ymm30 * ymm31) + ymm18
vmulps %ymm31, %ymm14, %ymm31
vmovaps %ymm31, 0x3c0(%rsp)
vbroadcastss 0x2b06dd(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm3, %ymm16, %ymm31 {%k1}
vbroadcastss 0x2b1832(%rip), %ymm3 # 0x1eecb84
vblendmps %ymm30, %ymm3, %ymm30 {%k1}
vbroadcastss 0x2e5b62(%rip), %ymm23 # 0x1f20ec4
vandps %ymm23, %ymm1, %ymm3
vmovaps 0x420(%rsp), %ymm16
vmaxps %ymm3, %ymm16, %ymm3
vmulps 0x2b6b34(%rip){1to8}, %ymm3, %ymm3 # 0x1ef1eb4
vandps %ymm23, %ymm15, %ymm23
vcmpltps %ymm3, %ymm23, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c3cfa8
vbroadcastss 0x2b1374(%rip), %ymm3 # 0x1eec714
jmp 0x1c3b3b6
vbroadcastss 0x2b0674(%rip), %ymm31 # 0x1eeba20
vbroadcastss 0x2b17ce(%rip), %ymm30 # 0x1eecb84
andb $0x7f, %al
je 0x1c3b7a7
vmovaps %ymm21, 0x140(%rsp)
vmovaps %ymm3, %ymm16
vmovss 0xc(%r15), %xmm3
vmovss 0x20(%r15), %xmm4
vmovaps 0x2f0(%rsp), %xmm23
vsubss %xmm23, %xmm3, %xmm3
vbroadcastss %xmm3, %ymm3
vmaxps %ymm31, %ymm3, %ymm3
vsubss %xmm23, %xmm4, %xmm4
vbroadcastss %xmm4, %ymm4
vminps %ymm30, %ymm4, %ymm4
vmulps %ymm29, %ymm13, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x440(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x460(%rsp), %ymm31
vfmadd231ps %ymm12, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm12) + ymm13
vmovaps 0x480(%rsp), %ymm30
vfmadd231ps %ymm11, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm11) + ymm13
vbroadcastss 0x2e5a7c(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2b5b90(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2e5a57(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm26, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2b16dc(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm3, %ymm3
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2b0558(%rip), %ymm23 # 0x1eeba20
vmovaps %ymm23, %ymm11 {%k1}
vminps %ymm11, %ymm4, %ymm4
vxorps %xmm13, %xmm13, %xmm13
vsubps %ymm8, %ymm13, %ymm8
vsubps %ymm9, %ymm13, %ymm9
vsubps %ymm10, %ymm13, %ymm10
vmulps %ymm0, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm0, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm30, %ymm8 # ymm8 = -(ymm30 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm26, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm26, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm3, %ymm0
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm23, %ymm9 {%k1}
vminps %ymm9, %ymm4, %ymm8
vmovaps %ymm0, 0x360(%rsp)
vcmpleps %ymm8, %ymm0, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c3b7b6
vmovaps 0x3e0(%rsp), %ymm3
vmaxps 0x60(%rsp), %ymm13, %ymm4
vminps %ymm16, %ymm3, %ymm3
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm3, %ymm3
vmovaps 0x3c0(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2e5985(%rip), %ymm11 # 0x1f20f40
vaddps %ymm3, %ymm11, %ymm3
vbroadcastss 0x2e2ef0(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm3, %ymm12, %ymm3
vmovaps 0x40(%rsp), %ymm0
vmovaps 0x200(%rsp), %ymm1
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vmovaps %ymm3, 0x3e0(%rsp)
vmaxps %ymm10, %ymm9, %ymm3
vaddps %ymm3, %ymm11, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vmovaps %ymm3, 0x3c0(%rsp)
vmulps %ymm4, %ymm4, %ymm3
vmovaps 0x140(%rsp), %ymm0
vsubps %ymm3, %ymm0, %ymm11
vmulps %ymm11, %ymm24, %ymm3
vsubps %ymm3, %ymm25, %ymm3
vcmpnltps %ymm10, %ymm3, %k0
kortestb %k0, %k0
je 0x1c3b7c1
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm13, %ymm3, %k1
vsqrtps %ymm3, %ymm3
vaddps %ymm15, %ymm15, %ymm4
vrcp14ps %ymm4, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm4 # ymm4 = -(ymm9 * ymm4) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm4 # ymm4 = (ymm4 * ymm9) + ymm9
vxorps 0x2e585d(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm3, %ymm9, %ymm9
vmulps %ymm4, %ymm9, %ymm12
vsubps %ymm22, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm13
vmovaps %ymm17, %ymm3
vfmadd213ps %ymm18, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm3) + ymm18
vmulps %ymm3, %ymm14, %ymm9
vmulps %ymm12, %ymm30, %ymm3
vmulps %ymm12, %ymm31, %ymm4
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm2, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm2
vsubps %ymm19, %ymm3, %ymm3
vmovaps %ymm6, %ymm19
vmovaps 0xa0(%rsp), %ymm1
vfmadd213ps %ymm1, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm1
vsubps %ymm19, %ymm4, %ymm4
vmovaps 0x1e0(%rsp), %ymm0
vfmadd213ps %ymm0, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm0
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm30, %ymm10
vmulps %ymm13, %ymm31, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm2, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm2
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm1, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm1
vsubps %ymm6, %ymm17, %ymm1
vfmadd213ps %ymm0, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm0
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2b0303(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm12, %ymm0, %ymm2 {%k1}
vbroadcastss 0x2b1458(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm13, %ymm0, %ymm0 {%k1}
vandps 0x2a0(%rsp), %ymm28, %ymm6
vmovaps 0x420(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x2b6763(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c3b7e8
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2b140c(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2b029f(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm0 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c3b7e8
vbroadcastss 0x2e5713(%rip), %ymm28 # 0x1f20ec4
jmp 0x1c3c2e6
vmovaps %ymm16, %ymm3
jmp 0x1c3c2e6
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm4, %xmm4, %xmm4
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2b0241(%rip), %ymm2 # 0x1eeba20
vbroadcastss 0x2b139c(%rip), %ymm0 # 0x1eecb84
vbroadcastss 0x10(%r15), %ymm6
vbroadcastss 0x14(%r15), %ymm7
vbroadcastss 0x18(%r15), %ymm11
vmulps %ymm5, %ymm11, %ymm5
vfmadd231ps %ymm1, %ymm7, %ymm5 # ymm5 = (ymm7 * ymm1) + ymm5
vfmadd231ps %ymm10, %ymm6, %ymm5 # ymm5 = (ymm6 * ymm10) + ymm5
vmovaps 0x360(%rsp), %ymm10
vmovaps %ymm10, 0x4a0(%rsp)
vminps %ymm2, %ymm8, %ymm1
vmovaps %ymm1, 0x4c0(%rsp)
vandps %ymm28, %ymm5, %ymm2
vmaxps %ymm0, %ymm10, %ymm5
vmovaps %ymm5, 0x4e0(%rsp)
vmovaps %ymm8, 0x500(%rsp)
vbroadcastss 0x2e5688(%rip), %ymm0 # 0x1f20ed4
vcmpltps %ymm0, %ymm2, %k1
kmovd %k1, 0x10c(%rsp)
vcmpleps %ymm1, %ymm10, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x380(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %edx
andb %al, %dl
movl %edx, 0x18(%rsp)
movl %edx, %eax
orb %cl, %al
je 0x1c3c2dd
vmovaps %ymm0, %ymm2
movl %ebx, 0x14(%rsp)
movq %r11, 0x1b0(%rsp)
movb %r8b, 0xb(%rsp)
knotb %k0, %k1
vmulps %ymm11, %ymm9, %ymm0
vfmadd213ps %ymm0, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm4) + ymm0
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vandps %ymm28, %ymm3, %ymm0
vcmpltps %ymm2, %ymm0, %k0
kmovd %k1, 0x104(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2e5602(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2e55f4(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq 0xf8(%rsp), %rax
vpbroadcastd %eax, %ymm1
vmovdqa %ymm0, 0x520(%rsp)
vmovdqa %ymm1, 0x400(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %ebx
movl %ecx, 0x108(%rsp)
andb %cl, %bl
movq %r14, 0x1b8(%rsp)
je 0x1c3c2f4
vmovaps 0x280(%rsp), %ymm1
vmovaps 0x240(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x260(%rsp), %ymm3
vmovaps 0x220(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x2e5559(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2b6524(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x28(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2e0(%rsp)
vmovaps 0x360(%rsp), %ymm0
vaddps 0x1c0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2b0054(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x3e0(%rsp,%rcx), %xmm10
vmovss 0x4a0(%rsp,%rcx), %xmm8
vmovaps 0x10(%r15), %xmm0
vmovaps %xmm0, 0x2a0(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2affe4(%rip), %xmm0 # 0x1eeba24
jb 0x1c3ba48
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c3ba7a
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2b6423(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x2c(%rsp)
movl $0x5, %r14d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x2a0(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2b0c53(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2b10ac(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x60(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2b54f0(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2b54ec(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2b54c9(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x220(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x260(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x240(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x280(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x350(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2afe7e(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
jb 0x1c3bbbd
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c3bbed
vmovss %xmm11, 0x140(%rsp)
vmovss %xmm12, 0xc(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc(%rsp), %xmm12
vmovss 0x140(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x60(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2e52bf(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x340(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2b0f72(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2b0f50(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x220(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x240(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm12
vfmadd132ps 0x280(%rsp), %xmm2, %xmm12 # xmm12 = (xmm12 * mem) + xmm2
vdpps $0x7f, %xmm12, %xmm12, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2b0a81(%rip), %xmm3, %xmm6 # 0x1eec718
vmulss 0x2b0a7d(%rip), %xmm0, %xmm8 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x320(%rsp)
vfnmadd213ss 0x2b533d(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x30(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm12, 0x140(%rsp)
vmovss %xmm7, 0xc(%rsp)
vmovaps %xmm0, 0x330(%rsp)
jb 0x1c3bce5
vsqrtss %xmm0, %xmm0, %xmm5
jmp 0x1c3bd30
vmovaps %xmm3, 0xe0(%rsp)
vmovss %xmm6, 0x10(%rsp)
vmovss %xmm8, 0xd0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xd0(%rsp), %xmm8
vmovss 0x10(%rsp), %xmm6
vmovaps 0xe0(%rsp), %xmm3
vmovss 0xc(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm12
vmovaps %xmm0, %xmm5
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %xmm18
vmulss %xmm3, %xmm8, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm6, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm14
vaddss 0x2b09a3(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm15
vmulss 0x2b0984(%rip), %xmm15, %xmm17 # 0x1eec718
vmulss 0x2b097e(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x2afc7e(%rip), %xmm0 # 0x1eeba24
jb 0x1c3bdb1
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c3be47
vmovss %xmm13, 0x10(%rsp)
vmovaps %xmm14, 0xd0(%rsp)
vmovss %xmm5, 0x24(%rsp)
vmovaps %xmm15, 0x310(%rsp)
vmovss %xmm17, 0x20(%rsp)
vmovss %xmm19, 0x1c(%rsp)
vmovaps %xmm4, 0x300(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x300(%rsp), %xmm4
vmovss 0x1c(%rsp), %xmm19
vmovss 0x20(%rsp), %xmm17
vmovaps 0x310(%rsp), %xmm15
vmovss 0x24(%rsp), %xmm5
vmovaps 0xd0(%rsp), %xmm14
vmovss 0x10(%rsp), %xmm13
vmovss 0xc(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm12
vmovaps 0x1e0(%rsp), %xmm18
vmovaps 0x200(%rsp), %xmm9
vbroadcastss 0x2e5073(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x40(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2b4b74(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x60(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x220(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x260(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x240(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x340(%rsp), %xmm2
vfmadd132ps 0x280(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x330(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm12, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm2, %xmm1, %xmm2
vmovss 0x30(%rsp), %xmm1
vmulss 0x320(%rsp), %xmm1, %xmm3
vmulss 0x2c(%rsp), %xmm8, %xmm1
vmovss 0x28(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2e4fb2(%rip){1to4}, %xmm12, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0xe0(%rsp), %xmm11
vdpps $0x7f, %xmm11, %xmm3, %xmm4
vdivss %xmm5, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm9, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x2a0(%rsp), %xmm7
vdpps $0x7f, %xmm11, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm9, %xmm3
vmulss %xmm15, %xmm19, %xmm2
vmulss %xmm15, %xmm15, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss %xmm2, %xmm17, %xmm6
vdpps $0x7f, %xmm7, %xmm9, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm3 # xmm3 = -(xmm14 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm14, %xmm7 # xmm7 = -(xmm14 * xmm5) + xmm7
vpermilps $0xff, 0x350(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm14, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm8, %xmm8
vbroadcastss 0x2e4ef1(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
movb $0x1, %al
jbe 0x1c3c0ad
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x2e0(%rsp), %xmm3
vfmadd231ss 0x2b5ebb(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c3c0ad
vaddss 0x2f0(%rsp), %xmm8, %xmm8
xorl %eax, %eax
vucomiss 0xc(%r15), %xmm8
jb 0x1c3c0aa
vmovss 0x20(%r15), %xmm5
vucomiss %xmm8, %xmm5
jb 0x1c3c0aa
xorl %eax, %eax
vucomiss 0x2af9ef(%rip), %xmm10 # 0x1eeba24
jb 0x1c3c0aa
vmovss 0x2b06d5(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c3c0aa
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2b06ba(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2b06b4(%rip), %xmm18, %xmm3 # 0x1eec71c
movq 0x38(%rsp), %rcx
movq (%rcx), %rax
movq 0x1e8(%rax), %rax
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r12
movl 0x24(%r15), %eax
testl %eax, 0x34(%r12)
je 0x1c3c0a8
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c3c0c3
cmpq $0x0, 0x48(%r12)
jne 0x1c3c0c3
movb $0x1, %r12b
xorl %eax, %eax
jmp 0x1c3c0ad
xorl %eax, %eax
xorl %r12d, %r12d
testb %al, %al
je 0x1c3c2af
decq %r14
jne 0x1c3ba9d
jmp 0x1c3c2a3
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm12, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm12
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm3 # xmm3 = xmm12[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x38(%rsp), %rdx
movq 0x8(%rdx), %rax
vshufps $0xe9, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,2,2,3]
vmovlps %xmm1, 0x170(%rsp)
vmovss %xmm0, 0x178(%rsp)
vmovss %xmm10, 0x17c(%rsp)
movl $0x0, 0x180(%rsp)
movl 0x14(%rsp), %ecx
movl %ecx, 0x184(%rsp)
movq 0x98(%rsp), %rcx
movl %ecx, 0x188(%rsp)
movl (%rax), %ecx
movl %ecx, 0x18c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x190(%rsp)
vmovss %xmm8, 0x20(%r15)
movl $0xffffffff, 0x34(%rsp) # imm = 0xFFFFFFFF
leaq 0x34(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x120(%rsp)
movq %r15, 0x128(%rsp)
leaq 0x170(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c3c226
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vbroadcastss 0x2e4cb5(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2e4cab(%rip), %ymm28 # 0x1f20ec4
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c3c28c
movq 0x38(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c3c287
testb $0x2, (%rcx)
jne 0x1c3c245
testb $0x40, 0x3e(%r12)
je 0x1c3c27a
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vbroadcastss 0x2e4c54(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2e4c4a(%rip), %ymm28 # 0x1f20ec4
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c3c28c
movb $0x1, %r12b
jmp 0x1c3c28f
xorl %r12d, %r12d
testb %r12b, %r12b
jne 0x1c3c0a4
vmovss %xmm5, 0x20(%r15)
jmp 0x1c3c0a4
xorl %r12d, %r12d
vbroadcastss 0x2e4c15(%rip), %xmm4 # 0x1f20ec4
andb $0x1, %r12b
orb %r12b, %r13b
vmovaps 0x3a0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
movq 0x1b8(%rsp), %r14
jne 0x1c3b9bf
jmp 0x1c3c2fd
vbroadcastss 0x2b042e(%rip), %ymm3 # 0x1eec714
vmovaps 0x1c0(%rsp), %ymm4
jmp 0x1c3ce0c
vbroadcastss 0x2e4bc7(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm3
vaddps 0x380(%rsp), %ymm3, %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovd 0x10c(%rsp), %k1
kmovd 0x104(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x18(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2e4b9a(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2e4b8c(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x380(%rsp)
vpcmpled 0x400(%rsp), %ymm0, %k0
kmovd %k0, %ebx
movl %ecx, 0x18(%rsp)
andb %cl, %bl
je 0x1c3cd2f
vmovaps 0x4e0(%rsp), %ymm7
vmovaps 0x280(%rsp), %ymm1
vmovaps 0x240(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x260(%rsp), %ymm5
vmovaps 0x220(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2b5adc(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x28(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2e0(%rsp)
vmovaps %ymm7, 0x360(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2af611(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x3c0(%rsp,%rcx), %xmm10
vmovss 0x500(%rsp,%rcx), %xmm8
vmovaps 0x10(%r15), %xmm0
vmovaps %xmm0, 0x2a0(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2af5a1(%rip), %xmm0 # 0x1eeba24
jb 0x1c3c48b
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c3c4bd
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2b59e0(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x2c(%rsp)
movl $0x5, %r14d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x2a0(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2b0210(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2b0669(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x60(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2b4aad(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2b4aa9(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2b4a86(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x220(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x260(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x240(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x280(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x350(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2af43b(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
jb 0x1c3c600
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c3c630
vmovss %xmm11, 0x140(%rsp)
vmovss %xmm12, 0xc(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc(%rsp), %xmm12
vmovss 0x140(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x60(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2e487c(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x340(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2b052f(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2b050d(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x220(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x240(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm12
vfmadd132ps 0x280(%rsp), %xmm2, %xmm12 # xmm12 = (xmm12 * mem) + xmm2
vdpps $0x7f, %xmm12, %xmm12, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2b003e(%rip), %xmm3, %xmm6 # 0x1eec718
vmulss 0x2b003a(%rip), %xmm0, %xmm8 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x320(%rsp)
vfnmadd213ss 0x2b48fa(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x30(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm12, 0x140(%rsp)
vmovss %xmm7, 0xc(%rsp)
vmovaps %xmm0, 0x330(%rsp)
jb 0x1c3c728
vsqrtss %xmm0, %xmm0, %xmm5
jmp 0x1c3c773
vmovaps %xmm3, 0xe0(%rsp)
vmovss %xmm6, 0x10(%rsp)
vmovss %xmm8, 0xd0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xd0(%rsp), %xmm8
vmovss 0x10(%rsp), %xmm6
vmovaps 0xe0(%rsp), %xmm3
vmovss 0xc(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm12
vmovaps %xmm0, %xmm5
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %xmm18
vmulss %xmm3, %xmm8, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm6, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm14
vaddss 0x2aff60(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm15
vmulss 0x2aff41(%rip), %xmm15, %xmm17 # 0x1eec718
vmulss 0x2aff3b(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x2af23b(%rip), %xmm0 # 0x1eeba24
jb 0x1c3c7f4
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c3c88a
vmovss %xmm13, 0x10(%rsp)
vmovaps %xmm14, 0xd0(%rsp)
vmovss %xmm5, 0x24(%rsp)
vmovaps %xmm15, 0x310(%rsp)
vmovss %xmm17, 0x20(%rsp)
vmovss %xmm19, 0x1c(%rsp)
vmovaps %xmm4, 0x300(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x300(%rsp), %xmm4
vmovss 0x1c(%rsp), %xmm19
vmovss 0x20(%rsp), %xmm17
vmovaps 0x310(%rsp), %xmm15
vmovss 0x24(%rsp), %xmm5
vmovaps 0xd0(%rsp), %xmm14
vmovss 0x10(%rsp), %xmm13
vmovss 0xc(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm12
vmovaps 0x1e0(%rsp), %xmm18
vmovaps 0x200(%rsp), %xmm9
vbroadcastss 0x2e4630(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x40(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2b4131(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x60(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x220(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x260(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x240(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x340(%rsp), %xmm2
vfmadd132ps 0x280(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x330(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm12, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm2, %xmm1, %xmm2
vmovss 0x30(%rsp), %xmm1
vmulss 0x320(%rsp), %xmm1, %xmm3
vmulss 0x2c(%rsp), %xmm8, %xmm1
vmovss 0x28(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2e456f(%rip){1to4}, %xmm12, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0xe0(%rsp), %xmm11
vdpps $0x7f, %xmm11, %xmm3, %xmm4
vdivss %xmm5, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm9, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x2a0(%rsp), %xmm7
vdpps $0x7f, %xmm11, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm9, %xmm3
vmulss %xmm15, %xmm19, %xmm2
vmulss %xmm15, %xmm15, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss %xmm2, %xmm17, %xmm6
vdpps $0x7f, %xmm7, %xmm9, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm3 # xmm3 = -(xmm14 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm14, %xmm7 # xmm7 = -(xmm14 * xmm5) + xmm7
vpermilps $0xff, 0x350(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm14, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm8, %xmm8
vbroadcastss 0x2e44ae(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
movb $0x1, %al
jbe 0x1c3caf8
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x2e0(%rsp), %xmm3
vfmadd231ss 0x2b5478(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c3caf8
vaddss 0x2f0(%rsp), %xmm8, %xmm8
xorl %eax, %eax
vucomiss 0xc(%r15), %xmm8
vmovaps 0x1c0(%rsp), %ymm4
jb 0x1c3cb05
vmovss 0x20(%r15), %xmm5
vucomiss %xmm8, %xmm5
jb 0x1c3cb05
xorl %eax, %eax
vucomiss 0x2aef9f(%rip), %xmm10 # 0x1eeba24
jb 0x1c3cb05
vmovss 0x2afc85(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c3cb05
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2afc6a(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2afc64(%rip), %xmm18, %xmm3 # 0x1eec71c
movq 0x38(%rsp), %rcx
movq (%rcx), %rax
movq 0x1e8(%rax), %rax
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r12
movl 0x24(%r15), %eax
testl %eax, 0x34(%r12)
je 0x1c3cb03
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c3cb1e
cmpq $0x0, 0x48(%r12)
jne 0x1c3cb1e
movb $0x1, %r12b
xorl %eax, %eax
jmp 0x1c3cb08
vmovaps 0x1c0(%rsp), %ymm4
jmp 0x1c3cb08
xorl %eax, %eax
xorl %r12d, %r12d
testb %al, %al
je 0x1c3cd01
decq %r14
jne 0x1c3c4e0
jmp 0x1c3ccfe
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm12, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm12
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm3 # xmm3 = xmm12[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x38(%rsp), %rdx
movq 0x8(%rdx), %rax
vshufps $0xe9, %xmm0, %xmm0, %xmm1 # xmm1 = xmm0[1,2,2,3]
vmovlps %xmm1, 0x170(%rsp)
vmovss %xmm0, 0x178(%rsp)
vmovss %xmm10, 0x17c(%rsp)
movl $0x0, 0x180(%rsp)
movl 0x14(%rsp), %ecx
movl %ecx, 0x184(%rsp)
movq 0x98(%rsp), %rcx
movl %ecx, 0x188(%rsp)
movl (%rax), %ecx
movl %ecx, 0x18c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x190(%rsp)
vmovss %xmm8, 0x20(%r15)
movl $0xffffffff, 0x34(%rsp) # imm = 0xFFFFFFFF
leaq 0x34(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x120(%rsp)
movq %r15, 0x128(%rsp)
leaq 0x170(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x1, 0x138(%rsp)
movq 0x48(%r12), %rax
testq %rax, %rax
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c3cc81
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1c0(%rsp), %ymm4
vbroadcastss 0x2e4250(%rip), %ymm28 # 0x1f20ec4
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c3cce7
movq 0x38(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c3cce2
testb $0x2, (%rcx)
jne 0x1c3cca0
testb $0x40, 0x3e(%r12)
je 0x1c3ccd5
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1c0(%rsp), %ymm4
vbroadcastss 0x2e41ef(%rip), %ymm28 # 0x1f20ec4
movq 0x110(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c3cce7
movb $0x1, %r12b
jmp 0x1c3ccea
xorl %r12d, %r12d
testb %r12b, %r12b
jne 0x1c3caf4
vmovss %xmm5, 0x20(%r15)
jmp 0x1c3caf4
xorl %r12d, %r12d
andb $0x1, %r12b
orb %r12b, %r13b
vmovaps 0x3a0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
movq 0x1b8(%rsp), %r14
jne 0x1c3c402
jmp 0x1c3cd33
vmovaps %ymm3, %ymm4
vmovdqa 0x400(%rsp), %ymm1
vpcmpltd 0x380(%rsp), %ymm1, %k1
vmovaps 0x4a0(%rsp), %ymm0
vpcmpltd 0x520(%rsp), %ymm1, %k2
vaddps %ymm0, %ymm4, %ymm1
vbroadcastss 0x20(%r15), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0x108(%rsp), %ecx
andb %al, %cl
vmovaps 0x4e0(%rsp), %ymm1
vaddps %ymm1, %ymm4, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x18(%rsp), %edx
andb %al, %dl
orb %cl, %dl
je 0x1c3cde2
movl %r14d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %dl, 0x780(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x7a0(%rsp,%rax)
vmovaps 0x2d0(%rsp), %xmm0
vmovlps %xmm0, 0x7c0(%rsp,%rax)
movq 0xf8(%rsp), %rcx
incl %ecx
movl %ecx, 0x7c8(%rsp,%rax)
incl %r14d
movq 0x1a8(%rsp), %r9
vbroadcastss 0x2af921(%rip), %ymm3 # 0x1eec714
movb 0xb(%rsp), %r8b
movq 0x1a0(%rsp), %r10
movq 0x1b0(%rsp), %r11
movl 0x14(%rsp), %ebx
vbroadcastss 0x20(%r15), %ymm0
testl %r14d, %r14d
je 0x1c3cff5
leal -0x1(%r14), %edx
leaq (%rdx,%rdx,2), %rcx
shlq $0x5, %rcx
movzbl 0x780(%rsp,%rcx), %esi
vmovaps 0x7a0(%rsp,%rcx), %ymm1
vaddps %ymm1, %ymm4, %ymm2
vcmpleps %ymm0, %ymm2, %k0
kmovb %k0, %eax
andl %esi, %eax
je 0x1c3cf16
kmovd %eax, %k1
vbroadcastss 0x2aebc4(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm1, %ymm2, %ymm1 {%k1}
vshufps $0xb1, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2,5,4,7,6]
vminps %ymm2, %ymm1, %ymm2
vshufpd $0x5, %ymm2, %ymm2, %ymm3 # ymm3 = ymm2[1,0,3,2]
vminps %ymm3, %ymm2, %ymm2
vpermpd $0x4e, %ymm2, %ymm3 # ymm3 = ymm2[2,3,0,1]
vminps %ymm3, %ymm2, %ymm2
vcmpeqps %ymm2, %ymm1, %k0
kmovd %k0, %esi
andb %al, %sil
je 0x1c3ce94
movzbl %sil, %edi
jmp 0x1c3ce97
movzbl %al, %edi
leaq (%rsp,%rcx), %rsi
addq $0x780, %rsi # imm = 0x780
vmovss 0x44(%rsi), %xmm1
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r12d
movq %r12, 0xf8(%rsp)
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %eax, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
vbroadcastss 0x2af836(%rip), %ymm3 # 0x1eec714
je 0x1c3cee3
movl %r14d, %edx
vbroadcastss 0x40(%rsi), %ymm2
vsubss %xmm2, %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vfmadd132ps 0x2e4025(%rip), %ymm2, %ymm1 # ymm1 = (ymm1 * mem) + ymm2
vmovaps %ymm1, 0x4a0(%rsp)
vmovsd 0x4a0(%rsp,%rcx,4), %xmm1
vmovaps %xmm1, 0x2d0(%rsp)
movl %edx, %r14d
testb %al, %al
je 0x1c3ce12
vmovaps 0x720(%rsp), %ymm18
vmovaps 0x700(%rsp), %ymm16
vmovaps 0x6e0(%rsp), %ymm17
vmovaps 0x6c0(%rsp), %ymm19
vmovaps 0x6a0(%rsp), %ymm20
vmovaps 0x680(%rsp), %ymm22
vmovaps 0x660(%rsp), %ymm23
vmovaps 0x640(%rsp), %ymm24
vmovaps 0x620(%rsp), %ymm25
vmovaps 0x600(%rsp), %ymm27
vmovaps 0x5e0(%rsp), %ymm29
vmovaps 0x5c0(%rsp), %ymm31
vmovaps 0x5a0(%rsp), %ymm21
vmovaps 0x580(%rsp), %ymm6
vmovaps 0x560(%rsp), %ymm30
vmovdqa 0x2d0(%rsp), %xmm1
jmp 0x1c3ae2e
vcmpleps 0x2e3f4d(%rip), %ymm4, %k2 # 0x1f20f00
vbroadcastss 0x2afbc8(%rip), %ymm4 # 0x1eecb84
vbroadcastss 0x2aea5a(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm4, %ymm16, %ymm3 {%k2}
vmovaps %ymm3, %ymm31 {%k1}
vblendmps %ymm16, %ymm4, %ymm3 {%k2}
kmovd %k2, %ecx
vmovaps %ymm3, %ymm30 {%k1}
knotb %k1, %k0
kmovd %k0, %edx
orb %cl, %dl
andb %al, %dl
movl %edx, %eax
jmp 0x1c3b397
testb $0x1, %r13b
jne 0x1c3d01d
vmovaps 0x540(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r11d
setne %r8b
jne 0x1c3ab9a
andb $0x1, %r8b
movl %r8d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 4>::intersect_t<embree::avx512::SweepCurve1IntersectorK<embree::BSplineCurveT, 4>, embree::avx512::Intersect1KEpilog1<4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
      // Narrow-phase dispatch for a packet of up to 8 cubic curve segments (CurveNv<8>)
      // against ray k of a 4-wide ray packet. First runs the shared broad-phase
      // (CurveNiIntersectorK::intersect) to get a hit mask, then iterates the set
      // bits, loading each candidate's 4 control points and invoking the sweep
      // intersector with the epilog that commits hits back into `ray`.
      //
      // @param pre     per-ray precalculations (ray-space transform etc.)
      // @param ray     ray packet; ray.tfar[k] is read and may be shortened by Epilog
      // @param k       active lane index within the ray packet
      // @param context scene/query context used to resolve geomID -> geometry
      // @param prim    quantized curve primitive block holding N curve segments
      static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
      {
        vfloat<M> tNear;
        // Broad phase: conservative slab test over all curves in the block;
        // `valid` marks candidates, `tNear` their entry distances.
        vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
        const size_t N = prim.N;
        size_t mask = movemask(valid);
        while (mask)
        {
          // bscf: extract the lowest set bit index and clear it from `mask`.
          const size_t i = bscf(mask);
          STAT3(normal.trav_prims,1,1,1);
          const unsigned int geomID = prim.geomID(N);
          const unsigned int primID = prim.primID(N)[i];
          const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
          // Four control points of the cubic curve segment i (xyz + radius in w).
          const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
          const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
          const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
          const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
          // Hide memory latency of the (expensive) narrow-phase test below:
          // prefetch the next candidate's vertices into L1, and the one after
          // that into L2, before doing this iteration's work.
          size_t mask1 = mask;
          const size_t i1 = bscf(mask1);
          if (mask) {
            prefetchL1(&prim.vertices(i1,N)[0]);
            prefetchL1(&prim.vertices(i1,N)[4]);
            if (mask1) {
              const size_t i2 = bsf(mask1);
              prefetchL2(&prim.vertices(i2,N)[0]);
              prefetchL2(&prim.vertices(i2,N)[4]);
            }
          }
          // Exact curve intersection; Epilog commits any accepted hit into `ray`
          // (which can shrink ray.tfar[k]).
          Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
          // Re-cull remaining candidates: a committed hit may have moved tfar
          // closer, so drop curves whose broad-phase entry is now beyond it.
          mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
        }
      }
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x9a0, %rsp # imm = 0x9A0
movq %rcx, %r15
movq %rdx, %r10
movq %rsi, %r12
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r10,4), %xmm1
vmovss 0x40(%rsi,%r10,4), %xmm2
vinsertps $0x10, 0x10(%rsi,%r10,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%rsi,%r10,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%rsi,%r10,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%rsi,%r10,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2d55cc(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2e3d9a(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2e3d0f(%rip), %ymm6 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm6, %ymm2, %ymm5
vbroadcastss 0x2b3e21(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm6, %ymm1, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm6, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2af50f(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x30(%r12,%r10,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2e2c1b(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
movq %r10, (%rsp)
vminps 0x80(%r12,%r10,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2e2bf2(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x31d5f4(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x5c0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c3fbc2
leaq (%r8,%rax), %r9
addq $0x6, %r9
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r10d
addq $0x10, %r9
movl $0x1, %eax
movq (%rsp), %rcx
shlxl %ecx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x3b0(%rsp)
movq %r15, 0x10(%rsp)
movq %r8, 0x180(%rsp)
movq %r9, 0x178(%rsp)
tzcntq %r10, %rax
blsrq %r10, %r10
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x20(%rsp)
shll $0x6, %eax
movq %r10, %rcx
vmovups (%r9,%rax), %xmm0
subq $0x1, %rcx
jb 0x1c3d3e7
andq %r10, %rcx
tzcntq %r10, %rdx
shll $0x6, %edx
prefetcht0 (%r9,%rdx)
prefetcht0 0x40(%r9,%rdx)
testq %rcx, %rcx
je 0x1c3d3e7
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r9,%rcx)
prefetcht1 0x40(%r9,%rcx)
vmovups 0x10(%r9,%rax), %xmm1
vmovups 0x20(%r9,%rax), %xmm2
vmovups 0x30(%r9,%rax), %xmm3
movq (%rsp), %rax
vmovss (%r12,%rax,4), %xmm4
vinsertps $0x1c, 0x10(%r12,%rax,4), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r12,%rax,4), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vbroadcastss 0x40(%r12,%rax,4), %ymm30
vbroadcastss 0x50(%r12,%rax,4), %ymm31
vunpcklps %xmm31, %xmm30, %xmm5 # xmm5 = xmm30[0],xmm31[0],xmm30[1],xmm31[1]
vbroadcastss 0x60(%r12,%rax,4), %ymm21
vinsertps $0x28, %xmm21, %xmm5, %xmm9 # xmm9 = xmm5[0,1],xmm21[0],zero
vaddps %xmm1, %xmm0, %xmm5
vaddps %xmm2, %xmm5, %xmm5
vaddps %xmm3, %xmm5, %xmm5
vmulps 0x2dfeab(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm4, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0x30(%r12,%rax,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
movl 0x2(%r8), %eax
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x2b3b70(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x230(%rsp)
vmovaps %ymm6, 0x360(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm6) + xmm4
vblendps $0x8, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[3]
vsubps %xmm4, %xmm0, %xmm6
vsubps %xmm4, %xmm2, %xmm7
vsubps %xmm4, %xmm1, %xmm8
vsubps %xmm4, %xmm3, %xmm3
vbroadcastss %xmm6, %ymm0
vmovaps %ymm0, 0x800(%rsp)
vbroadcastss 0x2d522b(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm1
vmovaps %ymm1, 0x7e0(%rsp)
vbroadcastss 0x2e39ec(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm2
vmovaps %ymm2, 0x7c0(%rsp)
vbroadcastss 0x2e39d1(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x2a0(%rsp)
vpermps %ymm6, %ymm2, %ymm4
vmovaps %ymm4, 0x7a0(%rsp)
vbroadcastss %xmm8, %ymm4
vmovaps %ymm4, 0x780(%rsp)
vpermps %ymm8, %ymm0, %ymm4
vmovaps %ymm4, 0x760(%rsp)
vpermps %ymm8, %ymm1, %ymm4
vmovaps %ymm4, 0x740(%rsp)
vmovaps %ymm8, 0x260(%rsp)
vpermps %ymm8, %ymm2, %ymm4
vmovaps %ymm4, 0x720(%rsp)
vbroadcastss %xmm7, %ymm4
vmovaps %ymm4, 0x700(%rsp)
vpermps %ymm7, %ymm0, %ymm4
vmovaps %ymm4, 0x6e0(%rsp)
vpermps %ymm7, %ymm1, %ymm4
vmovaps %ymm4, 0x6c0(%rsp)
vmovaps %ymm7, 0x280(%rsp)
vpermps %ymm7, %ymm2, %ymm4
vmovaps %ymm4, 0x6a0(%rsp)
vbroadcastss %xmm3, %ymm4
vmovaps %ymm4, 0x680(%rsp)
vpermps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x660(%rsp)
vpermps %ymm3, %ymm1, %ymm0
vmovaps %ymm0, 0x640(%rsp)
vmovaps %ymm3, 0x240(%rsp)
vpermps %ymm3, %ymm2, %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmulss %xmm21, %xmm21, %xmm0
vfmadd231ps %ymm31, %ymm31, %ymm0 # ymm0 = (ymm31 * ymm31) + ymm0
vfmadd231ps %ymm30, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm30) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x600(%rsp)
vandps 0x2e38b9(%rip){1to8}, %ymm0, %ymm0 # 0x1f20ec4
vmovaps %ymm0, 0x520(%rsp)
vmovss %xmm10, 0x3c(%rsp)
vmovaps %xmm5, 0x2e0(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
movq %rax, 0x98(%rsp)
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x3d0(%rsp)
movl $0x1, %ebx
xorl %r11d, %r11d
movl 0x20(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x3c0(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x10c(%rsp)
vmovaps %xmm11, 0x220(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x108(%rsp)
vmovsd 0x2af05c(%rip), %xmm2 # 0x1eec6f0
vbroadcastss 0x2af076(%rip), %ymm16 # 0x1eec714
vmovaps %ymm30, 0x540(%rsp)
vmovaps %ymm31, 0x1c0(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vsubss %xmm2, %xmm0, %xmm0
vmulss 0x2e380a(%rip), %xmm0, %xmm1 # 0x1f20ed0
vmovaps %xmm1, 0x60(%rsp)
vmovaps %xmm2, 0x160(%rsp)
vbroadcastss %xmm2, %ymm4
vbroadcastss %xmm0, %ymm0
vmovaps %ymm4, 0x40(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vfmadd231ps 0x2e3829(%rip), %ymm0, %ymm4 # ymm4 = (ymm0 * mem) + ymm4
vsubps %ymm4, %ymm16, %ymm7
vmulps %ymm7, %ymm7, %ymm8
vmulps %ymm7, %ymm8, %ymm0
vbroadcastss 0x2af47d(%rip), %ymm30 # 0x1eecb8c
vmulps %ymm30, %ymm0, %ymm1
vmulps %ymm4, %ymm4, %ymm9
vmulps %ymm4, %ymm9, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm7, %ymm4, %ymm10
vmulps %ymm7, %ymm10, %ymm5
vbroadcastss 0x2b38ca(%rip), %ymm14 # 0x1ef0ffc
vmulps %ymm5, %ymm14, %ymm11
vmulps %ymm4, %ymm10, %ymm12
vbroadcastss 0x2b38b1(%rip), %ymm15 # 0x1ef0ff4
vmulps %ymm15, %ymm12, %ymm13
vaddps %ymm13, %ymm11, %ymm11
vaddps %ymm1, %ymm11, %ymm1
vmulps %ymm30, %ymm2, %ymm11
vaddps %ymm0, %ymm11, %ymm11
vmulps %ymm14, %ymm12, %ymm12
vmulps %ymm5, %ymm15, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vbroadcastss 0x2b388b(%rip), %ymm14 # 0x1ef1000
vmulps %ymm0, %ymm14, %ymm11
vmulps %ymm1, %ymm14, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm2, %ymm14, %ymm5
vmovaps 0x680(%rsp), %ymm25
vmulps %ymm5, %ymm25, %ymm0
vmovaps 0x660(%rsp), %ymm27
vmulps %ymm5, %ymm27, %ymm2
vmovaps 0x640(%rsp), %ymm28
vmulps %ymm5, %ymm28, %ymm1
vmovaps 0x620(%rsp), %ymm29
vmulps %ymm5, %ymm29, %ymm5
vmovaps 0x700(%rsp), %ymm20
vfmadd231ps %ymm20, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm20) + ymm0
vmovaps 0x6e0(%rsp), %ymm22
vfmadd231ps %ymm22, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm22) + ymm2
vmovaps 0x6c0(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm23) + ymm1
vmovaps 0x6a0(%rsp), %ymm24
vfmadd231ps %ymm13, %ymm24, %ymm5 # ymm5 = (ymm24 * ymm13) + ymm5
vmovaps 0x780(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm16) + ymm0
vmovaps 0x760(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm17) + ymm2
vmovaps 0x740(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm18) + ymm1
vmovaps 0x720(%rsp), %ymm19
vfmadd231ps %ymm12, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm12) + ymm5
vmovaps 0x800(%rsp), %ymm6
vfmadd231ps %ymm6, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm6) + ymm0
vmovaps 0x7e0(%rsp), %ymm3
vfmadd231ps %ymm3, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm3) + ymm2
vmovaps 0x7c0(%rsp), %ymm14
vfmadd231ps %ymm14, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm14) + ymm1
vmovaps 0x7a0(%rsp), %ymm15
vfmadd231ps %ymm11, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm11) + ymm5
vbroadcastss 0x2e3651(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm7, %ymm11
vmulps %ymm7, %ymm11, %ymm7
vxorps %ymm26, %ymm4, %ymm11
vmulps %ymm4, %ymm11, %ymm4
vmulps %ymm30, %ymm10, %ymm10
vsubps %ymm10, %ymm4, %ymm4
vaddps %ymm10, %ymm8, %ymm8
vbroadcastss 0x2af2e4(%rip), %ymm10 # 0x1eecb80
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm4
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vmulps %ymm9, %ymm25, %ymm10
vmulps %ymm9, %ymm27, %ymm12
vmulps %ymm9, %ymm28, %ymm13
vmulps %ymm9, %ymm29, %ymm9
vfmadd231ps %ymm20, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm20) + ymm10
vfmadd231ps %ymm22, %ymm8, %ymm12 # ymm12 = (ymm8 * ymm22) + ymm12
vfmadd231ps %ymm23, %ymm8, %ymm13 # ymm13 = (ymm8 * ymm23) + ymm13
vfmadd231ps %ymm8, %ymm24, %ymm9 # ymm9 = (ymm24 * ymm8) + ymm9
vfmadd231ps %ymm16, %ymm4, %ymm10 # ymm10 = (ymm4 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm4, %ymm12 # ymm12 = (ymm4 * ymm17) + ymm12
vfmadd231ps %ymm18, %ymm4, %ymm13 # ymm13 = (ymm4 * ymm18) + ymm13
vfmadd231ps %ymm4, %ymm19, %ymm9 # ymm9 = (ymm19 * ymm4) + ymm9
vfmadd231ps %ymm6, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm6) + ymm10
vfmadd231ps %ymm3, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm3) + ymm12
vfmadd231ps %ymm14, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm14) + ymm13
vfmadd231ps %ymm7, %ymm15, %ymm9 # ymm9 = (ymm15 * ymm7) + ymm9
vbroadcastss 0x60(%rsp), %ymm4
vmulps %ymm4, %ymm10, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vmulps %ymm4, %ymm13, %ymm13
vmulps %ymm4, %ymm9, %ymm6
vmovaps %ymm0, %ymm8
vmovaps 0x3223f3(%rip), %ymm7 # 0x1f5fd20
vmovaps %ymm31, %ymm3
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm2, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm1, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm5, %ymm6, %ymm4
vmaxps %ymm4, %ymm5, %ymm14
vminps %ymm4, %ymm5, %ymm4
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm5
vpermt2ps %ymm31, %ymm7, %ymm5
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm0, %ymm8, %ymm7
vsubps %ymm2, %ymm9, %ymm6
vsubps %ymm1, %ymm10, %ymm27
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm27, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm27) - ymm17
vmulps %ymm11, %ymm27, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm27, %ymm27, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vbroadcastss 0x2aed11(%rip), %ymm24 # 0x1eec714
vmovaps %ymm24, %ymm26
vfnmadd213ps %ymm24, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm24
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm5, %ymm6, %ymm22
vfmsub231ps %ymm27, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm27) - ymm22
vmulps %ymm19, %ymm27, %ymm24
vfmsub231ps %ymm7, %ymm5, %ymm24 # ymm24 = (ymm5 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm4, %ymm4
vsubps %ymm18, %ymm4, %ymm4
vmulps 0x2b2eb0(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2b2eaa(%rip){1to8}, %ymm4, %ymm4 # 0x1ef0944
vmovaps %ymm4, 0x60(%rsp)
vmulps %ymm14, %ymm14, %ymm4
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2aec67(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2aec48(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmovaps %ymm27, 0x380(%rsp)
vmulps %ymm14, %ymm27, %ymm18
vmovaps %ymm0, 0x140(%rsp)
vsubps %ymm0, %ymm31, %ymm27
vmovaps %ymm2, 0xa0(%rsp)
vsubps %ymm2, %ymm31, %ymm28
vmovaps %ymm1, 0x1e0(%rsp)
vsubps %ymm1, %ymm31, %ymm29
vmulps %ymm29, %ymm21, %ymm22
vfmadd231ps %ymm28, %ymm3, %ymm22 # ymm22 = (ymm3 * ymm28) + ymm22
vmovaps 0x540(%rsp), %ymm0
vfmadd231ps %ymm27, %ymm0, %ymm22 # ymm22 = (ymm0 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm21, %ymm17
vfmadd231ps %ymm3, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm3) + ymm17
vfmadd231ps %ymm0, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm0) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm2
vmovaps 0x600(%rsp), %ymm3
vsubps %ymm2, %ymm3, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm1
vsubps %ymm4, %ymm1, %ymm4
vmulps %ymm22, %ymm22, %ymm25
vmulps %ymm30, %ymm15, %ymm24
vmulps %ymm4, %ymm24, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c3dca6
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm3
vfnmadd213ps %ymm26, %ymm3, %ymm31 # ymm31 = -(ymm3 * ymm31) + ymm26
vfmadd132ps %ymm3, %ymm3, %ymm31 # ymm31 = (ymm31 * ymm3) + ymm3
vxorps 0x2e32d0(%rip){1to8}, %ymm22, %ymm3 # 0x1f20ec0
vsubps %ymm30, %ymm3, %ymm3
vmulps %ymm31, %ymm3, %ymm3
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm3, %ymm30 # ymm30 = (ymm3 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x4e0(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x4c0(%rsp)
vbroadcastss 0x2addda(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm3, %ymm16, %ymm30 {%k1}
vbroadcastss 0x2aef2f(%rip), %ymm3 # 0x1eecb84
vblendmps %ymm31, %ymm3, %ymm31 {%k1}
vbroadcastss 0x2e325f(%rip), %ymm23 # 0x1f20ec4
vandps %ymm23, %ymm2, %ymm3
vmovaps 0x520(%rsp), %ymm16
vmaxps %ymm3, %ymm16, %ymm3
vmulps 0x2b4231(%rip){1to8}, %ymm3, %ymm3 # 0x1ef1eb4
vandps %ymm23, %ymm15, %ymm23
vcmpltps %ymm3, %ymm23, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c3fb52
vbroadcastss 0x2aea70(%rip), %ymm16 # 0x1eec714
jmp 0x1c3dcc0
vbroadcastss 0x2add70(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2aeeca(%rip), %ymm31 # 0x1eecb84
vmovaps %ymm26, %ymm16
andb $0x7f, %al
je 0x1c3e0b7
movq (%rsp), %rcx
vmovss 0x80(%r12,%rcx,4), %xmm3
vsubss 0x2e0(%rsp), %xmm3, %xmm3
vbroadcastss %xmm3, %ymm3
vminps %ymm31, %ymm3, %ymm3
vmovaps 0x5e0(%rsp), %ymm4
vmaxps %ymm30, %ymm4, %ymm4
vmulps %ymm29, %ymm13, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x1a0(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x1c0(%rsp), %ymm31
vfmadd231ps %ymm12, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm12) + ymm13
vmovaps 0x540(%rsp), %ymm30
vfmadd231ps %ymm11, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm11) + ymm13
vbroadcastss 0x2e3185(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2b3299(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2e3160(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm26, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2aede5(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm4, %ymm4
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2adc62(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm3, %ymm3
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm5, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm5, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm30, %ymm8 # ymm8 = -(ymm30 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm26, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm26, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm4, %ymm0
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm3, %ymm8
vmovaps %ymm0, 0x340(%rsp)
vcmpleps %ymm8, %ymm0, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c3fa43
vmovaps 0x4e0(%rsp), %ymm3
vmaxps 0x60(%rsp), %ymm23, %ymm4
vminps %ymm16, %ymm3, %ymm3
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm3, %ymm3
vmovaps 0x4c0(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2e3089(%rip), %ymm11 # 0x1f20f40
vaddps %ymm3, %ymm11, %ymm3
vbroadcastss 0x2e05f4(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm3, %ymm12, %ymm3
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm1, %ymm5
vmovaps 0x200(%rsp), %ymm1
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vmovaps %ymm3, 0x4e0(%rsp)
vmaxps %ymm10, %ymm9, %ymm3
vaddps %ymm3, %ymm11, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vmovaps %ymm3, 0x4c0(%rsp)
vmulps %ymm4, %ymm4, %ymm3
vsubps %ymm3, %ymm5, %ymm11
vmulps %ymm11, %ymm24, %ymm3
vsubps %ymm3, %ymm25, %ymm3
vcmpnltps %ymm10, %ymm3, %k0
kortestb %k0, %k0
je 0x1c3e0cc
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm3, %k1
vsqrtps %ymm3, %ymm3
vaddps %ymm15, %ymm15, %ymm4
vrcp14ps %ymm4, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm4 # ymm4 = -(ymm9 * ymm4) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm4 # ymm4 = (ymm4 * ymm9) + ymm9
vxorps 0x2e2f66(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm3, %ymm9, %ymm9
vmulps %ymm4, %ymm9, %ymm12
vsubps %ymm22, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm13
vmovaps %ymm17, %ymm3
vfmadd213ps %ymm18, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm3) + ymm18
vmulps %ymm3, %ymm14, %ymm9
vmulps %ymm12, %ymm30, %ymm3
vmulps %ymm12, %ymm31, %ymm4
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vmovaps %ymm2, %ymm16
vmovaps 0x140(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm2
vsubps %ymm19, %ymm3, %ymm3
vmovaps %ymm6, %ymm19
vmovaps 0xa0(%rsp), %ymm1
vfmadd213ps %ymm1, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm1
vsubps %ymm19, %ymm4, %ymm4
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x380(%rsp), %ymm5
vfmadd213ps %ymm0, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm0
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm30, %ymm10
vmulps %ymm13, %ymm31, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm2, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm2
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm1, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm1
vsubps %ymm6, %ymm17, %ymm1
vfmadd213ps %ymm0, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm0
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2ad9f4(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm12, %ymm0, %ymm2 {%k1}
vbroadcastss 0x2aeb49(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm13, %ymm0, %ymm0 {%k1}
vbroadcastss 0x2e2e7a(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm16, %ymm6
vmovaps 0x520(%rsp), %ymm12
vmaxps %ymm6, %ymm12, %ymm6
vmulps 0x2b3e4d(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm7, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c3e0f3
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2aeafc(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2ad98f(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm0 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c3e0f3
vmovaps 0x1c0(%rsp), %ymm31
vmovaps 0x1a0(%rsp), %ymm21
jmp 0x1c3fa43
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm4, %xmm4, %xmm4
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2ad936(%rip), %ymm2 # 0x1eeba20
vbroadcastss 0x2aea91(%rip), %ymm0 # 0x1eecb84
vbroadcastss 0x2e2dc8(%rip), %xmm11 # 0x1f20ec4
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm1, %ymm31, %ymm5 # ymm5 = (ymm31 * ymm1) + ymm5
vfmadd231ps %ymm10, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm10) + ymm5
vmovaps 0x340(%rsp), %ymm7
vmovaps %ymm7, 0x820(%rsp)
vminps %ymm2, %ymm8, %ymm1
vmovaps %ymm1, 0x840(%rsp)
vbroadcastss 0x2e2d8e(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm5, %ymm2
vmaxps %ymm0, %ymm7, %ymm5
vmovaps %ymm5, 0x560(%rsp)
vmovaps %ymm8, 0x580(%rsp)
vbroadcastss 0x2e2d7b(%rip), %ymm0 # 0x1f20ed4
vcmpltps %ymm0, %ymm2, %k1
kmovd %k1, 0x104(%rsp)
vcmpleps %ymm1, %ymm7, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x480(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %edx
andb %al, %dl
movl %edx, %eax
orb %cl, %al
je 0x1c3fa05
vmovaps %ymm0, %ymm2
movl %edx, 0x1c(%rsp)
movq %r11, 0x190(%rsp)
movq %r10, 0x198(%rsp)
knotb %k0, %k1
vmulps %ymm9, %ymm21, %ymm0
vfmadd213ps %ymm0, %ymm31, %ymm4 # ymm4 = (ymm31 * ymm4) + ymm0
vfmadd213ps %ymm4, %ymm30, %ymm3 # ymm3 = (ymm30 * ymm3) + ymm4
vandps %ymm6, %ymm3, %ymm0
vcmpltps %ymm2, %ymm0, %k0
kmovd %k1, 0xfc(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2e2cf5(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2e2ce7(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %rbx, 0x188(%rsp)
vpbroadcastd %ebx, %ymm1
vmovdqa %ymm0, 0x5a0(%rsp)
vmovdqa %ymm1, 0x500(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %ebx
movl %ecx, 0x100(%rsp)
andb %cl, %bl
je 0x1c3ed69
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x260(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x280(%rsp), %ymm3
vmovaps 0x240(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm0, %xmm11, %xmm0
vandps %xmm1, %xmm11, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2b3c28(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x30(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2d0(%rsp)
vmovaps 0x340(%rsp), %ymm0
vaddps 0x360(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2ad758(%rip), %ymm0 # 0x1eeba20
vblendmps 0x340(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x4e0(%rsp,%rcx), %xmm10
vmovss 0x820(%rsp,%rcx), %xmm8
vmovaps 0x220(%rsp), %xmm0
vucomiss 0x2ad6f4(%rip), %xmm0 # 0x1eeba24
vmovss 0x108(%rsp), %xmm0
jae 0x1c3e376
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2b3b27(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x34(%rsp)
movl $0x4, %r14d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x230(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2ae357(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2ae7b0(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x60(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2b2bf4(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2b2bf0(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2b2bcd(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x280(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x260(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x380(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2ad582(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
jb 0x1c3e4b9
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c3e4e9
vmovss %xmm11, 0x140(%rsp)
vmovss %xmm12, 0x8(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x8(%rsp), %xmm12
vmovss 0x140(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x60(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2e29c3(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x330(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2ae676(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2ae654(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x240(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x280(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm13
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm13 # xmm13 = (xmm13 * mem) + xmm2
vdpps $0x7f, %xmm13, %xmm13, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2ae185(%rip), %xmm3, %xmm5 # 0x1eec718
vmulss 0x2ae181(%rip), %xmm0, %xmm6 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x310(%rsp)
vfnmadd213ss 0x2b2a41(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x38(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm13, 0x140(%rsp)
vmovss %xmm7, 0x8(%rsp)
vmovaps %xmm0, 0x320(%rsp)
jb 0x1c3e5e3
vsqrtss %xmm0, %xmm0, %xmm16
jmp 0x1c3e630
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm5, 0xc(%rsp)
vmovss %xmm6, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm6
vmovss 0xc(%rsp), %xmm5
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps %xmm0, %xmm16
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %xmm19
vmulss %xmm3, %xmm6, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm5, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm13, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm15
vaddss 0x2ae0a3(%rip), %xmm7, %xmm14 # 0x1eec714
vmulps %xmm15, %xmm15, %xmm0
vsubps %xmm0, %xmm19, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm18
vmulss 0x2ae084(%rip), %xmm18, %xmm17 # 0x1eec718
vmulss 0x2ae07e(%rip), %xmm0, %xmm20 # 0x1eec71c
vucomiss 0x2ad37e(%rip), %xmm0 # 0x1eeba24
jb 0x1c3e6b1
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c3e749
vmovss %xmm14, 0xc(%rsp)
vmovaps %xmm15, 0xc0(%rsp)
vmovss %xmm16, 0x2c(%rsp)
vmovss %xmm17, 0x28(%rsp)
vmovaps %xmm18, 0x300(%rsp)
vmovss %xmm20, 0x24(%rsp)
vmovaps %xmm4, 0x2f0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x2f0(%rsp), %xmm4
vmovss 0x24(%rsp), %xmm20
vmovaps 0x300(%rsp), %xmm18
vmovss 0x28(%rsp), %xmm17
vmovss 0x2c(%rsp), %xmm16
vmovaps 0xc0(%rsp), %xmm15
vmovss 0xc(%rsp), %xmm14
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps 0x1e0(%rsp), %xmm19
vmovaps 0x200(%rsp), %xmm9
vbroadcastss 0x2e2772(%rip), %xmm11 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm31
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x40(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2b2263(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x60(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x280(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x330(%rsp), %xmm2
vfmadd132ps 0x2a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x320(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm13, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmovss 0x38(%rsp), %xmm2
vmulss 0x310(%rsp), %xmm2, %xmm2
vmulss 0x34(%rsp), %xmm8, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2e26ab(%rip){1to4}, %xmm13, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm4, %xmm4
vmovaps 0xd0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x30(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm16, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm9, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm14) + xmm7
vmovaps 0x230(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm9, %xmm5
vmulss %xmm18, %xmm20, %xmm2
vmulss %xmm18, %xmm18, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm9, %xmm6
vaddss %xmm2, %xmm17, %xmm7
vfnmadd231ss %xmm4, %xmm15, %xmm5 # xmm5 = -(xmm15 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm15, %xmm6 # xmm6 = -(xmm15 * xmm3) + xmm6
vpermilps $0xff, 0x380(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm13, %xmm13, %xmm0 # xmm0 = xmm13[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm15, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm15, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm10, %xmm10
vsubss %xmm4, %xmm8, %xmm8
vandps %xmm11, %xmm15, %xmm3
vucomiss %xmm3, %xmm14
jbe 0x1c3ea9c
vaddss %xmm1, %xmm14, %xmm1
vmovaps 0x2d0(%rsp), %xmm3
vfmadd231ss 0x2b35b1(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm2, %xmm11, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c3ea9c
vaddss 0x2e0(%rsp), %xmm8, %xmm8
movb $0x1, %r13b
vucomiss 0x3c(%rsp), %xmm8
jb 0x1c3ea9f
movq (%rsp), %rax
vmovss 0x80(%r12,%rax,4), %xmm4
vucomiss %xmm8, %xmm4
jb 0x1c3ea9f
vucomiss 0x2ad0da(%rip), %xmm10 # 0x1eeba24
jb 0x1c3ea9f
vmovss 0x2addbc(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c3ea9f
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm19, %xmm2, %xmm1 # xmm1 = xmm19[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2add9d(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2add97(%rip), %xmm19, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq (%rsp), %rax
movl 0x90(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c3eabc
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm13, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm13
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c3eac1
cmpq $0x0, 0x40(%r15)
jne 0x1c3eac1
movq (%rsp), %rcx
vmovss %xmm8, 0x80(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0xc0(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0xd0(%r12,%rcx,4)
vmovss %xmm0, 0xe0(%r12,%rcx,4)
vmovss %xmm10, 0xf0(%r12,%rcx,4)
movl $0x0, 0x100(%r12,%rcx,4)
movl 0x20(%rsp), %eax
movl %eax, 0x110(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x120(%r12,%rcx,4)
movq 0x10(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x130(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r12,%rcx,4)
jmp 0x1c3ea9f
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c3ed47
testb %al, %al
je 0x1c3e399
jmp 0x1c3ed47
movq %rcx, %r15
jmp 0x1c3ea9f
movq 0x10(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm10, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[2,2,2,2]
vbroadcastss %xmm0, %xmm0
vmovaps %xmm2, 0x3e0(%rsp)
vmovaps %xmm3, 0x3f0(%rsp)
vmovaps %xmm0, 0x400(%rsp)
vmovaps %xmm1, 0x410(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vmovdqa 0x3d0(%rsp), %xmm0
vmovdqa %xmm0, 0x440(%rsp)
leaq 0x450(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rdx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x450(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x460(%rsp)
movq (%rsp), %rax
vmovss %xmm8, 0x80(%r12,%rax,4)
vmovaps 0x3b0(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x120(%rsp)
movq %r12, 0x128(%rsp)
leaq 0x3e0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x4, 0x138(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c3ec24
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vbroadcastss 0x2e22a0(%rip), %xmm11 # 0x1f20ec4
vmovdqa 0xe0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c3ed2f
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c3ec96
testb $0x2, (%rcx)
jne 0x1c3ec5b
testb $0x40, 0x3e(%r15)
je 0x1c3ec96
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vbroadcastss 0x2e222e(%rip), %xmm11 # 0x1f20ec4
vmovdqa 0xe0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c3ed2f
movq 0x128(%rsp), %rax
movq 0x130(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c3ed3d
movq (%rsp), %rax
vmovss %xmm4, 0x80(%r12,%rax,4)
movq 0x10(%rsp), %r15
jmp 0x1c3ea9f
movq (%rsp), %rax
vmovaps 0x4a0(%rsp), %ymm0
vcmpleps 0x80(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c3e2bb
vmovaps 0x480(%rsp), %ymm0
vaddps 0x360(%rsp), %ymm0, %ymm0
movq (%rsp), %rax
vcmpleps 0x80(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd 0x104(%rsp), %k1
kmovd 0xfc(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x1c(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2e2129(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2e211b(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x480(%rsp)
vpcmpled 0x500(%rsp), %ymm0, %k0
kmovd %k0, %ebx
movl %ecx, 0x1c(%rsp)
andb %cl, %bl
je 0x1c3f914
vmovaps 0x560(%rsp), %ymm5
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x260(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x280(%rsp), %ymm3
vmovaps 0x240(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm0, %xmm11, %xmm0
vandps %xmm1, %xmm11, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2b306b(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x30(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2d0(%rsp)
vmovaps %ymm5, 0x340(%rsp)
vaddps 0x360(%rsp), %ymm5, %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2acb9b(%rip), %ymm0 # 0x1eeba20
vblendmps 0x340(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x4c0(%rsp,%rcx), %xmm10
vmovss 0x580(%rsp,%rcx), %xmm9
vmovaps 0x220(%rsp), %xmm0
vucomiss 0x2acb37(%rip), %xmm0 # 0x1eeba24
vmovss 0x10c(%rsp), %xmm0
jae 0x1c3ef33
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm10
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2b2f6a(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x34(%rsp)
movl $0x4, %r14d
vmovaps %xmm9, 0xa0(%rsp)
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x230(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2ad79a(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2adbf3(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x60(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2b2037(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2b2033(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2b2010(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x280(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x260(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x380(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2ac9c5(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
jb 0x1c3f076
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c3f0a6
vmovss %xmm11, 0x140(%rsp)
vmovss %xmm12, 0x8(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x8(%rsp), %xmm12
vmovss 0x140(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x60(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2e1e06(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x330(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2adab9(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2ada97(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x240(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x280(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm13
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm13 # xmm13 = (xmm13 * mem) + xmm2
vdpps $0x7f, %xmm13, %xmm13, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2ad5c8(%rip), %xmm3, %xmm5 # 0x1eec718
vmulss 0x2ad5c4(%rip), %xmm0, %xmm6 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x310(%rsp)
vfnmadd213ss 0x2b1e84(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x38(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm13, 0x140(%rsp)
vmovss %xmm7, 0x8(%rsp)
vmovaps %xmm0, 0x320(%rsp)
jb 0x1c3f1a0
vsqrtss %xmm0, %xmm0, %xmm16
jmp 0x1c3f1ed
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm5, 0xc(%rsp)
vmovss %xmm6, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm6
vmovss 0xc(%rsp), %xmm5
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps %xmm0, %xmm16
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm19
vmulss %xmm3, %xmm6, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm5, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm13, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm11, %xmm15
vaddss 0x2ad4e6(%rip), %xmm7, %xmm14 # 0x1eec714
vmulps %xmm15, %xmm15, %xmm0
vsubps %xmm0, %xmm19, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm18
vmulss 0x2ad4c7(%rip), %xmm18, %xmm17 # 0x1eec718
vmulss 0x2ad4c1(%rip), %xmm0, %xmm20 # 0x1eec71c
vucomiss 0x2ac7c1(%rip), %xmm0 # 0x1eeba24
jb 0x1c3f26e
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c3f306
vmovss %xmm14, 0xc(%rsp)
vmovaps %xmm15, 0xc0(%rsp)
vmovss %xmm16, 0x2c(%rsp)
vmovss %xmm17, 0x28(%rsp)
vmovaps %xmm18, 0x300(%rsp)
vmovss %xmm20, 0x24(%rsp)
vmovaps %xmm4, 0x2f0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x2f0(%rsp), %xmm4
vmovss 0x24(%rsp), %xmm20
vmovaps 0x300(%rsp), %xmm18
vmovss 0x28(%rsp), %xmm17
vmovss 0x2c(%rsp), %xmm16
vmovaps 0xc0(%rsp), %xmm15
vmovss 0xc(%rsp), %xmm14
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps 0x1e0(%rsp), %xmm19
vmovaps 0x200(%rsp), %xmm11
vbroadcastss 0x2e1bb5(%rip), %xmm8 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm31
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x40(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm9
vmovss 0x2b16a6(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x60(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x280(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x330(%rsp), %xmm2
vfmadd132ps 0x2a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x320(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm13, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmovss 0x38(%rsp), %xmm2
vmulss 0x310(%rsp), %xmm2, %xmm2
vmulss 0x34(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2e1aee(%rip){1to4}, %xmm13, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm4, %xmm4
vmovaps 0xd0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x30(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm16, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm11, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm14) + xmm7
vmovaps 0x230(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm11, %xmm5
vmulss %xmm18, %xmm20, %xmm2
vmulss %xmm18, %xmm18, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm11, %xmm6
vaddss %xmm2, %xmm17, %xmm7
vfnmadd231ss %xmm4, %xmm15, %xmm5 # xmm5 = -(xmm15 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm15, %xmm6 # xmm6 = -(xmm15 * xmm3) + xmm6
vpermilps $0xff, 0x380(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm13, %xmm13, %xmm0 # xmm0 = xmm13[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm15, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm15, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm10, %xmm10
vsubss %xmm4, %xmm9, %xmm9
vandps %xmm8, %xmm15, %xmm3
vucomiss %xmm3, %xmm14
jbe 0x1c3f659
vaddss %xmm1, %xmm14, %xmm1
vmovaps 0x2d0(%rsp), %xmm3
vfmadd231ss 0x2b29f4(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm2, %xmm8, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c3f659
vaddss 0x2e0(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x3c(%rsp), %xmm9
jb 0x1c3f65c
movq (%rsp), %rax
vmovss 0x80(%r12,%rax,4), %xmm4
vucomiss %xmm9, %xmm4
jb 0x1c3f65c
vucomiss 0x2ac51d(%rip), %xmm10 # 0x1eeba24
jb 0x1c3f65c
vmovss 0x2ad1ff(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c3f65c
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm19, %xmm2, %xmm1 # xmm1 = xmm19[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2ad1e0(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2ad1da(%rip), %xmm19, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq (%rsp), %rax
movl 0x90(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c3f679
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vfmadd213ps %xmm13, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm13
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c3f67e
cmpq $0x0, 0x40(%r15)
jne 0x1c3f67e
movq (%rsp), %rcx
vmovss %xmm9, 0x80(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0xc0(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0xd0(%r12,%rcx,4)
vmovss %xmm0, 0xe0(%r12,%rcx,4)
vmovss %xmm10, 0xf0(%r12,%rcx,4)
movl $0x0, 0x100(%r12,%rcx,4)
movl 0x20(%rsp), %eax
movl %eax, 0x110(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x120(%r12,%rcx,4)
movq 0x10(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x130(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r12,%rcx,4)
jmp 0x1c3f65c
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c3f8f2
testb %al, %al
je 0x1c3ef56
jmp 0x1c3f8f2
movq %rcx, %r15
jmp 0x1c3f65c
movq 0x10(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm10, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[2,2,2,2]
vbroadcastss %xmm0, %xmm0
vmovaps %xmm2, 0x3e0(%rsp)
vmovaps %xmm3, 0x3f0(%rsp)
vmovaps %xmm0, 0x400(%rsp)
vmovaps %xmm1, 0x410(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vmovdqa 0x3d0(%rsp), %xmm0
vmovdqa %xmm0, 0x440(%rsp)
leaq 0x450(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rdx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x450(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x460(%rsp)
movq (%rsp), %rax
vmovss %xmm9, 0x80(%r12,%rax,4)
vmovaps 0x3b0(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
leaq 0xe0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x120(%rsp)
movq %r12, 0x128(%rsp)
leaq 0x3e0(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x4, 0x138(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c3f7d8
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vmovdqa 0xe0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c3f8da
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c3f841
testb $0x2, (%rcx)
jne 0x1c3f80f
testb $0x40, 0x3e(%r15)
je 0x1c3f841
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vmovdqa 0xe0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c3f8da
movq 0x128(%rsp), %rax
movq 0x130(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c3f8e8
movq (%rsp), %rax
vmovss %xmm4, 0x80(%r12,%rax,4)
movq 0x10(%rsp), %r15
jmp 0x1c3f65c
movq (%rsp), %rax
vmovaps 0x4a0(%rsp), %ymm0
vcmpleps 0x80(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c3ee78
vmovdqa 0x500(%rsp), %ymm1
vpcmpltd 0x480(%rsp), %ymm1, %k1
vmovaps 0x820(%rsp), %ymm0
vpcmpltd 0x5a0(%rsp), %ymm1, %k2
vmovaps 0x360(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
movq (%rsp), %rax
vbroadcastss 0x80(%r12,%rax,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0x100(%rsp), %ecx
andb %al, %cl
vmovaps 0x560(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x1c(%rsp), %edx
andb %al, %dl
orb %cl, %dl
je 0x1c3fa11
movq 0x190(%rsp), %r11
movl %r11d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %dl, 0x860(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x880(%rsp,%rax)
vmovaps 0x160(%rsp), %xmm2
vmovlps %xmm2, 0x8a0(%rsp,%rax)
movq 0x188(%rsp), %rbx
leal 0x1(%rbx), %ecx
movl %ecx, 0x8a8(%rsp,%rax)
incl %r11d
movq 0x180(%rsp), %r8
vbroadcastss 0x2acd21(%rip), %ymm16 # 0x1eec714
movq 0x178(%rsp), %r9
movq 0x198(%rsp), %r10
jmp 0x1c3fa4c
vbroadcastss 0x2acd05(%rip), %ymm16 # 0x1eec714
jmp 0x1c3fa43
movq 0x180(%rsp), %r8
vbroadcastss 0x2accf1(%rip), %ymm16 # 0x1eec714
movq 0x178(%rsp), %r9
movq 0x198(%rsp), %r10
movq 0x190(%rsp), %r11
movq 0x188(%rsp), %rbx
vmovaps 0x160(%rsp), %xmm2
movl %r11d, %eax
testl %eax, %eax
je 0x1c3fb9f
leal -0x1(%rax), %r11d
leaq (%r11,%r11,2), %rcx
shlq $0x5, %rcx
vmovaps 0x880(%rsp,%rcx), %ymm0
movzbl 0x860(%rsp,%rcx), %esi
vaddps 0x360(%rsp), %ymm0, %ymm1
movq (%rsp), %rdx
vcmpleps 0x80(%r12,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %edx
andl %esi, %edx
je 0x1c3fb42
kmovd %edx, %k1
vbroadcastss 0x2abf7d(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %dl, %sil
je 0x1c3fadb
movzbl %sil, %edi
jmp 0x1c3fade
movzbl %dl, %edi
leaq (%rsp,%rcx), %rsi
addq $0x860, %rsi # imm = 0x860
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %ebx
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %edx, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c3fb18
movl %eax, %r11d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x2e13f0(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x3e0(%rsp)
vmovsd 0x3e0(%rsp,%rcx,4), %xmm2
movl %r11d, %eax
testb %dl, %dl
je 0x1c3fa4f
jmp 0x1c3d6b6
vcmpleps 0x2e13a3(%rip), %ymm4, %k2 # 0x1f20f00
vbroadcastss 0x2ad01e(%rip), %ymm4 # 0x1eecb84
vbroadcastss 0x2abeb0(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm4, %ymm16, %ymm3 {%k2}
vmovaps %ymm3, %ymm30 {%k1}
vblendmps %ymm16, %ymm4, %ymm3 {%k2}
kmovd %k2, %ecx
vmovaps %ymm3, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %edx
orb %cl, %dl
andb %al, %dl
movl %edx, %eax
jmp 0x1c3dc9a
movq (%rsp), %rax
vmovaps 0x5c0(%rsp), %ymm0
vcmpleps 0x80(%r12,%rax,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r10d
jne 0x1c3d394
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 8>::intersect_t<embree::avx512::SweepCurve1IntersectorK<embree::BSplineCurveT, 8>, embree::avx512::Intersect1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayHitK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Intersects ray lane k of an 8-wide ray packet against a CurveNv<8> leaf:
// first a vectorized bounds/near test over all curves in the leaf, then a
// per-curve scalar sweep intersection for every surviving lane, with
// software prefetching of the next curves' vertex data to hide latency.
// - pre:     per-traversal precalculations (project type; contents opaque here)
// - ray:     8-wide ray packet; ray.tfar[k] is re-read after each hit so the
//            remaining candidate set shrinks as closer hits are found
// - k:       index of the active ray lane within the packet
// - context: query context; provides the scene for geometry lookup
// - prim:    the curve leaf primitive block (holds N curves' data)
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Broad-phase: vectorized test of ray k against all M curves in the leaf.
// 'valid' marks curves whose bounds the ray enters; tNear holds entry distances.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
// bscf = extract index of lowest set bit and clear it from mask
// (compiles to tzcnt+blsr in the disassembly above).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i (Vec3ff = xyz + packed w/radius).
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Prefetch vertex data for the next candidate curve(s) before the (long)
// scalar intersection below. 'mask' already has bit i cleared, so it is
// nonzero iff at least one more curve remains; i1 is that next curve.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
// A second candidate exists beyond i1; stage it in L2 only.
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Narrow-phase sweep intersection of curve i; the epilog commits any hit
// into ray lane k (updating ray.tfar[k] on acceptance).
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// Re-cull: drop remaining candidates whose entry distance now exceeds the
// (possibly shortened) ray extent after the hit above.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xa60, %rsp # imm = 0xA60
movq %rcx, %r15
movq %rdx, %r10
movq %rsi, %r12
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r10,4), %xmm1
vmovss 0x80(%rsi,%r10,4), %xmm2
vinsertps $0x10, 0x20(%rsi,%r10,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%rsi,%r10,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%rsi,%r10,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%rsi,%r10,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2d0009(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2de7d7(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2de74c(%rip), %ymm6 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm6, %ymm2, %ymm5
vbroadcastss 0x2ae85e(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm6, %ymm1, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm6, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2a9f4c(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x60(%r12,%r10,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2dd658(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
movq %r10, (%rsp)
vminps 0x100(%r12,%r10,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2dd62f(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x318031(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x680(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c451d4
leaq (%r8,%rax), %r9
addq $0x6, %r9
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r10d
addq $0x10, %r9
leaq 0x500(%rsp), %rax
addq $0xe0, %rax
movq %rax, 0x170(%rsp)
movl $0x1, %eax
movq (%rsp), %rcx
shlxl %ecx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movq %r15, 0x10(%rsp)
movq %r8, 0x180(%rsp)
movq %r9, 0x178(%rsp)
tzcntq %r10, %rax
blsrq %r10, %r10
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x20(%rsp)
shll $0x6, %eax
movq %r10, %rcx
vmovups (%r9,%rax), %xmm0
subq $0x1, %rcx
jb 0x1c429c0
andq %r10, %rcx
tzcntq %r10, %rdx
shll $0x6, %edx
prefetcht0 (%r9,%rdx)
prefetcht0 0x40(%r9,%rdx)
testq %rcx, %rcx
je 0x1c429c0
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r9,%rcx)
prefetcht1 0x40(%r9,%rcx)
vmovups 0x10(%r9,%rax), %xmm1
vmovups 0x20(%r9,%rax), %xmm2
vmovups 0x30(%r9,%rax), %xmm3
movq (%rsp), %rax
vmovss (%r12,%rax,4), %xmm4
vinsertps $0x1c, 0x20(%r12,%rax,4), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r12,%rax,4), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
vbroadcastss 0x80(%r12,%rax,4), %ymm30
vbroadcastss 0xa0(%r12,%rax,4), %ymm31
vunpcklps %xmm31, %xmm30, %xmm5 # xmm5 = xmm30[0],xmm31[0],xmm30[1],xmm31[1]
vbroadcastss 0xc0(%r12,%rax,4), %ymm21
vinsertps $0x28, %xmm21, %xmm5, %xmm9 # xmm9 = xmm5[0,1],xmm21[0],zero
vaddps %xmm1, %xmm0, %xmm5
vaddps %xmm2, %xmm5, %xmm5
vaddps %xmm3, %xmm5, %xmm5
vmulps 0x2da8d2(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm4, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0x60(%r12,%rax,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
movl 0x2(%r8), %eax
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x2ae597(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x230(%rsp)
vmovaps %ymm6, 0x380(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm6) + xmm4
vblendps $0x8, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[3]
vsubps %xmm4, %xmm0, %xmm6
vsubps %xmm4, %xmm2, %xmm7
vsubps %xmm4, %xmm1, %xmm8
vsubps %xmm4, %xmm3, %xmm3
vbroadcastss %xmm6, %ymm0
vmovaps %ymm0, 0x8c0(%rsp)
vbroadcastss 0x2cfc52(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm1
vmovaps %ymm1, 0x8a0(%rsp)
vbroadcastss 0x2de413(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm2
vmovaps %ymm2, 0x880(%rsp)
vbroadcastss 0x2de3f8(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x2a0(%rsp)
vpermps %ymm6, %ymm2, %ymm4
vmovaps %ymm4, 0x860(%rsp)
vbroadcastss %xmm8, %ymm4
vmovaps %ymm4, 0x840(%rsp)
vpermps %ymm8, %ymm0, %ymm4
vmovaps %ymm4, 0x820(%rsp)
vpermps %ymm8, %ymm1, %ymm4
vmovaps %ymm4, 0x800(%rsp)
vmovaps %ymm8, 0x260(%rsp)
vpermps %ymm8, %ymm2, %ymm4
vmovaps %ymm4, 0x7e0(%rsp)
vbroadcastss %xmm7, %ymm4
vmovaps %ymm4, 0x7c0(%rsp)
vpermps %ymm7, %ymm0, %ymm4
vmovaps %ymm4, 0x7a0(%rsp)
vpermps %ymm7, %ymm1, %ymm4
vmovaps %ymm4, 0x780(%rsp)
vmovaps %ymm7, 0x280(%rsp)
vpermps %ymm7, %ymm2, %ymm4
vmovaps %ymm4, 0x760(%rsp)
vbroadcastss %xmm3, %ymm4
vmovaps %ymm4, 0x740(%rsp)
vpermps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x720(%rsp)
vpermps %ymm3, %ymm1, %ymm0
vmovaps %ymm0, 0x700(%rsp)
vmovaps %ymm3, 0x240(%rsp)
vpermps %ymm3, %ymm2, %ymm0
vmovaps %ymm0, 0x6e0(%rsp)
vmulss %xmm21, %xmm21, %xmm0
vfmadd231ps %ymm31, %ymm31, %ymm0 # ymm0 = (ymm31 * ymm31) + ymm0
vfmadd231ps %ymm30, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm30) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x6c0(%rsp)
vandps 0x2de2e0(%rip){1to8}, %ymm0, %ymm0 # 0x1f20ec4
vmovaps %ymm0, 0x4c0(%rsp)
vmovss %xmm10, 0x3c(%rsp)
vmovaps %xmm5, 0x300(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
movq %rax, 0x98(%rsp)
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x480(%rsp)
movl $0x1, %ebx
xorl %r11d, %r11d
movl 0x20(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x460(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x10c(%rsp)
vmovaps %xmm11, 0x220(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x108(%rsp)
vmovsd 0x2a9a83(%rip), %xmm2 # 0x1eec6f0
vbroadcastss 0x2a9a9d(%rip), %ymm16 # 0x1eec714
vmovaps %ymm30, 0x4e0(%rsp)
vmovaps %ymm31, 0x1c0(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vsubss %xmm2, %xmm0, %xmm0
vmulss 0x2de231(%rip), %xmm0, %xmm1 # 0x1f20ed0
vmovaps %xmm1, 0x60(%rsp)
vmovaps %xmm2, 0x160(%rsp)
vbroadcastss %xmm2, %ymm4
vbroadcastss %xmm0, %ymm0
vmovaps %ymm4, 0x40(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vfmadd231ps 0x2de250(%rip), %ymm0, %ymm4 # ymm4 = (ymm0 * mem) + ymm4
vsubps %ymm4, %ymm16, %ymm7
vmulps %ymm7, %ymm7, %ymm8
vmulps %ymm7, %ymm8, %ymm0
vbroadcastss 0x2a9ea4(%rip), %ymm30 # 0x1eecb8c
vmulps %ymm30, %ymm0, %ymm1
vmulps %ymm4, %ymm4, %ymm9
vmulps %ymm4, %ymm9, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm7, %ymm4, %ymm10
vmulps %ymm7, %ymm10, %ymm5
vbroadcastss 0x2ae2f1(%rip), %ymm14 # 0x1ef0ffc
vmulps %ymm5, %ymm14, %ymm11
vmulps %ymm4, %ymm10, %ymm12
vbroadcastss 0x2ae2d8(%rip), %ymm15 # 0x1ef0ff4
vmulps %ymm15, %ymm12, %ymm13
vaddps %ymm13, %ymm11, %ymm11
vaddps %ymm1, %ymm11, %ymm1
vmulps %ymm30, %ymm2, %ymm11
vaddps %ymm0, %ymm11, %ymm11
vmulps %ymm14, %ymm12, %ymm12
vmulps %ymm5, %ymm15, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vbroadcastss 0x2ae2b2(%rip), %ymm14 # 0x1ef1000
vmulps %ymm0, %ymm14, %ymm11
vmulps %ymm1, %ymm14, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm2, %ymm14, %ymm5
vmovaps 0x740(%rsp), %ymm25
vmulps %ymm5, %ymm25, %ymm0
vmovaps 0x720(%rsp), %ymm27
vmulps %ymm5, %ymm27, %ymm2
vmovaps 0x700(%rsp), %ymm28
vmulps %ymm5, %ymm28, %ymm1
vmovaps 0x6e0(%rsp), %ymm29
vmulps %ymm5, %ymm29, %ymm5
vmovaps 0x7c0(%rsp), %ymm20
vfmadd231ps %ymm20, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm20) + ymm0
vmovaps 0x7a0(%rsp), %ymm22
vfmadd231ps %ymm22, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm22) + ymm2
vmovaps 0x780(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm23) + ymm1
vmovaps 0x760(%rsp), %ymm24
vfmadd231ps %ymm13, %ymm24, %ymm5 # ymm5 = (ymm24 * ymm13) + ymm5
vmovaps 0x840(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm16) + ymm0
vmovaps 0x820(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm17) + ymm2
vmovaps 0x800(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm18) + ymm1
vmovaps 0x7e0(%rsp), %ymm19
vfmadd231ps %ymm12, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm12) + ymm5
vmovaps 0x8c0(%rsp), %ymm6
vfmadd231ps %ymm6, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm6) + ymm0
vmovaps 0x8a0(%rsp), %ymm3
vfmadd231ps %ymm3, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm3) + ymm2
vmovaps 0x880(%rsp), %ymm14
vfmadd231ps %ymm14, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm14) + ymm1
vmovaps 0x860(%rsp), %ymm15
vfmadd231ps %ymm11, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm11) + ymm5
vbroadcastss 0x2de078(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm7, %ymm11
vmulps %ymm7, %ymm11, %ymm7
vxorps %ymm26, %ymm4, %ymm11
vmulps %ymm4, %ymm11, %ymm4
vmulps %ymm30, %ymm10, %ymm10
vsubps %ymm10, %ymm4, %ymm4
vaddps %ymm10, %ymm8, %ymm8
vbroadcastss 0x2a9d0b(%rip), %ymm10 # 0x1eecb80
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm4
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vmulps %ymm9, %ymm25, %ymm10
vmulps %ymm9, %ymm27, %ymm12
vmulps %ymm9, %ymm28, %ymm13
vmulps %ymm9, %ymm29, %ymm9
vfmadd231ps %ymm20, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm20) + ymm10
vfmadd231ps %ymm22, %ymm8, %ymm12 # ymm12 = (ymm8 * ymm22) + ymm12
vfmadd231ps %ymm23, %ymm8, %ymm13 # ymm13 = (ymm8 * ymm23) + ymm13
vfmadd231ps %ymm8, %ymm24, %ymm9 # ymm9 = (ymm24 * ymm8) + ymm9
vfmadd231ps %ymm16, %ymm4, %ymm10 # ymm10 = (ymm4 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm4, %ymm12 # ymm12 = (ymm4 * ymm17) + ymm12
vfmadd231ps %ymm18, %ymm4, %ymm13 # ymm13 = (ymm4 * ymm18) + ymm13
vfmadd231ps %ymm4, %ymm19, %ymm9 # ymm9 = (ymm19 * ymm4) + ymm9
vfmadd231ps %ymm6, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm6) + ymm10
vfmadd231ps %ymm3, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm3) + ymm12
vfmadd231ps %ymm14, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm14) + ymm13
vfmadd231ps %ymm7, %ymm15, %ymm9 # ymm9 = (ymm15 * ymm7) + ymm9
vbroadcastss 0x60(%rsp), %ymm4
vmulps %ymm4, %ymm10, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vmulps %ymm4, %ymm13, %ymm13
vmulps %ymm4, %ymm9, %ymm6
vmovaps %ymm0, %ymm8
vmovaps 0x31ce1a(%rip), %ymm7 # 0x1f5fd20
vmovaps %ymm31, %ymm3
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm2, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm1, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm5, %ymm6, %ymm4
vmaxps %ymm4, %ymm5, %ymm14
vminps %ymm4, %ymm5, %ymm4
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm5
vpermt2ps %ymm31, %ymm7, %ymm5
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm0, %ymm8, %ymm7
vsubps %ymm2, %ymm9, %ymm6
vsubps %ymm1, %ymm10, %ymm27
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm27, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm27) - ymm17
vmulps %ymm11, %ymm27, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm27, %ymm27, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vbroadcastss 0x2a9738(%rip), %ymm24 # 0x1eec714
vmovaps %ymm24, %ymm26
vfnmadd213ps %ymm24, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm24
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm5, %ymm6, %ymm22
vfmsub231ps %ymm27, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm27) - ymm22
vmulps %ymm19, %ymm27, %ymm24
vfmsub231ps %ymm7, %ymm5, %ymm24 # ymm24 = (ymm5 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm4, %ymm4
vsubps %ymm18, %ymm4, %ymm4
vmulps 0x2ad8d7(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2ad8d1(%rip){1to8}, %ymm4, %ymm4 # 0x1ef0944
vmovaps %ymm4, 0x60(%rsp)
vmulps %ymm14, %ymm14, %ymm4
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2a968e(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2a966f(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmovaps %ymm27, 0x3a0(%rsp)
vmulps %ymm14, %ymm27, %ymm18
vmovaps %ymm0, 0x140(%rsp)
vsubps %ymm0, %ymm31, %ymm27
vmovaps %ymm2, 0xa0(%rsp)
vsubps %ymm2, %ymm31, %ymm28
vmovaps %ymm1, 0x1e0(%rsp)
vsubps %ymm1, %ymm31, %ymm29
vmulps %ymm29, %ymm21, %ymm22
vfmadd231ps %ymm28, %ymm3, %ymm22 # ymm22 = (ymm3 * ymm28) + ymm22
vmovaps 0x4e0(%rsp), %ymm0
vfmadd231ps %ymm27, %ymm0, %ymm22 # ymm22 = (ymm0 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm21, %ymm17
vfmadd231ps %ymm3, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm3) + ymm17
vfmadd231ps %ymm0, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm0) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm2
vmovaps 0x6c0(%rsp), %ymm3
vsubps %ymm2, %ymm3, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm1
vsubps %ymm4, %ymm1, %ymm4
vmulps %ymm22, %ymm22, %ymm25
vmulps %ymm30, %ymm15, %ymm24
vmulps %ymm4, %ymm24, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c4327f
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm3
vfnmadd213ps %ymm26, %ymm3, %ymm31 # ymm31 = -(ymm3 * ymm31) + ymm26
vfmadd132ps %ymm3, %ymm3, %ymm31 # ymm31 = (ymm31 * ymm3) + ymm3
vxorps 0x2ddcf7(%rip){1to8}, %ymm22, %ymm3 # 0x1f20ec0
vsubps %ymm30, %ymm3, %ymm3
vmulps %ymm31, %ymm3, %ymm3
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm3, %ymm30 # ymm30 = (ymm3 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x420(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x400(%rsp)
vbroadcastss 0x2a8801(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm3, %ymm16, %ymm30 {%k1}
vbroadcastss 0x2a9956(%rip), %ymm3 # 0x1eecb84
vblendmps %ymm31, %ymm3, %ymm31 {%k1}
vbroadcastss 0x2ddc86(%rip), %ymm23 # 0x1f20ec4
vandps %ymm23, %ymm2, %ymm3
vmovaps 0x4c0(%rsp), %ymm16
vmaxps %ymm3, %ymm16, %ymm3
vmulps 0x2aec58(%rip){1to8}, %ymm3, %ymm3 # 0x1ef1eb4
vandps %ymm23, %ymm15, %ymm23
vcmpltps %ymm3, %ymm23, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c45164
vbroadcastss 0x2a9497(%rip), %ymm16 # 0x1eec714
jmp 0x1c43299
vbroadcastss 0x2a8797(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2a98f1(%rip), %ymm31 # 0x1eecb84
vmovaps %ymm26, %ymm16
andb $0x7f, %al
je 0x1c4368b
movq (%rsp), %rcx
vmovss 0x100(%r12,%rcx,4), %xmm3
vsubss 0x300(%rsp), %xmm3, %xmm3
vbroadcastss %xmm3, %ymm3
vminps %ymm31, %ymm3, %ymm3
vmovaps 0x6a0(%rsp), %ymm4
vmaxps %ymm30, %ymm4, %ymm4
vmulps %ymm29, %ymm13, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x1a0(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x1c0(%rsp), %ymm31
vfmadd231ps %ymm12, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm12) + ymm13
vmovaps 0x4e0(%rsp), %ymm30
vfmadd231ps %ymm11, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm11) + ymm13
vbroadcastss 0x2ddbac(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2adcc0(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2ddb87(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm26, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2a980c(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm4, %ymm4
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2a8688(%rip), %ymm23 # 0x1eeba20
vmovaps %ymm23, %ymm11 {%k1}
vminps %ymm11, %ymm3, %ymm3
vxorps %xmm13, %xmm13, %xmm13
vsubps %ymm8, %ymm13, %ymm8
vsubps %ymm9, %ymm13, %ymm9
vsubps %ymm10, %ymm13, %ymm10
vmulps %ymm5, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm5, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm30, %ymm8 # ymm8 = -(ymm30 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm26, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm26, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm4, %ymm0
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm23, %ymm9 {%k1}
vminps %ymm9, %ymm3, %ymm8
vmovaps %ymm0, 0x360(%rsp)
vcmpleps %ymm8, %ymm0, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c45055
vmovaps 0x420(%rsp), %ymm3
vmaxps 0x60(%rsp), %ymm13, %ymm4
vminps %ymm16, %ymm3, %ymm3
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm3, %ymm3
vmovaps 0x400(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2ddab5(%rip), %ymm11 # 0x1f20f40
vaddps %ymm3, %ymm11, %ymm3
vbroadcastss 0x2db020(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm3, %ymm12, %ymm3
vmovaps 0x40(%rsp), %ymm0
vmovaps %ymm1, %ymm5
vmovaps 0x200(%rsp), %ymm1
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vmovaps %ymm3, 0x420(%rsp)
vmaxps %ymm10, %ymm9, %ymm3
vaddps %ymm3, %ymm11, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vfmadd213ps %ymm0, %ymm1, %ymm3 # ymm3 = (ymm1 * ymm3) + ymm0
vmovaps %ymm3, 0x400(%rsp)
vmulps %ymm4, %ymm4, %ymm3
vsubps %ymm3, %ymm5, %ymm11
vmulps %ymm11, %ymm24, %ymm3
vsubps %ymm3, %ymm25, %ymm3
vcmpnltps %ymm10, %ymm3, %k0
kortestb %k0, %k0
je 0x1c436a0
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm13, %ymm3, %k1
vsqrtps %ymm3, %ymm3
vaddps %ymm15, %ymm15, %ymm4
vrcp14ps %ymm4, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm4 # ymm4 = -(ymm9 * ymm4) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm4 # ymm4 = (ymm4 * ymm9) + ymm9
vxorps 0x2dd992(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm3, %ymm9, %ymm9
vmulps %ymm4, %ymm9, %ymm12
vsubps %ymm22, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm13
vmovaps %ymm17, %ymm3
vfmadd213ps %ymm18, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm3) + ymm18
vmulps %ymm3, %ymm14, %ymm9
vmulps %ymm12, %ymm30, %ymm3
vmulps %ymm12, %ymm31, %ymm4
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vmovaps %ymm2, %ymm16
vmovaps 0x140(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm2
vsubps %ymm19, %ymm3, %ymm3
vmovaps %ymm6, %ymm19
vmovaps 0xa0(%rsp), %ymm1
vfmadd213ps %ymm1, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm1
vsubps %ymm19, %ymm4, %ymm4
vmovaps 0x1e0(%rsp), %ymm0
vmovaps 0x3a0(%rsp), %ymm5
vfmadd213ps %ymm0, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm0
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm30, %ymm10
vmulps %ymm13, %ymm31, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm2, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm2
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm1, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm1
vsubps %ymm6, %ymm17, %ymm1
vfmadd213ps %ymm0, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm0
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2a8420(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm12, %ymm0, %ymm2 {%k1}
vbroadcastss 0x2a9575(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm13, %ymm0, %ymm0 {%k1}
vbroadcastss 0x2dd8a6(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm16, %ymm6
vmovaps 0x4c0(%rsp), %ymm12
vmaxps %ymm6, %ymm12, %ymm6
vmulps 0x2ae879(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm7, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c436c7
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2a9528(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2a83bb(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm0 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c436c7
vmovaps 0x1c0(%rsp), %ymm31
vmovaps 0x1a0(%rsp), %ymm21
jmp 0x1c45055
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm4, %xmm4, %xmm4
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2a8362(%rip), %ymm2 # 0x1eeba20
vbroadcastss 0x2a94bd(%rip), %ymm0 # 0x1eecb84
vbroadcastss 0x2dd7f4(%rip), %xmm11 # 0x1f20ec4
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm1, %ymm31, %ymm5 # ymm5 = (ymm31 * ymm1) + ymm5
vfmadd231ps %ymm10, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm10) + ymm5
vmovaps 0x360(%rsp), %ymm7
vmovaps %ymm7, 0x8e0(%rsp)
vminps %ymm2, %ymm8, %ymm1
vmovaps %ymm1, 0x900(%rsp)
vbroadcastss 0x2dd7ba(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm5, %ymm2
vmaxps %ymm0, %ymm7, %ymm5
vmovaps %ymm5, 0x620(%rsp)
vmovaps %ymm8, 0x640(%rsp)
vbroadcastss 0x2dd7a7(%rip), %ymm0 # 0x1f20ed4
vcmpltps %ymm0, %ymm2, %k1
kmovd %k1, 0x104(%rsp)
vcmpleps %ymm1, %ymm7, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x3c0(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %edx
andb %al, %dl
movl %edx, %eax
orb %cl, %al
je 0x1c45017
vmovaps %ymm0, %ymm2
movl %edx, 0x1c(%rsp)
movq %r11, 0x190(%rsp)
movq %r10, 0x198(%rsp)
knotb %k0, %k1
vmulps %ymm9, %ymm21, %ymm0
vfmadd213ps %ymm0, %ymm31, %ymm4 # ymm4 = (ymm31 * ymm4) + ymm0
vfmadd213ps %ymm4, %ymm30, %ymm3 # ymm3 = (ymm30 * ymm3) + ymm4
vandps %ymm6, %ymm3, %ymm0
vcmpltps %ymm2, %ymm0, %k0
kmovd %k1, 0xfc(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2dd721(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2dd713(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %rbx, 0x188(%rsp)
vpbroadcastd %ebx, %ymm1
vmovdqa %ymm0, 0x660(%rsp)
vmovdqa %ymm1, 0x4a0(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %ebx
movl %ecx, 0x100(%rsp)
andb %cl, %bl
je 0x1c4435c
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x260(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x280(%rsp), %ymm3
vmovaps 0x240(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm0, %xmm11, %xmm0
vandps %xmm1, %xmm11, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2ae654(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x30(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2f0(%rsp)
vmovaps 0x360(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2a8184(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x420(%rsp,%rcx), %xmm10
vmovss 0x8e0(%rsp,%rcx), %xmm8
vmovaps 0x220(%rsp), %xmm0
vucomiss 0x2a8120(%rip), %xmm0 # 0x1eeba24
vmovss 0x108(%rsp), %xmm0
jae 0x1c4394a
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2ae553(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x34(%rsp)
movl $0x4, %r14d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x230(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2a8d83(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2a91dc(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x60(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2ad620(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2ad61c(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2ad5f9(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x280(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x260(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x3a0(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2a7fae(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
jb 0x1c43a8d
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c43abd
vmovss %xmm11, 0x140(%rsp)
vmovss %xmm12, 0x8(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x8(%rsp), %xmm12
vmovss 0x140(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x60(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2dd3ef(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x350(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2a90a2(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2a9080(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x240(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x280(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm13
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm13 # xmm13 = (xmm13 * mem) + xmm2
vdpps $0x7f, %xmm13, %xmm13, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2a8bb1(%rip), %xmm3, %xmm5 # 0x1eec718
vmulss 0x2a8bad(%rip), %xmm0, %xmm6 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x330(%rsp)
vfnmadd213ss 0x2ad46d(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x38(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm13, 0x140(%rsp)
vmovss %xmm7, 0x8(%rsp)
vmovaps %xmm0, 0x340(%rsp)
jb 0x1c43bb7
vsqrtss %xmm0, %xmm0, %xmm16
jmp 0x1c43c04
vmovaps %xmm3, 0xe0(%rsp)
vmovss %xmm5, 0xc(%rsp)
vmovss %xmm6, 0xd0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xd0(%rsp), %xmm6
vmovss 0xc(%rsp), %xmm5
vmovaps 0xe0(%rsp), %xmm3
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps %xmm0, %xmm16
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x1e0(%rsp), %xmm19
vmulss %xmm3, %xmm6, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm5, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm13, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm15
vaddss 0x2a8acf(%rip), %xmm7, %xmm14 # 0x1eec714
vmulps %xmm15, %xmm15, %xmm0
vsubps %xmm0, %xmm19, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm18
vmulss 0x2a8ab0(%rip), %xmm18, %xmm17 # 0x1eec718
vmulss 0x2a8aaa(%rip), %xmm0, %xmm20 # 0x1eec71c
vucomiss 0x2a7daa(%rip), %xmm0 # 0x1eeba24
jb 0x1c43c85
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c43d1d
vmovss %xmm14, 0xc(%rsp)
vmovaps %xmm15, 0xd0(%rsp)
vmovss %xmm16, 0x2c(%rsp)
vmovss %xmm17, 0x28(%rsp)
vmovaps %xmm18, 0x320(%rsp)
vmovss %xmm20, 0x24(%rsp)
vmovaps %xmm4, 0x310(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x310(%rsp), %xmm4
vmovss 0x24(%rsp), %xmm20
vmovaps 0x320(%rsp), %xmm18
vmovss 0x28(%rsp), %xmm17
vmovss 0x2c(%rsp), %xmm16
vmovaps 0xd0(%rsp), %xmm15
vmovss 0xc(%rsp), %xmm14
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps 0x1e0(%rsp), %xmm19
vmovaps 0x200(%rsp), %xmm9
vbroadcastss 0x2dd19e(%rip), %xmm11 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm31
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x40(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2acc8f(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x60(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x280(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x350(%rsp), %xmm2
vfmadd132ps 0x2a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x340(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm13, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmovss 0x38(%rsp), %xmm2
vmulss 0x330(%rsp), %xmm2, %xmm2
vmulss 0x34(%rsp), %xmm8, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2dd0d7(%rip){1to4}, %xmm13, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm4, %xmm4
vmovaps 0xe0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x30(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm16, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm9, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm14) + xmm7
vmovaps 0x230(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm9, %xmm5
vmulss %xmm18, %xmm20, %xmm2
vmulss %xmm18, %xmm18, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm9, %xmm6
vaddss %xmm2, %xmm17, %xmm7
vfnmadd231ss %xmm4, %xmm15, %xmm5 # xmm5 = -(xmm15 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm15, %xmm6 # xmm6 = -(xmm15 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm13, %xmm13, %xmm0 # xmm0 = xmm13[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm15, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm15, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm10, %xmm10
vsubss %xmm4, %xmm8, %xmm8
vandps %xmm11, %xmm15, %xmm3
vucomiss %xmm3, %xmm14
jbe 0x1c44070
vaddss %xmm1, %xmm14, %xmm1
vmovaps 0x2f0(%rsp), %xmm3
vfmadd231ss 0x2adfdd(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm2, %xmm11, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c44070
vaddss 0x300(%rsp), %xmm8, %xmm8
movb $0x1, %r13b
vucomiss 0x3c(%rsp), %xmm8
jb 0x1c44073
movq (%rsp), %rax
vmovss 0x100(%r12,%rax,4), %xmm4
vucomiss %xmm8, %xmm4
jb 0x1c44073
vucomiss 0x2a7b06(%rip), %xmm10 # 0x1eeba24
jb 0x1c44073
vmovss 0x2a87e8(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c44073
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm19, %xmm2, %xmm1 # xmm1 = xmm19[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2a87c9(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2a87c3(%rip), %xmm19, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq (%rsp), %rax
movl 0x120(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c44090
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm13, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm13
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c44095
cmpq $0x0, 0x40(%r15)
jne 0x1c44095
movq (%rsp), %rcx
vmovss %xmm8, 0x100(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x180(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x1a0(%r12,%rcx,4)
vmovss %xmm0, 0x1c0(%r12,%rcx,4)
vmovss %xmm10, 0x1e0(%r12,%rcx,4)
movl $0x0, 0x200(%r12,%rcx,4)
movl 0x20(%rsp), %eax
movl %eax, 0x220(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x240(%r12,%rcx,4)
movq 0x10(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%rcx,4)
jmp 0x1c44073
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c4433a
testb %al, %al
je 0x1c4396d
jmp 0x1c4433a
movq %rcx, %r15
jmp 0x1c44073
movq 0x10(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm10, %ymm1
vbroadcastss 0x2ce658(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2dce22(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x500(%rsp)
vmovaps %ymm3, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovdqa 0x480(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
movq 0x170(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x600(%rsp)
movq (%rsp), %rax
vmovss %xmm8, 0x100(%r12,%rax,4)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
leaq 0x2c0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x120(%rsp)
movq %r12, 0x128(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x8, 0x138(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c4420f
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vbroadcastss 0x2dccb5(%rip), %xmm11 # 0x1f20ec4
vmovdqa 0x2c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c44322
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c4427c
testb $0x2, (%rcx)
jne 0x1c44241
testb $0x40, 0x3e(%r15)
je 0x1c4427c
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vbroadcastss 0x2dcc48(%rip), %xmm11 # 0x1f20ec4
vmovdqa 0x2c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c44322
vptestmd %ymm0, %ymm0, %k1
movq 0x128(%rsp), %rax
movq 0x130(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c44330
movq (%rsp), %rax
vmovss %xmm4, 0x100(%r12,%rax,4)
movq 0x10(%rsp), %r15
jmp 0x1c44073
movq (%rsp), %rax
vmovaps 0x3e0(%rsp), %ymm0
vcmpleps 0x100(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c4388f
vmovaps 0x3c0(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
movq (%rsp), %rax
vcmpleps 0x100(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd 0x104(%rsp), %k1
kmovd 0xfc(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x1c(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2dcb36(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2dcb28(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x3c0(%rsp)
vpcmpled 0x4a0(%rsp), %ymm0, %k0
kmovd %k0, %ebx
movl %ecx, 0x1c(%rsp)
andb %cl, %bl
je 0x1c44f26
vmovaps 0x620(%rsp), %ymm5
vmovaps 0x2a0(%rsp), %ymm1
vmovaps 0x260(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x280(%rsp), %ymm3
vmovaps 0x240(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm0, %xmm11, %xmm0
vandps %xmm1, %xmm11, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2ada78(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x30(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2f0(%rsp)
vmovaps %ymm5, 0x360(%rsp)
vaddps 0x380(%rsp), %ymm5, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x2a75a8(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x400(%rsp,%rcx), %xmm10
vmovss 0x640(%rsp,%rcx), %xmm9
vmovaps 0x220(%rsp), %xmm0
vucomiss 0x2a7544(%rip), %xmm0 # 0x1eeba24
vmovss 0x10c(%rsp), %xmm0
jae 0x1c44526
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm10
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x2ad977(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x34(%rsp)
movl $0x4, %r14d
vmovaps %xmm9, 0xa0(%rsp)
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x230(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2a81a7(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2a8600(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x60(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2aca44(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2aca40(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2aca1d(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x280(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x260(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x3a0(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2a73d2(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
jb 0x1c44669
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c44699
vmovss %xmm11, 0x140(%rsp)
vmovss %xmm12, 0x8(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x8(%rsp), %xmm12
vmovss 0x140(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x60(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2dc813(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x350(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2a84c6(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2a84a4(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x240(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x280(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm13
vfmadd132ps 0x2a0(%rsp), %xmm2, %xmm13 # xmm13 = (xmm13 * mem) + xmm2
vdpps $0x7f, %xmm13, %xmm13, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2a7fd5(%rip), %xmm3, %xmm5 # 0x1eec718
vmulss 0x2a7fd1(%rip), %xmm0, %xmm6 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x330(%rsp)
vfnmadd213ss 0x2ac891(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x38(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm13, 0x140(%rsp)
vmovss %xmm7, 0x8(%rsp)
vmovaps %xmm0, 0x340(%rsp)
jb 0x1c44793
vsqrtss %xmm0, %xmm0, %xmm16
jmp 0x1c447e0
vmovaps %xmm3, 0xe0(%rsp)
vmovss %xmm5, 0xc(%rsp)
vmovss %xmm6, 0xd0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xd0(%rsp), %xmm6
vmovss 0xc(%rsp), %xmm5
vmovaps 0xe0(%rsp), %xmm3
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps %xmm0, %xmm16
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm19
vmulss %xmm3, %xmm6, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm5, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm13, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vdpps $0x7f, %xmm0, %xmm11, %xmm15
vaddss 0x2a7ef3(%rip), %xmm7, %xmm14 # 0x1eec714
vmulps %xmm15, %xmm15, %xmm0
vsubps %xmm0, %xmm19, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm18
vmulss 0x2a7ed4(%rip), %xmm18, %xmm17 # 0x1eec718
vmulss 0x2a7ece(%rip), %xmm0, %xmm20 # 0x1eec71c
vucomiss 0x2a71ce(%rip), %xmm0 # 0x1eeba24
jb 0x1c44861
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c448f9
vmovss %xmm14, 0xc(%rsp)
vmovaps %xmm15, 0xd0(%rsp)
vmovss %xmm16, 0x2c(%rsp)
vmovss %xmm17, 0x28(%rsp)
vmovaps %xmm18, 0x320(%rsp)
vmovss %xmm20, 0x24(%rsp)
vmovaps %xmm4, 0x310(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x310(%rsp), %xmm4
vmovss 0x24(%rsp), %xmm20
vmovaps 0x320(%rsp), %xmm18
vmovss 0x28(%rsp), %xmm17
vmovss 0x2c(%rsp), %xmm16
vmovaps 0xd0(%rsp), %xmm15
vmovss 0xc(%rsp), %xmm14
vmovss 0x8(%rsp), %xmm7
vmovaps 0x140(%rsp), %xmm13
vmovaps 0x1e0(%rsp), %xmm19
vmovaps 0x200(%rsp), %xmm11
vbroadcastss 0x2dc5c2(%rip), %xmm8 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm31
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x40(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm9
vmovss 0x2ac0b3(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x60(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x240(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x280(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x350(%rsp), %xmm2
vfmadd132ps 0x2a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x340(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm13, %xmm2
vsubps %xmm2, %xmm1, %xmm1
vmovss 0x38(%rsp), %xmm2
vmulss 0x330(%rsp), %xmm2, %xmm2
vmulss 0x34(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2dc4fb(%rip){1to4}, %xmm13, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm4, %xmm4
vmovaps 0xe0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x30(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm16, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm11, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm14) + xmm7
vmovaps 0x230(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm11, %xmm5
vmulss %xmm18, %xmm20, %xmm2
vmulss %xmm18, %xmm18, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm11, %xmm6
vaddss %xmm2, %xmm17, %xmm7
vfnmadd231ss %xmm4, %xmm15, %xmm5 # xmm5 = -(xmm15 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm15, %xmm6 # xmm6 = -(xmm15 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm13, %xmm13, %xmm0 # xmm0 = xmm13[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm15, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm15, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm10, %xmm10
vsubss %xmm4, %xmm9, %xmm9
vandps %xmm8, %xmm15, %xmm3
vucomiss %xmm3, %xmm14
jbe 0x1c44c4c
vaddss %xmm1, %xmm14, %xmm1
vmovaps 0x2f0(%rsp), %xmm3
vfmadd231ss 0x2ad401(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm2, %xmm8, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c44c4c
vaddss 0x300(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x3c(%rsp), %xmm9
jb 0x1c44c4f
movq (%rsp), %rax
vmovss 0x100(%r12,%rax,4), %xmm4
vucomiss %xmm9, %xmm4
jb 0x1c44c4f
vucomiss 0x2a6f2a(%rip), %xmm10 # 0x1eeba24
jb 0x1c44c4f
vmovss 0x2a7c0c(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c44c4f
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm19, %xmm2, %xmm1 # xmm1 = xmm19[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2a7bed(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2a7be7(%rip), %xmm19, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq (%rsp), %rax
movl 0x120(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c44c6c
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vfmadd213ps %xmm13, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm13
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm3 # xmm3 = xmm13[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c44c71
cmpq $0x0, 0x40(%r15)
jne 0x1c44c71
movq (%rsp), %rcx
vmovss %xmm9, 0x100(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x180(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x1a0(%r12,%rcx,4)
vmovss %xmm0, 0x1c0(%r12,%rcx,4)
vmovss %xmm10, 0x1e0(%r12,%rcx,4)
movl $0x0, 0x200(%r12,%rcx,4)
movl 0x20(%rsp), %eax
movl %eax, 0x220(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x240(%r12,%rcx,4)
movq 0x10(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%rcx,4)
jmp 0x1c44c4f
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c44f04
testb %al, %al
je 0x1c44549
jmp 0x1c44f04
movq %rcx, %r15
jmp 0x1c44c4f
movq 0x10(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm10, %ymm1
vbroadcastss 0x2cda7c(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2dc246(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x500(%rsp)
vmovaps %ymm3, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x580(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovdqa 0x480(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
movq 0x170(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x600(%rsp)
movq (%rsp), %rax
vmovss %xmm9, 0x100(%r12,%rax,4)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
leaq 0x2c0(%rsp), %rax
movq %rax, 0x110(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x118(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x120(%rsp)
movq %r12, 0x128(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0x130(%rsp)
movl $0x8, 0x138(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c44de2
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vmovdqa 0x2c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c44eec
movq 0x10(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c44e46
testb $0x2, (%rcx)
jne 0x1c44e14
testb $0x40, 0x3e(%r15)
je 0x1c44e46
leaq 0x110(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm31
vmovdqa 0x2c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c44eec
vptestmd %ymm0, %ymm0, %k1
movq 0x128(%rsp), %rax
movq 0x130(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c44efa
movq (%rsp), %rax
vmovss %xmm4, 0x100(%r12,%rax,4)
movq 0x10(%rsp), %r15
jmp 0x1c44c4f
movq (%rsp), %rax
vmovaps 0x3e0(%rsp), %ymm0
vcmpleps 0x100(%r12,%rax,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c4446b
vmovdqa 0x4a0(%rsp), %ymm1
vpcmpltd 0x3c0(%rsp), %ymm1, %k1
vmovaps 0x8e0(%rsp), %ymm0
vpcmpltd 0x660(%rsp), %ymm1, %k2
vmovaps 0x380(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
movq (%rsp), %rax
vbroadcastss 0x100(%r12,%rax,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0x100(%rsp), %ecx
andb %al, %cl
vmovaps 0x620(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x1c(%rsp), %edx
andb %al, %dl
orb %cl, %dl
je 0x1c45023
movq 0x190(%rsp), %r11
movl %r11d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %dl, 0x920(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x940(%rsp,%rax)
vmovaps 0x160(%rsp), %xmm2
vmovlps %xmm2, 0x960(%rsp,%rax)
movq 0x188(%rsp), %rbx
leal 0x1(%rbx), %ecx
movl %ecx, 0x968(%rsp,%rax)
incl %r11d
movq 0x180(%rsp), %r8
vbroadcastss 0x2a770f(%rip), %ymm16 # 0x1eec714
movq 0x178(%rsp), %r9
movq 0x198(%rsp), %r10
jmp 0x1c4505e
vbroadcastss 0x2a76f3(%rip), %ymm16 # 0x1eec714
jmp 0x1c45055
movq 0x180(%rsp), %r8
vbroadcastss 0x2a76df(%rip), %ymm16 # 0x1eec714
movq 0x178(%rsp), %r9
movq 0x198(%rsp), %r10
movq 0x190(%rsp), %r11
movq 0x188(%rsp), %rbx
vmovaps 0x160(%rsp), %xmm2
movl %r11d, %eax
testl %eax, %eax
je 0x1c451b1
leal -0x1(%rax), %r11d
leaq (%r11,%r11,2), %rcx
shlq $0x5, %rcx
vmovaps 0x940(%rsp,%rcx), %ymm0
movzbl 0x920(%rsp,%rcx), %esi
vaddps 0x380(%rsp), %ymm0, %ymm1
movq (%rsp), %rdx
vcmpleps 0x100(%r12,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %edx
andl %esi, %edx
je 0x1c45154
kmovd %edx, %k1
vbroadcastss 0x2a696b(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %dl, %sil
je 0x1c450ed
movzbl %sil, %edi
jmp 0x1c450f0
movzbl %dl, %edi
leaq (%rsp,%rcx), %rsi
addq $0x920, %rsi # imm = 0x920
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %ebx
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %edx, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c4512a
movl %eax, %r11d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x2dbdde(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x500(%rsp)
vmovsd 0x500(%rsp,%rcx,4), %xmm2
movl %r11d, %eax
testb %dl, %dl
je 0x1c45061
jmp 0x1c42c8f
vcmpleps 0x2dbd91(%rip), %ymm4, %k2 # 0x1f20f00
vbroadcastss 0x2a7a0c(%rip), %ymm4 # 0x1eecb84
vbroadcastss 0x2a689e(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm4, %ymm16, %ymm3 {%k2}
vmovaps %ymm3, %ymm30 {%k1}
vblendmps %ymm16, %ymm4, %ymm3 {%k2}
kmovd %k2, %ecx
vmovaps %ymm3, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %edx
orb %cl, %dl
andb %al, %dl
movl %edx, %eax
jmp 0x1c43273
movq (%rsp), %rax
vmovaps 0x680(%rsp), %ymm0
vcmpleps 0x100(%r12,%rax,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r10d
jne 0x1c4296d
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 8>::occluded_t<embree::avx512::SweepCurve1IntersectorK<embree::BSplineCurveT, 8>, embree::avx512::Occluded1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Occlusion (any-hit / shadow-ray) test of ray lane k of an 8-wide ray packet
// against an 8-wide block of curve primitives. Returns true on the first
// confirmed hit; no hit information beyond occlusion is recorded here.
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Coarse bounds test of all M curves in the block at once; 'valid' marks the
// candidate curves, 'tNear' their entry distances along the ray.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Iterate the surviving candidates one set bit at a time.
while (mask)
{
// bscf: bit-scan + clear — yields the lowest set bit index and removes it
// from 'mask' in place (mask is passed by reference), so the loop converges.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Four control points of curve i (Vec3ff: xyz position + radius in w).
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch of the NEXT candidates' vertex data so the loads overlap
// with the (expensive) per-curve intersection below. 'mask1' is a copy:
// peeking ahead must not consume bits from the real iteration mask.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
// Next curve to be processed: pull into L1.
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
// The one after that: L2 is early enough.
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Exact curve intersection; the Epilog handles filter callbacks and, being an
// occlusion epilog, reports success without storing a full hit record.
// Any-hit semantics: first confirmed occluder terminates the whole query.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
return true;
// Re-cull remaining candidates whose entry distance now lies beyond tfar.
// NOTE(review): this assumes ray.tfar[k] may have been shortened by the
// epilog/filter above — confirm against the Occluded1KEpilog implementation.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xa40, %rsp # imm = 0xA40
movq %r8, %r9
movzbl 0x1(%r8), %r10d
leaq (%r10,%r10,4), %r11
leaq (%r11,%r11,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%rdx,4), %xmm1
vmovss 0x80(%rsi,%rdx,4), %xmm2
vinsertps $0x10, 0x20(%rsi,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%rsi,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%rsi,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%rsi,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%r10,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%r10,%r10,2), %rbx
vpmovsxbd 0x6(%r8,%rbx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%r10,%r11,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rbx,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %r10, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%r10,%r10,8), %rdi
leal (%rdi,%rdi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %r10, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %r11d
vpmovsxbd 0x6(%r9,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2cd41a(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2dbbe3(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2dbb51(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x2abc66(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2a7350(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%r10,8), %r8
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rdi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%r10,%r10), %rdi
addq %r10, %r11
shlq $0x3, %rbx
subq %r10, %rbx
movl %r10d, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %rdi, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%r11), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rbx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%rsi,%rdx,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2daa60(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
movq %rsi, 0x18(%rsp)
vminps 0x100(%rsi,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2daa36(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %r10d, %ymm1
vpcmpgtd 0x315432(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x660(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r10b
je 0x1c47c35
movq %rcx, 0x50(%rsp)
leaq (%r9,%rax), %r11
addq $0x6, %r11
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %r11
leaq 0x4e0(%rsp), %rax
addq $0xe0, %rax
movq %rax, 0x140(%rsp)
movl $0x1, %eax
shlxl %edx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x3e0(%rsp)
movq %rdx, 0x28(%rsp)
movq %r9, 0x150(%rsp)
movq %r11, 0x148(%rsp)
tzcntq %r14, %rcx
blsrq %r14, %r14
movl 0x6(%r9,%rcx,4), %eax
shll $0x6, %ecx
movq %r14, %rdi
vmovups (%r11,%rcx), %xmm0
subq $0x1, %rdi
jb 0x1c455c0
andq %r14, %rdi
tzcntq %r14, %rsi
shll $0x6, %esi
prefetcht0 (%r11,%rsi)
prefetcht0 0x40(%r11,%rsi)
testq %rdi, %rdi
je 0x1c455c0
tzcntq %rdi, %rsi
shll $0x6, %esi
prefetcht1 (%r11,%rsi)
prefetcht1 0x40(%r11,%rsi)
vmovups 0x10(%r11,%rcx), %xmm1
vmovups 0x20(%r11,%rcx), %xmm2
vmovups 0x30(%r11,%rcx), %xmm3
movq 0x18(%rsp), %rcx
vmovss (%rcx,%rdx,4), %xmm4
vinsertps $0x1c, 0x20(%rcx,%rdx,4), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%rcx,%rdx,4), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
movl 0x2(%r9), %esi
vbroadcastss 0x80(%rcx,%rdx,4), %ymm12
vbroadcastss 0xa0(%rcx,%rdx,4), %ymm13
vunpcklps %xmm13, %xmm12, %xmm5 # xmm5 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vbroadcastss 0xc0(%rcx,%rdx,4), %ymm14
vinsertps $0x28, %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0,1],xmm14[0],zero
vaddps %xmm1, %xmm0, %xmm5
vaddps %xmm2, %xmm5, %xmm5
vaddps %xmm3, %xmm5, %xmm5
vmulps 0x2d7cca(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm4, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0x60(%rcx,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x2ab994(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x1f0(%rsp)
vmovaps %ymm6, 0x340(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm6) + xmm4
vblendps $0x8, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[3]
vsubps %xmm4, %xmm0, %xmm6
vsubps %xmm4, %xmm2, %xmm7
vsubps %xmm4, %xmm1, %xmm8
vsubps %xmm4, %xmm3, %xmm3
vbroadcastss %xmm6, %ymm16
vbroadcastss 0x2cd057(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm17
vbroadcastss 0x2db820(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm18
vbroadcastss 0x2db80d(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x260(%rsp)
vpermps %ymm6, %ymm2, %ymm19
vbroadcastss %xmm8, %ymm20
vpermps %ymm8, %ymm0, %ymm22
vpermps %ymm8, %ymm1, %ymm23
vmovaps %ymm8, 0x220(%rsp)
vpermps %ymm8, %ymm2, %ymm24
vbroadcastss %xmm7, %ymm25
vpermps %ymm7, %ymm0, %ymm27
vpermps %ymm7, %ymm1, %ymm29
vmovaps %ymm7, 0x240(%rsp)
vpermps %ymm7, %ymm2, %ymm31
vbroadcastss %xmm3, %ymm21
vpermps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x8a0(%rsp)
vpermps %ymm3, %ymm1, %ymm30
vmovaps %ymm3, 0x200(%rsp)
vpermps %ymm3, %ymm2, %ymm6
vmovaps %ymm14, 0x480(%rsp)
vmulss %xmm14, %xmm14, %xmm0
vmovaps %ymm13, 0x4a0(%rsp)
vfmadd231ps %ymm13, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm13) + ymm0
vmovaps %ymm12, 0x4c0(%rsp)
vfmadd231ps %ymm12, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm12) + ymm0
vbroadcastss %xmm0, %ymm1
vandps %ymm28, %ymm1, %ymm0
vmovaps %ymm0, 0x460(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x2b0(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x860(%rsp)
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x420(%rsp)
movl $0x1, %r15d
xorl %r13d, %r13d
xorl %ebx, %ebx
movq %rsi, 0x168(%rsp)
vpbroadcastd %esi, %ymm0
vmovdqa %ymm0, 0x400(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xfc(%rsp)
vmovaps %xmm11, 0x1e0(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xf8(%rsp)
vmovsd 0x2a6eef(%rip), %xmm2 # 0x1eec6f0
vbroadcastss 0x2a6f0a(%rip), %ymm3 # 0x1eec714
vmovaps %ymm1, 0x880(%rsp)
vmovaps %ymm16, 0x840(%rsp)
vmovaps %ymm17, 0x820(%rsp)
vmovaps %ymm18, 0x800(%rsp)
vmovaps %ymm19, 0x7e0(%rsp)
vmovaps %ymm20, 0x7c0(%rsp)
vmovaps %ymm22, 0x7a0(%rsp)
vmovaps %ymm23, 0x780(%rsp)
vmovaps %ymm24, 0x760(%rsp)
vmovaps %ymm25, 0x740(%rsp)
vmovaps %ymm27, 0x720(%rsp)
vmovaps %ymm29, 0x700(%rsp)
vmovaps %ymm31, 0x6e0(%rsp)
vmovaps %ymm21, 0x6c0(%rsp)
vmovaps %ymm30, 0x6a0(%rsp)
vmovaps %ymm6, 0x680(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vsubss %xmm2, %xmm0, %xmm0
vmulss 0x2db634(%rip), %xmm0, %xmm1 # 0x1f20ed0
vmovaps %xmm1, 0x80(%rsp)
vmovaps %xmm2, 0x130(%rsp)
vbroadcastss %xmm2, %ymm4
vbroadcastss %xmm0, %ymm0
vmovaps %ymm4, 0x60(%rsp)
vmovaps %ymm0, 0x1c0(%rsp)
vfmadd231ps 0x2db650(%rip), %ymm0, %ymm4 # ymm4 = (ymm0 * mem) + ymm4
vsubps %ymm4, %ymm3, %ymm7
vmulps %ymm7, %ymm7, %ymm8
vmulps %ymm7, %ymm8, %ymm0
vbroadcastss 0x2a72a6(%rip), %ymm28 # 0x1eecb8c
vmulps %ymm28, %ymm0, %ymm1
vmulps %ymm4, %ymm4, %ymm9
vmulps %ymm4, %ymm9, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm7, %ymm4, %ymm10
vmulps %ymm7, %ymm10, %ymm5
vbroadcastss 0x2ab6f3(%rip), %ymm14 # 0x1ef0ffc
vmulps %ymm5, %ymm14, %ymm11
vmulps %ymm4, %ymm10, %ymm12
vbroadcastss 0x2ab6da(%rip), %ymm15 # 0x1ef0ff4
vmulps %ymm15, %ymm12, %ymm13
vaddps %ymm13, %ymm11, %ymm11
vaddps %ymm1, %ymm11, %ymm1
vmulps %ymm28, %ymm2, %ymm11
vaddps %ymm0, %ymm11, %ymm11
vmulps %ymm14, %ymm12, %ymm12
vmulps %ymm5, %ymm15, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vbroadcastss 0x2ab6b4(%rip), %ymm14 # 0x1ef1000
vmulps %ymm0, %ymm14, %ymm11
vmulps %ymm1, %ymm14, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm2, %ymm14, %ymm5
vmulps %ymm5, %ymm21, %ymm2
vmovaps 0x8a0(%rsp), %ymm0
vmulps %ymm5, %ymm0, %ymm1
vmulps %ymm5, %ymm30, %ymm3
vmulps %ymm5, %ymm6, %ymm5
vfmadd231ps %ymm25, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm25) + ymm2
vfmadd231ps %ymm27, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm27) + ymm1
vfmadd231ps %ymm29, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm29) + ymm3
vfmadd231ps %ymm13, %ymm31, %ymm5 # ymm5 = (ymm31 * ymm13) + ymm5
vfmadd231ps %ymm20, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm20) + ymm2
vfmadd231ps %ymm22, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm22) + ymm1
vfmadd231ps %ymm23, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm23) + ymm3
vfmadd231ps %ymm12, %ymm24, %ymm5 # ymm5 = (ymm24 * ymm12) + ymm5
vfmadd231ps %ymm16, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm16) + ymm2
vfmadd231ps %ymm17, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm17) + ymm1
vfmadd231ps %ymm18, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm18) + ymm3
vfmadd231ps %ymm11, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm11) + ymm5
vbroadcastss 0x2db4f5(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm7, %ymm11
vmulps %ymm7, %ymm11, %ymm7
vxorps %ymm26, %ymm4, %ymm11
vmulps %ymm4, %ymm11, %ymm4
vmulps %ymm28, %ymm10, %ymm10
vsubps %ymm10, %ymm4, %ymm4
vaddps %ymm10, %ymm8, %ymm8
vbroadcastss 0x2a7188(%rip), %ymm10 # 0x1eecb80
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm4
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vmulps %ymm9, %ymm21, %ymm10
vmulps %ymm0, %ymm9, %ymm12
vmulps %ymm9, %ymm30, %ymm13
vmulps %ymm6, %ymm9, %ymm9
vfmadd231ps %ymm25, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm25) + ymm10
vfmadd231ps %ymm27, %ymm8, %ymm12 # ymm12 = (ymm8 * ymm27) + ymm12
vfmadd231ps %ymm29, %ymm8, %ymm13 # ymm13 = (ymm8 * ymm29) + ymm13
vfmadd231ps %ymm8, %ymm31, %ymm9 # ymm9 = (ymm31 * ymm8) + ymm9
vfmadd231ps %ymm20, %ymm4, %ymm10 # ymm10 = (ymm4 * ymm20) + ymm10
vfmadd231ps %ymm22, %ymm4, %ymm12 # ymm12 = (ymm4 * ymm22) + ymm12
vfmadd231ps %ymm23, %ymm4, %ymm13 # ymm13 = (ymm4 * ymm23) + ymm13
vfmadd231ps %ymm4, %ymm24, %ymm9 # ymm9 = (ymm24 * ymm4) + ymm9
vfmadd231ps %ymm16, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm17) + ymm12
vfmadd231ps %ymm18, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm18) + ymm13
vfmadd231ps %ymm7, %ymm19, %ymm9 # ymm9 = (ymm19 * ymm7) + ymm9
vbroadcastss 0x80(%rsp), %ymm4
vmulps %ymm4, %ymm10, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vmulps %ymm4, %ymm13, %ymm13
vmulps %ymm4, %ymm9, %ymm6
vmovaps %ymm2, %ymm8
vmovaps 0x31a294(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm1, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm3, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm5, %ymm6, %ymm4
vmaxps %ymm4, %ymm5, %ymm14
vminps %ymm4, %ymm5, %ymm4
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm0
vpermt2ps %ymm31, %ymm7, %ymm0
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm2, %ymm8, %ymm7
vsubps %ymm1, %ymm9, %ymm6
vsubps %ymm3, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vbroadcastss 0x2a6bba(%rip), %ymm21 # 0x1eec714
vmovaps %ymm21, %ymm26
vfnmadd213ps %ymm21, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm21
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm0, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm0, %ymm24 # ymm24 = (ymm0 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm4, %ymm4
vsubps %ymm18, %ymm4, %ymm4
vmulps 0x2aad59(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2aad53(%rip){1to8}, %ymm4, %ymm4 # 0x1ef0944
vmovaps %ymm4, 0x80(%rsp)
vmulps %ymm14, %ymm14, %ymm4
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2a6b0d(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2a6aee(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm2, %ymm31, %ymm27
vsubps %ymm1, %ymm31, %ymm28
vmovaps %ymm3, 0xa0(%rsp)
vsubps %ymm3, %ymm31, %ymm29
vmovaps 0x480(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vmovaps 0x4a0(%rsp), %ymm21
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vmovaps 0x4c0(%rsp), %ymm3
vfmadd231ps %ymm27, %ymm3, %ymm22 # ymm22 = (ymm3 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm3, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm3) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm21
vmovaps 0x880(%rsp), %ymm16
vsubps %ymm21, %ymm16, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm3
vmovaps %ymm3, 0x1a0(%rsp)
vsubps %ymm4, %ymm3, %ymm4
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2a6e87(%rip){1to8}, %ymm15, %ymm24 # 0x1eecb8c
vmulps %ymm4, %ymm24, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c45e03
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm3
vfnmadd213ps %ymm26, %ymm3, %ymm31 # ymm31 = -(ymm3 * ymm31) + ymm26
vfmadd132ps %ymm3, %ymm3, %ymm31 # ymm31 = (ymm31 * ymm3) + ymm3
vxorps 0x2db172(%rip){1to8}, %ymm22, %ymm3 # 0x1f20ec0
vsubps %ymm30, %ymm3, %ymm3
vmulps %ymm31, %ymm3, %ymm3
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm3, %ymm30 # ymm30 = (ymm3 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x3c0(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x3a0(%rsp)
vbroadcastss 0x2a5c7c(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm3, %ymm16, %ymm30 {%k1}
vbroadcastss 0x2a6dd1(%rip), %ymm3 # 0x1eecb84
vblendmps %ymm31, %ymm3, %ymm31 {%k1}
vbroadcastss 0x2db101(%rip), %ymm23 # 0x1f20ec4
vandps %ymm23, %ymm21, %ymm3
vmovaps 0x460(%rsp), %ymm16
vmaxps %ymm3, %ymm16, %ymm3
vmulps 0x2ac0d3(%rip){1to8}, %ymm3, %ymm3 # 0x1ef1eb4
vandps %ymm23, %ymm15, %ymm23
vcmpltps %ymm3, %ymm23, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c47bb9
vbroadcastss 0x2a6913(%rip), %ymm3 # 0x1eec714
jmp 0x1c45e1d
vbroadcastss 0x2a5c13(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2a6d6d(%rip), %ymm31 # 0x1eecb84
vmovaps %ymm26, %ymm3
andb $0x7f, %al
je 0x1c46204
vmovaps %ymm21, 0x100(%rsp)
movq 0x18(%rsp), %rcx
vmovaps %ymm3, %ymm16
vmovss 0x100(%rcx,%rdx,4), %xmm3
vsubss 0x2b0(%rsp), %xmm3, %xmm3
vbroadcastss %xmm3, %ymm3
vminps %ymm31, %ymm3, %ymm3
vmovaps 0x860(%rsp), %ymm4
vmaxps %ymm30, %ymm4, %ymm4
vmulps %ymm29, %ymm13, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x480(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x4a0(%rsp), %ymm31
vfmadd231ps %ymm12, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm12) + ymm13
vmovaps 0x4c0(%rsp), %ymm30
vfmadd231ps %ymm11, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm11) + ymm13
vbroadcastss 0x2db01a(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2ab12e(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2daff5(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm26, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2a6c7a(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm4, %ymm4
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2a5af6(%rip), %ymm23 # 0x1eeba20
vmovaps %ymm23, %ymm11 {%k1}
vminps %ymm11, %ymm3, %ymm3
vxorps %xmm13, %xmm13, %xmm13
vsubps %ymm8, %ymm13, %ymm8
vsubps %ymm9, %ymm13, %ymm9
vsubps %ymm10, %ymm13, %ymm10
vmulps %ymm0, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm0, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm30, %ymm8 # ymm8 = -(ymm30 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm26, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm26, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm4, %ymm0
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm23, %ymm9 {%k1}
vminps %ymm9, %ymm3, %ymm8
vmovaps %ymm0, 0x320(%rsp)
vcmpleps %ymm8, %ymm0, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c4621c
vmovaps 0x3c0(%rsp), %ymm3
vmaxps 0x80(%rsp), %ymm13, %ymm4
vminps %ymm16, %ymm3, %ymm3
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm3, %ymm3
vmovaps 0x3a0(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2daf20(%rip), %ymm11 # 0x1f20f40
vaddps %ymm3, %ymm11, %ymm3
vbroadcastss 0x2d848b(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm3, %ymm12, %ymm3
vmovaps 0x60(%rsp), %ymm0
vmovaps 0x1c0(%rsp), %ymm19
vfmadd213ps %ymm0, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm3) + ymm0
vmovaps %ymm3, 0x3c0(%rsp)
vmaxps %ymm10, %ymm9, %ymm3
vaddps %ymm3, %ymm11, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vfmadd213ps %ymm0, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm3) + ymm0
vmovaps %ymm3, 0x3a0(%rsp)
vmulps %ymm4, %ymm4, %ymm3
vmovaps 0x1a0(%rsp), %ymm0
vsubps %ymm3, %ymm0, %ymm11
vmulps %ymm11, %ymm24, %ymm3
vsubps %ymm3, %ymm25, %ymm3
vcmpnltps %ymm10, %ymm3, %k0
kortestb %k0, %k0
je 0x1c46230
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm13, %ymm3, %k1
vsqrtps %ymm3, %ymm3
vaddps %ymm15, %ymm15, %ymm4
vrcp14ps %ymm4, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm4 # ymm4 = -(ymm9 * ymm4) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm4 # ymm4 = (ymm4 * ymm9) + ymm9
vxorps 0x2dadf7(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm3, %ymm9, %ymm9
vmulps %ymm4, %ymm9, %ymm12
vsubps %ymm22, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm13
vmovaps %ymm17, %ymm3
vfmadd213ps %ymm18, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm3) + ymm18
vmulps %ymm3, %ymm14, %ymm9
vmulps %ymm12, %ymm30, %ymm3
vmulps %ymm12, %ymm31, %ymm4
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm2, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm2
vsubps %ymm19, %ymm3, %ymm3
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm1, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm1
vsubps %ymm19, %ymm4, %ymm4
vmovaps 0xa0(%rsp), %ymm0
vfmadd213ps %ymm0, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm0
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm30, %ymm10
vmulps %ymm13, %ymm31, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm2, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm2
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm1, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm1
vsubps %ymm6, %ymm17, %ymm1
vfmadd213ps %ymm0, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm0
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2a58a6(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm12, %ymm0, %ymm2 {%k1}
vbroadcastss 0x2a69fb(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm13, %ymm0, %ymm0 {%k1}
vandps 0x100(%rsp), %ymm28, %ymm6
vmovaps 0x460(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x2abd06(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c46257
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2a69af(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2a5842(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm0 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c46257
vbroadcastss 0x2dacb6(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x130(%rsp), %xmm2
jmp 0x1c47a3a
vmovaps %ymm16, %ymm3
vmovaps 0x130(%rsp), %xmm2
jmp 0x1c47a3a
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm4, %xmm4, %xmm4
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2a57d2(%rip), %ymm2 # 0x1eeba20
vbroadcastss 0x2a692d(%rip), %ymm0 # 0x1eecb84
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm1, %ymm31, %ymm5 # ymm5 = (ymm31 * ymm1) + ymm5
vfmadd231ps %ymm10, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm10) + ymm5
vmovaps 0x320(%rsp), %ymm6
vmovaps %ymm6, 0x8c0(%rsp)
vminps %ymm2, %ymm8, %ymm1
vmovaps %ymm1, 0x8e0(%rsp)
vandps %ymm28, %ymm5, %ymm2
vmaxps %ymm0, %ymm6, %ymm5
vmovaps %ymm5, 0x600(%rsp)
vmovaps %ymm8, 0x620(%rsp)
vbroadcastss 0x2dac27(%rip), %ymm0 # 0x1f20ed4
vcmpltps %ymm0, %ymm2, %k1
kmovd %k1, 0xf4(%rsp)
vcmpleps %ymm1, %ymm6, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x360(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %esi
andb %al, %sil
movl %esi, 0x34(%rsp)
movl %esi, %eax
orb %cl, %al
je 0x1c46e0b
vmovaps %ymm0, %ymm2
movq %r14, 0x160(%rsp)
movb %r10b, 0x13(%rsp)
knotb %k0, %k1
vmulps %ymm9, %ymm21, %ymm0
vfmadd213ps %ymm0, %ymm31, %ymm4 # ymm4 = (ymm31 * ymm4) + ymm0
vfmadd213ps %ymm4, %ymm30, %ymm3 # ymm3 = (ymm30 * ymm3) + ymm4
vandps %ymm28, %ymm3, %ymm0
vcmpltps %ymm2, %ymm0, %k0
kmovd %k1, 0xec(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2daba1(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2dab93(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %r15, 0x158(%rsp)
vpbroadcastd %r15d, %ymm1
vmovdqa %ymm0, 0x640(%rsp)
vmovdqa %ymm1, 0x440(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %r12d
movl %ecx, 0xf0(%rsp)
andb %cl, %r12b
je 0x1c46e22
vmovaps 0x260(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x240(%rsp), %ymm3
vmovaps 0x200(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x2daaff(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2abaca(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2a0(%rsp)
vmovaps 0x320(%rsp), %ymm0
vaddps 0x340(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x380(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x2a55f9(%rip), %ymm0 # 0x1eeba20
vblendmps 0x320(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x3c0(%rsp,%rcx), %xmm10
vmovss 0x8c0(%rsp,%rcx), %xmm8
vmovaps 0x1e0(%rsp), %xmm0
vucomiss 0x2a5591(%rip), %xmm0 # 0x1eeba24
vmovss 0xf8(%rsp), %xmm0
jae 0x1c464df
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x80(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x80(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2ab9bd(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x48(%rsp)
movl $0x5, %r15d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x1f0(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2a61ed(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2a6646(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x80(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2aaa87(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2aaa83(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2aaa60(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x240(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x220(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x310(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2a5415(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x1a0(%rsp)
jb 0x1c46626
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c46656
vmovss %xmm11, 0x100(%rsp)
vmovss %xmm12, 0x14(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x14(%rsp), %xmm12
vmovss 0x100(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x80(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2da853(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x300(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2a6506(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2a64e4(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x200(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x240(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x220(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm12
vfmadd132ps 0x260(%rsp), %xmm2, %xmm12 # xmm12 = (xmm12 * mem) + xmm2
vdpps $0x7f, %xmm12, %xmm12, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2a6015(%rip), %xmm3, %xmm6 # 0x1eec718
vmulss 0x2a6011(%rip), %xmm0, %xmm8 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x2e0(%rsp)
vfnmadd213ss 0x2aa8d1(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm12, 0x100(%rsp)
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x2f0(%rsp)
jb 0x1c46751
vsqrtss %xmm0, %xmm0, %xmm5
jmp 0x1c4679c
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm6, 0x24(%rsp)
vmovss %xmm8, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm8
vmovss 0x24(%rsp), %xmm6
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps %xmm0, %xmm5
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm18
vmulss %xmm3, %xmm8, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm6, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm14
vaddss 0x2a5f37(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm15
vmulss 0x2a5f18(%rip), %xmm15, %xmm17 # 0x1eec718
vmulss 0x2a5f12(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x2a5212(%rip), %xmm0 # 0x1eeba24
jb 0x1c4681d
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c468b3
vmovss %xmm13, 0x24(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovss %xmm5, 0x40(%rsp)
vmovaps %xmm15, 0x2d0(%rsp)
vmovss %xmm17, 0x3c(%rsp)
vmovss %xmm19, 0x38(%rsp)
vmovaps %xmm4, 0x2c0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x2c0(%rsp), %xmm4
vmovss 0x38(%rsp), %xmm19
vmovss 0x3c(%rsp), %xmm17
vmovaps 0x2d0(%rsp), %xmm15
vmovss 0x40(%rsp), %xmm5
vmovaps 0xc0(%rsp), %xmm14
vmovss 0x24(%rsp), %xmm13
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps 0x1a0(%rsp), %xmm18
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss 0x2da607(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x60(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2aa108(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x80(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x240(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x220(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x300(%rsp), %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x2f0(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm12, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm2, %xmm1, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x2e0(%rsp), %xmm1, %xmm3
vmulss 0x48(%rsp), %xmm8, %xmm1
vmovss 0x44(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2da543(%rip){1to4}, %xmm12, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0xd0(%rsp), %xmm11
vdpps $0x7f, %xmm11, %xmm3, %xmm4
vdivss %xmm5, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm9, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x1f0(%rsp), %xmm7
vdpps $0x7f, %xmm11, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm9, %xmm3
vmulss %xmm15, %xmm19, %xmm2
vmulss %xmm15, %xmm15, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss %xmm2, %xmm17, %xmm6
vdpps $0x7f, %xmm7, %xmm9, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm3 # xmm3 = -(xmm14 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm14, %xmm7 # xmm7 = -(xmm14 * xmm5) + xmm7
vpermilps $0xff, 0x310(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm14, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm8, %xmm8
vbroadcastss 0x2da482(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
movb $0x1, %al
jbe 0x1c46a9e
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x2a0(%rsp), %xmm3
vfmadd231ss 0x2ab450(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c46a9e
vaddss 0x2b0(%rsp), %xmm8, %xmm8
vucomiss 0x5c(%rsp), %xmm8
jb 0x1c46a99
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x100(%rcx,%rax,4), %xmm5
vucomiss %xmm8, %xmm5
jae 0x1c46ab4
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c46dd9
decq %r15
jne 0x1c46503
jmp 0x1c46dd6
xorl %eax, %eax
vucomiss 0x2a4f66(%rip), %xmm10 # 0x1eeba24
jb 0x1c46a9b
vmovss 0x2a5c4c(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c46a9b
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2a5c31(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2a5c2b(%rip), %xmm18, %xmm3 # 0x1eec71c
movq 0x50(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x168(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x120(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c46a99
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c46b43
cmpq $0x0, 0x48(%r14)
jne 0x1c46b43
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c46a9e
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm12, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm12
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm3 # xmm3 = xmm12[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x50(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %ymm1
vbroadcastss 0x2cbb60(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2da32a(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x4e0(%rsp)
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps %ymm1, 0x540(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovdqa 0x400(%rsp), %ymm0
vmovdqa %ymm0, 0x5a0(%rsp)
movq 0x140(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm8, 0x100(%rcx,%rax,4)
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm0, 0x280(%rsp)
leaq 0x280(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x180(%rsp)
movq %rcx, 0x188(%rsp)
leaq 0x4e0(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x8, 0x198(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm5, 0x80(%rsp)
je 0x1c46d0c
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2da1c2(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2da1b8(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0x280(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c46db2
movq 0x50(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c46d76
testb $0x2, (%rcx)
jne 0x1c46d3e
testb $0x40, 0x3e(%r14)
je 0x1c46d76
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2da158(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2da14e(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0x280(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x188(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2a5de5(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %r14b
jmp 0x1c46db5
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c46b3c
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x100(%rcx,%rax,4)
jmp 0x1c46b3c
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %bl
movq 0x28(%rsp), %rdx
movq 0x18(%rsp), %rax
vmovaps 0x380(%rsp), %ymm0
vcmpleps 0x100(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c46419
jmp 0x1c46e2b
vbroadcastss 0x2a5900(%rip), %ymm3 # 0x1eec714
vmovaps 0x130(%rsp), %xmm2
jmp 0x1c47a3a
vbroadcastss 0x2da099(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x340(%rsp), %ymm3
vaddps 0x360(%rsp), %ymm3, %ymm0
movq 0x18(%rsp), %rax
vcmpleps 0x100(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd 0xf4(%rsp), %k1
kmovd 0xec(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x34(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2da066(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2da058(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x360(%rsp)
vpcmpled 0x440(%rsp), %ymm0, %k0
kmovd %k0, %r12d
movl %ecx, 0x34(%rsp)
andb %cl, %r12b
je 0x1c47915
vmovaps 0x600(%rsp), %ymm7
vmovaps 0x260(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x240(%rsp), %ymm5
vmovaps 0x200(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2aafa7(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x2a0(%rsp)
vmovaps %ymm7, 0x320(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x380(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x2a4adb(%rip), %ymm0 # 0x1eeba20
vblendmps 0x320(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x3a0(%rsp,%rcx), %xmm10
vmovss 0x620(%rsp,%rcx), %xmm8
vmovaps 0x1e0(%rsp), %xmm0
vucomiss 0x2a4a73(%rip), %xmm0 # 0x1eeba24
vmovss 0xfc(%rsp), %xmm0
jae 0x1c46ffd
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x80(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x80(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2aae9f(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x48(%rsp)
movl $0x5, %r15d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x1f0(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2a56cf(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2a5b28(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x80(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2a9f69(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2a9f65(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2a9f42(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x240(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x220(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x310(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x2a48f7(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x1a0(%rsp)
jb 0x1c47144
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c47174
vmovss %xmm11, 0x100(%rsp)
vmovss %xmm12, 0x14(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x14(%rsp), %xmm12
vmovss 0x100(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x80(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2d9d35(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x300(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2a59e8(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2a59c6(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x200(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x240(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x220(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm12
vfmadd132ps 0x260(%rsp), %xmm2, %xmm12 # xmm12 = (xmm12 * mem) + xmm2
vdpps $0x7f, %xmm12, %xmm12, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2a54f7(%rip), %xmm3, %xmm6 # 0x1eec718
vmulss 0x2a54f3(%rip), %xmm0, %xmm8 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x2e0(%rsp)
vfnmadd213ss 0x2a9db3(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm12, 0x100(%rsp)
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x2f0(%rsp)
jb 0x1c4726f
vsqrtss %xmm0, %xmm0, %xmm5
jmp 0x1c472ba
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm6, 0x24(%rsp)
vmovss %xmm8, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm8
vmovss 0x24(%rsp), %xmm6
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps %xmm0, %xmm5
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm18
vmulss %xmm3, %xmm8, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm6, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm14
vaddss 0x2a5419(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm15
vmulss 0x2a53fa(%rip), %xmm15, %xmm17 # 0x1eec718
vmulss 0x2a53f4(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x2a46f4(%rip), %xmm0 # 0x1eeba24
jb 0x1c4733b
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c473d1
vmovss %xmm13, 0x24(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovss %xmm5, 0x40(%rsp)
vmovaps %xmm15, 0x2d0(%rsp)
vmovss %xmm17, 0x3c(%rsp)
vmovss %xmm19, 0x38(%rsp)
vmovaps %xmm4, 0x2c0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x2c0(%rsp), %xmm4
vmovss 0x38(%rsp), %xmm19
vmovss 0x3c(%rsp), %xmm17
vmovaps 0x2d0(%rsp), %xmm15
vmovss 0x40(%rsp), %xmm5
vmovaps 0xc0(%rsp), %xmm14
vmovss 0x24(%rsp), %xmm13
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps 0x1a0(%rsp), %xmm18
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss 0x2d9ae9(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x60(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2a95ea(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x80(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x240(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x220(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x300(%rsp), %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x2f0(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm12, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm2, %xmm1, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x2e0(%rsp), %xmm1, %xmm3
vmulss 0x48(%rsp), %xmm8, %xmm1
vmovss 0x44(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2d9a25(%rip){1to4}, %xmm12, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0xd0(%rsp), %xmm11
vdpps $0x7f, %xmm11, %xmm3, %xmm4
vdivss %xmm5, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm9, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x1f0(%rsp), %xmm7
vdpps $0x7f, %xmm11, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm9, %xmm3
vmulss %xmm15, %xmm19, %xmm2
vmulss %xmm15, %xmm15, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss %xmm2, %xmm17, %xmm6
vdpps $0x7f, %xmm7, %xmm9, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm3 # xmm3 = -(xmm14 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm14, %xmm7 # xmm7 = -(xmm14 * xmm5) + xmm7
vpermilps $0xff, 0x310(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm14, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm8, %xmm8
vbroadcastss 0x2d9964(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
movb $0x1, %al
jbe 0x1c475bc
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x2a0(%rsp), %xmm3
vfmadd231ss 0x2aa932(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c475bc
vaddss 0x2b0(%rsp), %xmm8, %xmm8
vucomiss 0x5c(%rsp), %xmm8
jb 0x1c475b7
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x100(%rcx,%rax,4), %xmm4
vucomiss %xmm8, %xmm4
jae 0x1c475d2
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c478e5
decq %r15
jne 0x1c47021
jmp 0x1c478e2
xorl %eax, %eax
vucomiss 0x2a4448(%rip), %xmm10 # 0x1eeba24
jb 0x1c475b9
vmovss 0x2a512e(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c475b9
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2a5113(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2a510d(%rip), %xmm18, %xmm3 # 0x1eec71c
movq 0x50(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x168(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x120(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c475b7
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c47661
cmpq $0x0, 0x48(%r14)
jne 0x1c47661
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c475bc
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm12, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm12
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm3 # xmm3 = xmm12[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x50(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %ymm1
vbroadcastss 0x2cb042(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2d980c(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x4e0(%rsp)
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps %ymm1, 0x540(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovdqa 0x400(%rsp), %ymm0
vmovdqa %ymm0, 0x5a0(%rsp)
movq 0x140(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm8, 0x100(%rcx,%rax,4)
vmovaps 0x3e0(%rsp), %ymm0
vmovaps %ymm0, 0x280(%rsp)
leaq 0x280(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x180(%rsp)
movq %rcx, 0x188(%rsp)
leaq 0x4e0(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x8, 0x198(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm4, 0x80(%rsp)
je 0x1c47821
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2d96a3(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0x280(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c478be
movq 0x50(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c47882
testb $0x2, (%rcx)
jne 0x1c47853
testb $0x40, 0x3e(%r14)
je 0x1c47882
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2d9642(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0x280(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x188(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2a52d9(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %r14b
jmp 0x1c478c1
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c4765a
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm4, 0x100(%rcx,%rax,4)
jmp 0x1c4765a
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %bl
movq 0x28(%rsp), %rdx
movq 0x18(%rsp), %rax
vmovaps 0x380(%rsp), %ymm0
vcmpleps 0x100(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c46f37
vmovdqa 0x440(%rsp), %ymm1
vpcmpltd 0x360(%rsp), %ymm1, %k1
vmovaps 0x8c0(%rsp), %ymm0
vpcmpltd 0x640(%rsp), %ymm1, %k2
vmovaps 0x340(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
movq 0x18(%rsp), %rax
vbroadcastss 0x100(%rax,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0xf0(%rsp), %ecx
andb %al, %cl
vmovaps 0x600(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x34(%rsp), %esi
andb %al, %sil
orb %cl, %sil
je 0x1c47a03
movl %r13d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0x900(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x920(%rsp,%rax)
vmovaps 0x130(%rsp), %xmm2
vmovlps %xmm2, 0x940(%rsp,%rax)
movq 0x158(%rsp), %r15
leal 0x1(%r15), %ecx
movl %ecx, 0x948(%rsp,%rax)
incl %r13d
movq 0x150(%rsp), %r9
vbroadcastss 0x2a4d28(%rip), %ymm3 # 0x1eec714
movb 0x13(%rsp), %r10b
movq 0x148(%rsp), %r11
movq 0x160(%rsp), %r14
jmp 0x1c47a3a
movq 0x150(%rsp), %r9
vbroadcastss 0x2a4d00(%rip), %ymm3 # 0x1eec714
movb 0x13(%rsp), %r10b
movq 0x148(%rsp), %r11
movq 0x160(%rsp), %r14
movq 0x158(%rsp), %r15
vmovaps 0x130(%rsp), %xmm2
testl %r13d, %r13d
je 0x1c47c08
leal -0x1(%r13), %r8d
leaq (%r8,%r8,2), %rcx
shlq $0x5, %rcx
vmovaps 0x920(%rsp,%rcx), %ymm0
movzbl 0x900(%rsp,%rcx), %esi
vaddps 0x340(%rsp), %ymm0, %ymm1
movq 0x18(%rsp), %rax
vcmpleps 0x100(%rax,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %eax
andl %esi, %eax
je 0x1c47b30
kmovd %eax, %k1
vbroadcastss 0x2a3f90(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %al, %sil
je 0x1c47ac8
movzbl %sil, %edi
jmp 0x1c47acb
movzbl %al, %edi
leaq (%rsp,%rcx), %rsi
addq $0x900, %rsi # imm = 0x900
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r15d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %eax, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c47b06
movl %r13d, %r8d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x2d9402(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x4e0(%rsp)
vmovsd 0x4e0(%rsp,%rcx,4), %xmm2
movl %r8d, %r13d
testb %al, %al
je 0x1c47a3a
vmovaps 0x840(%rsp), %ymm16
vmovaps 0x820(%rsp), %ymm17
vmovaps 0x800(%rsp), %ymm18
vmovaps 0x7e0(%rsp), %ymm19
vmovaps 0x7c0(%rsp), %ymm20
vmovaps 0x7a0(%rsp), %ymm22
vmovaps 0x780(%rsp), %ymm23
vmovaps 0x760(%rsp), %ymm24
vmovaps 0x740(%rsp), %ymm25
vmovaps 0x720(%rsp), %ymm27
vmovaps 0x700(%rsp), %ymm29
vmovaps 0x6e0(%rsp), %ymm31
vmovaps 0x6c0(%rsp), %ymm21
vmovaps 0x6a0(%rsp), %ymm30
vmovaps 0x680(%rsp), %ymm6
jmp 0x1c4588c
vcmpleps 0x2d933c(%rip), %ymm4, %k2 # 0x1f20f00
vbroadcastss 0x2a4fb7(%rip), %ymm4 # 0x1eecb84
vbroadcastss 0x2a3e49(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm4, %ymm16, %ymm3 {%k2}
vmovaps %ymm3, %ymm30 {%k1}
vblendmps %ymm16, %ymm4, %ymm3 {%k2}
kmovd %k2, %ecx
vmovaps %ymm3, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %al, %sil
movl %esi, %eax
jmp 0x1c45df8
testb $0x1, %bl
jne 0x1c47c35
movq 0x18(%rsp), %rax
vmovaps 0x660(%rsp), %ymm0
vcmpleps 0x100(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
setne %r10b
jne 0x1c45571
andb $0x1, %r10b
movl %r10d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 16>::occluded_t<embree::avx512::SweepCurve1IntersectorK<embree::BSplineCurveT, 16>, embree::avx512::Occluded1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Shadow-ray (occlusion / any-hit) query: tests lane k of a K-wide ray packet
// against a block of up to M curve primitives and returns true as soon as any
// curve occludes the ray. 'pre' carries per-ray precomputed data, 'context'
// the query context, and 'prim' the packed curve block.
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Broad phase: intersect ray k against the primitive block's bounds;
// 'valid' flags the candidate curves and tNear receives their entry distances.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Bitmask of candidate curve slots still to be tested.
size_t mask = movemask(valid);
while (mask)
{
// bscf extracts the index of the lowest set bit and clears it from 'mask',
// so each candidate is processed exactly once.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i (Vec3ff: position + radius in w).
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch: while curve i is being tested below, pull the vertex
// data of the next candidate (i1) into L1 and the one after (i2) into L2,
// hiding memory latency across loop iterations.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Narrow phase: exact curve intersection; the epilog commits the hit.
// For occlusion queries any hit suffices, so return immediately.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
return true;
// Re-filter the remaining candidates against the current ray.tfar, dropping
// curves whose broad-phase entry distance already lies beyond the ray extent.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
// No candidate occluded the ray.
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xc40, %rsp # imm = 0xC40
movq %r8, %r9
movzbl 0x1(%r8), %r10d
leaq (%r10,%r10,4), %r11
leaq (%r11,%r11,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%rdx,4), %xmm1
vmovss 0x100(%rsi,%rdx,4), %xmm2
vinsertps $0x10, 0x40(%rsi,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%rsi,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%rsi,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%rsi,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%r10,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%r10,%r10,2), %rbx
vpmovsxbd 0x6(%r8,%rbx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%r10,%r11,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rbx,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %r10, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%r10,%r10,8), %rdi
leal (%rdi,%rdi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %r10, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %r11d
vpmovsxbd 0x6(%r9,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2c7d3b(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2d6504(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2d6472(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x2a6587(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2a1c71(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%r10,8), %r8
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rdi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%r10,%r10), %rdi
addq %r10, %r11
shlq $0x3, %rbx
subq %r10, %rbx
movl %r10d, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %rdi, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%r11), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rbx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc0(%rsi,%rdx,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2d5381(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
movq %rsi, 0x18(%rsp)
vminps 0x200(%rsi,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2d5354(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %r10d, %ymm1
vpcmpgtd 0x30fd50(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x520(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r10b
je 0x1c4d334
movq %rcx, 0x50(%rsp)
leaq (%r9,%rax), %r11
addq $0x6, %r11
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %r11
leaq 0x880(%rsp), %rax
addq $0x1c0, %rax # imm = 0x1C0
movq %rax, 0x140(%rsp)
movl $0x1, %eax
shlxl %edx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x7c0(%rsp)
movq %rdx, 0x28(%rsp)
movq %r9, 0x150(%rsp)
movq %r11, 0x148(%rsp)
tzcntq %r14, %rcx
blsrq %r14, %r14
movl 0x6(%r9,%rcx,4), %eax
shll $0x6, %ecx
movq %r14, %rdi
vmovups (%r11,%rcx), %xmm0
subq $0x1, %rdi
jb 0x1c4aca1
andq %r14, %rdi
tzcntq %r14, %rsi
shll $0x6, %esi
prefetcht0 (%r11,%rsi)
prefetcht0 0x40(%r11,%rsi)
testq %rdi, %rdi
je 0x1c4aca1
tzcntq %rdi, %rsi
shll $0x6, %esi
prefetcht1 (%r11,%rsi)
prefetcht1 0x40(%r11,%rsi)
vmovups 0x10(%r11,%rcx), %xmm1
vmovups 0x20(%r11,%rcx), %xmm2
vmovups 0x30(%r11,%rcx), %xmm3
movq 0x18(%rsp), %rcx
vmovss (%rcx,%rdx,4), %xmm4
vinsertps $0x1c, 0x40(%rcx,%rdx,4), %xmm4, %xmm4 # xmm4 = xmm4[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%rcx,%rdx,4), %xmm4, %xmm4 # xmm4 = xmm4[0,1],mem[0],zero
movl 0x2(%r9), %esi
vbroadcastss 0x100(%rcx,%rdx,4), %ymm12
vbroadcastss 0x140(%rcx,%rdx,4), %ymm13
vunpcklps %xmm13, %xmm12, %xmm5 # xmm5 = xmm12[0],xmm13[0],xmm12[1],xmm13[1]
vbroadcastss 0x180(%rcx,%rdx,4), %ymm14
vinsertps $0x28, %xmm14, %xmm5, %xmm9 # xmm9 = xmm5[0,1],xmm14[0],zero
vaddps %xmm1, %xmm0, %xmm5
vaddps %xmm2, %xmm5, %xmm5
vaddps %xmm3, %xmm5, %xmm5
vmulps 0x2d25e6(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm4, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0xc0(%rcx,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x2a62ad(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x1f0(%rsp)
vmovaps %ymm6, 0x320(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm6) + xmm4
vblendps $0x8, %xmm8, %xmm4, %xmm4 # xmm4 = xmm4[0,1,2],xmm8[3]
vsubps %xmm4, %xmm0, %xmm6
vsubps %xmm4, %xmm2, %xmm7
vsubps %xmm4, %xmm1, %xmm8
vsubps %xmm4, %xmm3, %xmm3
vbroadcastss %xmm6, %ymm16
vbroadcastss 0x2c7970(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm17
vbroadcastss 0x2d6139(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm18
vbroadcastss 0x2d6126(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x260(%rsp)
vpermps %ymm6, %ymm2, %ymm19
vbroadcastss %xmm8, %ymm20
vpermps %ymm8, %ymm0, %ymm22
vpermps %ymm8, %ymm1, %ymm23
vmovaps %ymm8, 0x220(%rsp)
vpermps %ymm8, %ymm2, %ymm24
vbroadcastss %xmm7, %ymm25
vpermps %ymm7, %ymm0, %ymm27
vpermps %ymm7, %ymm1, %ymm29
vmovaps %ymm7, 0x240(%rsp)
vpermps %ymm7, %ymm2, %ymm31
vbroadcastss %xmm3, %ymm21
vpermps %ymm3, %ymm0, %ymm0
vmovaps %ymm0, 0x760(%rsp)
vpermps %ymm3, %ymm1, %ymm30
vmovaps %ymm3, 0x200(%rsp)
vpermps %ymm3, %ymm2, %ymm6
vmovaps %ymm14, 0x460(%rsp)
vmulss %xmm14, %xmm14, %xmm0
vmovaps %ymm13, 0x480(%rsp)
vfmadd231ps %ymm13, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm13) + ymm0
vmovaps %ymm12, 0x4a0(%rsp)
vfmadd231ps %ymm12, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm12) + ymm0
vbroadcastss %xmm0, %ymm1
vandps %ymm28, %ymm1, %ymm0
vmovaps %ymm0, 0x440(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x290(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x720(%rsp)
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x840(%rsp)
movl $0x1, %r15d
xorl %r13d, %r13d
xorl %ebx, %ebx
movq %rsi, 0x168(%rsp)
vpbroadcastd %esi, %zmm0
vmovdqa64 %zmm0, 0x800(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xfc(%rsp)
vmovaps %xmm11, 0x1e0(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xf8(%rsp)
vmovsd 0x2a180a(%rip), %xmm2 # 0x1eec6f0
vbroadcastss 0x2a1825(%rip), %ymm3 # 0x1eec714
vmovaps %ymm1, 0x740(%rsp)
vmovaps %ymm16, 0x700(%rsp)
vmovaps %ymm17, 0x6e0(%rsp)
vmovaps %ymm18, 0x6c0(%rsp)
vmovaps %ymm19, 0x6a0(%rsp)
vmovaps %ymm20, 0x680(%rsp)
vmovaps %ymm22, 0x660(%rsp)
vmovaps %ymm23, 0x640(%rsp)
vmovaps %ymm24, 0x620(%rsp)
vmovaps %ymm25, 0x600(%rsp)
vmovaps %ymm27, 0x5e0(%rsp)
vmovaps %ymm29, 0x5c0(%rsp)
vmovaps %ymm31, 0x5a0(%rsp)
vmovaps %ymm21, 0x580(%rsp)
vmovaps %ymm30, 0x560(%rsp)
vmovaps %ymm6, 0x540(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vsubss %xmm2, %xmm0, %xmm0
vmulss 0x2d5f4f(%rip), %xmm0, %xmm1 # 0x1f20ed0
vmovaps %xmm1, 0x80(%rsp)
vmovaps %xmm2, 0x130(%rsp)
vbroadcastss %xmm2, %ymm4
vbroadcastss %xmm0, %ymm0
vmovaps %ymm4, 0x60(%rsp)
vmovaps %ymm0, 0x1c0(%rsp)
vfmadd231ps 0x2d5f6b(%rip), %ymm0, %ymm4 # ymm4 = (ymm0 * mem) + ymm4
vsubps %ymm4, %ymm3, %ymm7
vmulps %ymm7, %ymm7, %ymm8
vmulps %ymm7, %ymm8, %ymm0
vbroadcastss 0x2a1bc1(%rip), %ymm28 # 0x1eecb8c
vmulps %ymm28, %ymm0, %ymm1
vmulps %ymm4, %ymm4, %ymm9
vmulps %ymm4, %ymm9, %ymm2
vaddps %ymm1, %ymm2, %ymm1
vmulps %ymm7, %ymm4, %ymm10
vmulps %ymm7, %ymm10, %ymm5
vbroadcastss 0x2a600e(%rip), %ymm14 # 0x1ef0ffc
vmulps %ymm5, %ymm14, %ymm11
vmulps %ymm4, %ymm10, %ymm12
vbroadcastss 0x2a5ff5(%rip), %ymm15 # 0x1ef0ff4
vmulps %ymm15, %ymm12, %ymm13
vaddps %ymm13, %ymm11, %ymm11
vaddps %ymm1, %ymm11, %ymm1
vmulps %ymm28, %ymm2, %ymm11
vaddps %ymm0, %ymm11, %ymm11
vmulps %ymm14, %ymm12, %ymm12
vmulps %ymm5, %ymm15, %ymm5
vaddps %ymm5, %ymm12, %ymm5
vaddps %ymm5, %ymm11, %ymm5
vbroadcastss 0x2a5fcf(%rip), %ymm14 # 0x1ef1000
vmulps %ymm0, %ymm14, %ymm11
vmulps %ymm1, %ymm14, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm2, %ymm14, %ymm5
vmulps %ymm5, %ymm21, %ymm2
vmovaps 0x760(%rsp), %ymm0
vmulps %ymm5, %ymm0, %ymm1
vmulps %ymm5, %ymm30, %ymm3
vmulps %ymm5, %ymm6, %ymm5
vfmadd231ps %ymm25, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm25) + ymm2
vfmadd231ps %ymm27, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm27) + ymm1
vfmadd231ps %ymm29, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm29) + ymm3
vfmadd231ps %ymm13, %ymm31, %ymm5 # ymm5 = (ymm31 * ymm13) + ymm5
vfmadd231ps %ymm20, %ymm12, %ymm2 # ymm2 = (ymm12 * ymm20) + ymm2
vfmadd231ps %ymm22, %ymm12, %ymm1 # ymm1 = (ymm12 * ymm22) + ymm1
vfmadd231ps %ymm23, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm23) + ymm3
vfmadd231ps %ymm12, %ymm24, %ymm5 # ymm5 = (ymm24 * ymm12) + ymm5
vfmadd231ps %ymm16, %ymm11, %ymm2 # ymm2 = (ymm11 * ymm16) + ymm2
vfmadd231ps %ymm17, %ymm11, %ymm1 # ymm1 = (ymm11 * ymm17) + ymm1
vfmadd231ps %ymm18, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm18) + ymm3
vfmadd231ps %ymm11, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm11) + ymm5
vbroadcastss 0x2d5e10(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm7, %ymm11
vmulps %ymm7, %ymm11, %ymm7
vxorps %ymm26, %ymm4, %ymm11
vmulps %ymm4, %ymm11, %ymm4
vmulps %ymm28, %ymm10, %ymm10
vsubps %ymm10, %ymm4, %ymm4
vaddps %ymm10, %ymm8, %ymm8
vbroadcastss 0x2a1aa3(%rip), %ymm10 # 0x1eecb80
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm4
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vmulps %ymm9, %ymm21, %ymm10
vmulps %ymm0, %ymm9, %ymm12
vmulps %ymm9, %ymm30, %ymm13
vmulps %ymm6, %ymm9, %ymm9
vfmadd231ps %ymm25, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm25) + ymm10
vfmadd231ps %ymm27, %ymm8, %ymm12 # ymm12 = (ymm8 * ymm27) + ymm12
vfmadd231ps %ymm29, %ymm8, %ymm13 # ymm13 = (ymm8 * ymm29) + ymm13
vfmadd231ps %ymm8, %ymm31, %ymm9 # ymm9 = (ymm31 * ymm8) + ymm9
vfmadd231ps %ymm20, %ymm4, %ymm10 # ymm10 = (ymm4 * ymm20) + ymm10
vfmadd231ps %ymm22, %ymm4, %ymm12 # ymm12 = (ymm4 * ymm22) + ymm12
vfmadd231ps %ymm23, %ymm4, %ymm13 # ymm13 = (ymm4 * ymm23) + ymm13
vfmadd231ps %ymm4, %ymm24, %ymm9 # ymm9 = (ymm24 * ymm4) + ymm9
vfmadd231ps %ymm16, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm17) + ymm12
vfmadd231ps %ymm18, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm18) + ymm13
vfmadd231ps %ymm7, %ymm19, %ymm9 # ymm9 = (ymm19 * ymm7) + ymm9
vbroadcastss 0x80(%rsp), %ymm4
vmulps %ymm4, %ymm10, %ymm11
vmulps %ymm4, %ymm12, %ymm12
vmulps %ymm4, %ymm13, %ymm13
vmulps %ymm4, %ymm9, %ymm6
vmovaps %ymm2, %ymm8
vmovaps 0x314baf(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm1, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm3, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm5, %ymm6, %ymm4
vmaxps %ymm4, %ymm5, %ymm14
vminps %ymm4, %ymm5, %ymm4
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm0
vpermt2ps %ymm31, %ymm7, %ymm0
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm2, %ymm8, %ymm7
vsubps %ymm1, %ymm9, %ymm6
vsubps %ymm3, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vbroadcastss 0x2a14d5(%rip), %ymm21 # 0x1eec714
vmovaps %ymm21, %ymm26
vfnmadd213ps %ymm21, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm21
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm0, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm0, %ymm24 # ymm24 = (ymm0 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm4, %ymm4
vsubps %ymm18, %ymm4, %ymm4
vmulps 0x2a5674(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x2a566e(%rip){1to8}, %ymm4, %ymm4 # 0x1ef0944
vmovaps %ymm4, 0x80(%rsp)
vmulps %ymm14, %ymm14, %ymm4
vrsqrt14ps %ymm17, %ymm15
vmulps 0x2a1428(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x2a1409(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm2, %ymm31, %ymm27
vsubps %ymm1, %ymm31, %ymm28
vmovaps %ymm3, 0xa0(%rsp)
vsubps %ymm3, %ymm31, %ymm29
vmovaps 0x460(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vmovaps 0x480(%rsp), %ymm21
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vmovaps 0x4a0(%rsp), %ymm3
vfmadd231ps %ymm27, %ymm3, %ymm22 # ymm22 = (ymm3 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm3, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm3) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm21
vmovaps 0x740(%rsp), %ymm16
vsubps %ymm21, %ymm16, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm3
vmovaps %ymm3, 0x1a0(%rsp)
vsubps %ymm4, %ymm3, %ymm4
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2a17a2(%rip){1to8}, %ymm15, %ymm24 # 0x1eecb8c
vmulps %ymm4, %ymm24, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c4b4e8
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm3
vfnmadd213ps %ymm26, %ymm3, %ymm31 # ymm31 = -(ymm3 * ymm31) + ymm26
vfmadd132ps %ymm3, %ymm3, %ymm31 # ymm31 = (ymm31 * ymm3) + ymm3
vxorps 0x2d5a8d(%rip){1to8}, %ymm22, %ymm3 # 0x1f20ec0
vsubps %ymm30, %ymm3, %ymm3
vmulps %ymm31, %ymm3, %ymm3
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm3, %ymm30 # ymm30 = (ymm3 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x400(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x3e0(%rsp)
vbroadcastss 0x2a0597(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm3, %ymm16, %ymm30 {%k1}
vbroadcastss 0x2a16ec(%rip), %ymm3 # 0x1eecb84
vblendmps %ymm31, %ymm3, %ymm31 {%k1}
vbroadcastss 0x2d5a1c(%rip), %ymm23 # 0x1f20ec4
vandps %ymm23, %ymm21, %ymm3
vmovaps 0x440(%rsp), %ymm16
vmaxps %ymm3, %ymm16, %ymm3
vmulps 0x2a69ee(%rip){1to8}, %ymm3, %ymm3 # 0x1ef1eb4
vandps %ymm23, %ymm15, %ymm23
vcmpltps %ymm3, %ymm23, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c4d2b5
vbroadcastss 0x2a122e(%rip), %ymm3 # 0x1eec714
jmp 0x1c4b502
vbroadcastss 0x2a052e(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2a1688(%rip), %ymm31 # 0x1eecb84
vmovaps %ymm26, %ymm3
andb $0x7f, %al
je 0x1c4b8ea
vmovaps %ymm21, 0x100(%rsp)
movq 0x18(%rsp), %rcx
vmovaps %ymm3, %ymm16
vmovss 0x200(%rcx,%rdx,4), %xmm3
vsubss 0x290(%rsp), %xmm3, %xmm3
vbroadcastss %xmm3, %ymm3
vminps %ymm31, %ymm3, %ymm3
vmovaps 0x720(%rsp), %ymm4
vmaxps %ymm30, %ymm4, %ymm4
vmulps %ymm29, %ymm13, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x460(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x480(%rsp), %ymm31
vfmadd231ps %ymm12, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm12) + ymm13
vmovaps 0x4a0(%rsp), %ymm30
vfmadd231ps %ymm11, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm11) + ymm13
vbroadcastss 0x2d5935(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x2a5a49(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2d5910(%rip), %ymm26 # 0x1f20ec0
vxorps %ymm26, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm26, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2a1595(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm4, %ymm4
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2a0412(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm3, %ymm3
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm0, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm0, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm30, %ymm8 # ymm8 = -(ymm30 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm26, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm26, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm4, %ymm0
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm3, %ymm8
vmovaps %ymm0, 0x300(%rsp)
vcmpleps %ymm8, %ymm0, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c4b902
vmovaps 0x400(%rsp), %ymm3
vmaxps 0x80(%rsp), %ymm23, %ymm4
vminps %ymm16, %ymm3, %ymm3
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm3, %ymm3
vmovaps 0x3e0(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2d5839(%rip), %ymm11 # 0x1f20f40
vaddps %ymm3, %ymm11, %ymm3
vbroadcastss 0x2d2da4(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm3, %ymm12, %ymm3
vmovaps 0x60(%rsp), %ymm0
vmovaps 0x1c0(%rsp), %ymm13
vfmadd213ps %ymm0, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm3) + ymm0
vmovaps %ymm3, 0x400(%rsp)
vmaxps %ymm10, %ymm9, %ymm3
vaddps %ymm3, %ymm11, %ymm3
vmulps %ymm3, %ymm12, %ymm3
vfmadd213ps %ymm0, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm3) + ymm0
vmovaps %ymm3, 0x3e0(%rsp)
vmulps %ymm4, %ymm4, %ymm3
vmovaps 0x1a0(%rsp), %ymm0
vsubps %ymm3, %ymm0, %ymm11
vmulps %ymm11, %ymm24, %ymm3
vsubps %ymm3, %ymm25, %ymm3
vcmpnltps %ymm10, %ymm3, %k0
kortestb %k0, %k0
je 0x1c4b916
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm3, %k1
vsqrtps %ymm3, %ymm3
vaddps %ymm15, %ymm15, %ymm4
vrcp14ps %ymm4, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm4 # ymm4 = -(ymm9 * ymm4) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm4 # ymm4 = (ymm4 * ymm9) + ymm9
vxorps 0x2d5711(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm3, %ymm9, %ymm9
vmulps %ymm4, %ymm9, %ymm12
vsubps %ymm22, %ymm3, %ymm3
vmulps %ymm4, %ymm3, %ymm13
vmovaps %ymm17, %ymm3
vfmadd213ps %ymm18, %ymm12, %ymm3 # ymm3 = (ymm12 * ymm3) + ymm18
vmulps %ymm3, %ymm14, %ymm9
vmulps %ymm12, %ymm30, %ymm3
vmulps %ymm12, %ymm31, %ymm4
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm2, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm2
vsubps %ymm19, %ymm3, %ymm3
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm1, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm1
vsubps %ymm19, %ymm4, %ymm4
vmovaps 0xa0(%rsp), %ymm0
vfmadd213ps %ymm0, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm0
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm30, %ymm10
vmulps %ymm13, %ymm31, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm2, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm2
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm1, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm1
vsubps %ymm6, %ymm17, %ymm1
vfmadd213ps %ymm0, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm0
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x2a01c0(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm12, %ymm0, %ymm2 {%k1}
vbroadcastss 0x2a1315(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm13, %ymm0, %ymm0 {%k1}
vandps 0x100(%rsp), %ymm28, %ymm6
vmovaps 0x440(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x2a6620(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c4b93d
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x2a12c9(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x2a015c(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm0 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c4b93d
vbroadcastss 0x2d55d0(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x130(%rsp), %xmm2
jmp 0x1c4d133
vmovaps %ymm16, %ymm3
vmovaps 0x130(%rsp), %xmm2
jmp 0x1c4d133
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm4, %xmm4, %xmm4
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x2a00ec(%rip), %ymm2 # 0x1eeba20
vbroadcastss 0x2a1247(%rip), %ymm0 # 0x1eecb84
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm1, %ymm31, %ymm5 # ymm5 = (ymm31 * ymm1) + ymm5
vfmadd231ps %ymm10, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm10) + ymm5
vmovaps 0x300(%rsp), %ymm6
vmovaps %ymm6, 0x780(%rsp)
vminps %ymm2, %ymm8, %ymm1
vmovaps %ymm1, 0x7a0(%rsp)
vandps %ymm28, %ymm5, %ymm2
vmaxps %ymm0, %ymm6, %ymm5
vmovaps %ymm5, 0x4c0(%rsp)
vmovaps %ymm8, 0x4e0(%rsp)
vbroadcastss 0x2d5541(%rip), %ymm0 # 0x1f20ed4
vcmpltps %ymm0, %ymm2, %k1
kmovd %k1, 0xf4(%rsp)
vcmpleps %ymm1, %ymm6, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x3a0(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %esi
andb %al, %sil
movl %esi, 0x34(%rsp)
movl %esi, %eax
orb %cl, %al
je 0x1c4c4f9
vmovaps %ymm0, %ymm2
movq %r14, 0x160(%rsp)
movb %r10b, 0x13(%rsp)
knotb %k0, %k1
vmulps %ymm9, %ymm21, %ymm0
vfmadd213ps %ymm0, %ymm31, %ymm4 # ymm4 = (ymm31 * ymm4) + ymm0
vfmadd213ps %ymm4, %ymm30, %ymm3 # ymm3 = (ymm30 * ymm3) + ymm4
vandps %ymm28, %ymm3, %ymm0
vcmpltps %ymm2, %ymm0, %k0
kmovd %k1, 0xec(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2d54bb(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2d54ad(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %r15, 0x158(%rsp)
vpbroadcastd %r15d, %ymm1
vmovdqa %ymm0, 0x500(%rsp)
vmovdqa %ymm1, 0x420(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %r12d
movl %ecx, 0xf0(%rsp)
andb %cl, %r12b
je 0x1c4c510
vmovaps 0x260(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x240(%rsp), %ymm3
vmovaps 0x200(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x2d5419(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2a63e4(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x280(%rsp)
vmovaps 0x300(%rsp), %ymm0
vaddps 0x320(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x29ff13(%rip), %ymm0 # 0x1eeba20
vblendmps 0x300(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x400(%rsp,%rcx), %xmm10
vmovss 0x780(%rsp,%rcx), %xmm8
vmovaps 0x1e0(%rsp), %xmm0
vucomiss 0x29feab(%rip), %xmm0 # 0x1eeba24
vmovss 0xf8(%rsp), %xmm0
jae 0x1c4bbc5
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x80(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x80(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2a62d7(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x48(%rsp)
movl $0x5, %r15d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x1f0(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2a0b07(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2a0f60(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x80(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2a53a1(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2a539d(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2a537a(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x240(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x220(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x2f0(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x29fd2f(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x1a0(%rsp)
jb 0x1c4bd0c
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c4bd3c
vmovss %xmm11, 0x100(%rsp)
vmovss %xmm12, 0x14(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x14(%rsp), %xmm12
vmovss 0x100(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x80(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2d516d(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x2e0(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2a0e20(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2a0dfe(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x200(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x240(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x220(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm12
vfmadd132ps 0x260(%rsp), %xmm2, %xmm12 # xmm12 = (xmm12 * mem) + xmm2
vdpps $0x7f, %xmm12, %xmm12, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2a092f(%rip), %xmm3, %xmm6 # 0x1eec718
vmulss 0x2a092b(%rip), %xmm0, %xmm8 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x2c0(%rsp)
vfnmadd213ss 0x2a51eb(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm12, 0x100(%rsp)
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x2d0(%rsp)
jb 0x1c4be37
vsqrtss %xmm0, %xmm0, %xmm5
jmp 0x1c4be82
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm6, 0x24(%rsp)
vmovss %xmm8, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm8
vmovss 0x24(%rsp), %xmm6
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps %xmm0, %xmm5
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm18
vmulss %xmm3, %xmm8, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm6, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm14
vaddss 0x2a0851(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm15
vmulss 0x2a0832(%rip), %xmm15, %xmm17 # 0x1eec718
vmulss 0x2a082c(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x29fb2c(%rip), %xmm0 # 0x1eeba24
jb 0x1c4bf03
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c4bf99
vmovss %xmm13, 0x24(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovss %xmm5, 0x40(%rsp)
vmovaps %xmm15, 0x2b0(%rsp)
vmovss %xmm17, 0x3c(%rsp)
vmovss %xmm19, 0x38(%rsp)
vmovaps %xmm4, 0x2a0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x2a0(%rsp), %xmm4
vmovss 0x38(%rsp), %xmm19
vmovss 0x3c(%rsp), %xmm17
vmovaps 0x2b0(%rsp), %xmm15
vmovss 0x40(%rsp), %xmm5
vmovaps 0xc0(%rsp), %xmm14
vmovss 0x24(%rsp), %xmm13
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps 0x1a0(%rsp), %xmm18
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss 0x2d4f21(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x60(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2a4a22(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x80(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x240(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x220(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x2e0(%rsp), %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x2d0(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm12, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm2, %xmm1, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x2c0(%rsp), %xmm1, %xmm3
vmulss 0x48(%rsp), %xmm8, %xmm1
vmovss 0x44(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2d4e5d(%rip){1to4}, %xmm12, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0xd0(%rsp), %xmm11
vdpps $0x7f, %xmm11, %xmm3, %xmm4
vdivss %xmm5, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm9, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x1f0(%rsp), %xmm7
vdpps $0x7f, %xmm11, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm9, %xmm3
vmulss %xmm15, %xmm19, %xmm2
vmulss %xmm15, %xmm15, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss %xmm2, %xmm17, %xmm6
vdpps $0x7f, %xmm7, %xmm9, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm3 # xmm3 = -(xmm14 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm14, %xmm7 # xmm7 = -(xmm14 * xmm5) + xmm7
vpermilps $0xff, 0x2f0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm14, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm8, %xmm8
vbroadcastss 0x2d4d9c(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
movb $0x1, %al
jbe 0x1c4c184
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x280(%rsp), %xmm3
vfmadd231ss 0x2a5d6a(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c4c184
vaddss 0x290(%rsp), %xmm8, %xmm8
vucomiss 0x5c(%rsp), %xmm8
jb 0x1c4c17f
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x200(%rcx,%rax,4), %xmm5
vucomiss %xmm8, %xmm5
jae 0x1c4c19a
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c4c4c4
decq %r15
jne 0x1c4bbe9
jmp 0x1c4c4c1
xorl %eax, %eax
vucomiss 0x29f880(%rip), %xmm10 # 0x1eeba24
jb 0x1c4c181
vmovss 0x2a0566(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c4c181
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2a054b(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2a0545(%rip), %xmm18, %xmm3 # 0x1eec71c
movq 0x50(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x168(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x240(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c4c17f
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c4c229
cmpq $0x0, 0x48(%r14)
jne 0x1c4c229
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c4c184
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm12, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm12
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm3 # xmm3 = xmm12[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x50(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %zmm1
vbroadcastss 0x2c6478(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x2d4c40(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x880(%rsp)
vmovaps %zmm3, 0x8c0(%rsp)
vmovaps %zmm0, 0x900(%rsp)
vmovaps %zmm1, 0x940(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x840(%rsp), %zmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovdqa64 0x800(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa00(%rsp)
movq 0x140(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm8, 0x200(%rcx,%rax,4)
vmovaps 0x7c0(%rsp), %zmm0
vmovaps %zmm0, 0x340(%rsp)
leaq 0x340(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x180(%rsp)
movq %rcx, 0x188(%rsp)
leaq 0x880(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x10, 0x198(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm5, 0x80(%rsp)
je 0x1c4c3f7
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2d4ad7(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2d4acd(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c4c49d
movq 0x50(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c4c465
testb $0x2, (%rcx)
jne 0x1c4c42d
testb $0x40, 0x3e(%r14)
je 0x1c4c465
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2d4a69(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2d4a5f(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x188(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x2a06f8(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %r14b
jmp 0x1c4c4a0
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c4c222
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x200(%rcx,%rax,4)
jmp 0x1c4c222
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %bl
movq 0x28(%rsp), %rdx
movq 0x18(%rsp), %rax
vmovaps 0x3c0(%rsp), %ymm0
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c4baff
jmp 0x1c4c519
vbroadcastss 0x2a0212(%rip), %ymm3 # 0x1eec714
vmovaps 0x130(%rsp), %xmm2
jmp 0x1c4d133
vbroadcastss 0x2d49ab(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x320(%rsp), %ymm3
vaddps 0x3a0(%rsp), %ymm3, %ymm0
movq 0x18(%rsp), %rax
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd 0xf4(%rsp), %k1
kmovd 0xec(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x34(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2d4975(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2d4967(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x3a0(%rsp)
vpcmpled 0x420(%rsp), %ymm0, %k0
kmovd %k0, %r12d
movl %ecx, 0x34(%rsp)
andb %cl, %r12b
je 0x1c4d00e
vmovaps 0x4c0(%rsp), %ymm7
vmovaps 0x260(%rsp), %ymm1
vmovaps 0x220(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x240(%rsp), %ymm5
vmovaps 0x200(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2a58b6(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x280(%rsp)
vmovaps %ymm7, 0x300(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x29f3ea(%rip), %ymm0 # 0x1eeba20
vblendmps 0x300(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x3e0(%rsp,%rcx), %xmm10
vmovss 0x4e0(%rsp,%rcx), %xmm8
vmovaps 0x1e0(%rsp), %xmm0
vucomiss 0x29f382(%rip), %xmm0 # 0x1eeba24
vmovss 0xfc(%rsp), %xmm0
jae 0x1c4c6ee
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
kmovw %k1, 0x80(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x80(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2a57ae(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x48(%rsp)
movl $0x5, %r15d
vmovaps %xmm8, 0xa0(%rsp)
vbroadcastss %xmm8, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x1f0(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x29ffde(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm4
vmulss %xmm4, %xmm4, %xmm1
vmulss %xmm1, %xmm4, %xmm1
vmulss %xmm10, %xmm10, %xmm11
vmulss %xmm11, %xmm10, %xmm2
vmovss 0x2a0437(%rip), %xmm7 # 0x1eecb8c
vmovaps %xmm7, %xmm3
vfmadd213ss %xmm2, %xmm1, %xmm3 # xmm3 = (xmm1 * xmm3) + xmm2
vmulss %xmm4, %xmm10, %xmm12
vmovaps %xmm4, 0x80(%rsp)
vmulss %xmm4, %xmm12, %xmm4
vmulss %xmm12, %xmm10, %xmm5
vmovss 0x2a4878(%rip), %xmm8 # 0x1ef0ff4
vmulss %xmm5, %xmm8, %xmm6
vmovss 0x2a4874(%rip), %xmm9 # 0x1ef0ffc
vfmadd231ss %xmm9, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm9) + xmm6
vaddss %xmm6, %xmm3, %xmm3
vmovaps %xmm7, %xmm6
vfmadd213ss %xmm1, %xmm2, %xmm6 # xmm6 = (xmm2 * xmm6) + xmm1
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) + xmm4
vaddss %xmm4, %xmm6, %xmm4
vmovss 0x2a4851(%rip), %xmm5 # 0x1ef1000
vmulss %xmm5, %xmm1, %xmm1
vmulss %xmm5, %xmm3, %xmm3
vmulss %xmm5, %xmm4, %xmm4
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x240(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd132ps 0x220(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x260(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm1, 0x2f0(%rsp)
vsubps %xmm1, %xmm0, %xmm0
vmovaps %xmm0, 0x1c0(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x29f206(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x1a0(%rsp)
jb 0x1c4c835
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c4c865
vmovss %xmm11, 0x100(%rsp)
vmovss %xmm12, 0x14(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x14(%rsp), %xmm12
vmovss 0x100(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vmovaps 0x80(%rsp), %xmm3
vbroadcastss %xmm3, %xmm1
vbroadcastss 0x2d4644(%rip), %xmm4 # 0x1f20ec0
vmovaps %xmm1, 0x2e0(%rsp)
vxorps %xmm4, %xmm1, %xmm0
vmulss %xmm0, %xmm3, %xmm0
vmulss 0x2a02f7(%rip), %xmm12, %xmm1 # 0x1eecb8c
vmovaps %xmm10, %xmm2
vfnmsub213ss %xmm1, %xmm10, %xmm2 # xmm2 = -(xmm10 * xmm2) - xmm1
vfmadd231ss %xmm3, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm3) + xmm1
vmovss 0x2a02d5(%rip), %xmm3 # 0x1eecb80
vmulss %xmm3, %xmm0, %xmm0
vmulss %xmm3, %xmm2, %xmm2
vmulss %xmm3, %xmm1, %xmm1
vmulss %xmm3, %xmm11, %xmm3
vbroadcastss %xmm3, %xmm3
vmulps 0x200(%rsp), %xmm3, %xmm3
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x240(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x220(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss %xmm0, %xmm12
vfmadd132ps 0x260(%rsp), %xmm2, %xmm12 # xmm12 = (xmm12 * mem) + xmm2
vdpps $0x7f, %xmm12, %xmm12, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x29fe06(%rip), %xmm3, %xmm6 # 0x1eec718
vmulss 0x29fe02(%rip), %xmm0, %xmm8 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps %xmm4, %xmm0, %xmm1
vmovaps %xmm2, 0x2c0(%rsp)
vfnmadd213ss 0x2a46c2(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovaps %xmm12, 0x100(%rsp)
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x2d0(%rsp)
jb 0x1c4c960
vsqrtss %xmm0, %xmm0, %xmm5
jmp 0x1c4c9ab
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm6, 0x24(%rsp)
vmovss %xmm8, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm8
vmovss 0x24(%rsp), %xmm6
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps %xmm0, %xmm5
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm18
vmulss %xmm3, %xmm8, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm6, %xmm0
vbroadcastss %xmm0, %xmm4
vmulps %xmm4, %xmm12, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm9, %xmm14
vaddss 0x29fd28(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm15
vmulss 0x29fd09(%rip), %xmm15, %xmm17 # 0x1eec718
vmulss 0x29fd03(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x29f003(%rip), %xmm0 # 0x1eeba24
jb 0x1c4ca2c
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c4cac2
vmovss %xmm13, 0x24(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovss %xmm5, 0x40(%rsp)
vmovaps %xmm15, 0x2b0(%rsp)
vmovss %xmm17, 0x3c(%rsp)
vmovss %xmm19, 0x38(%rsp)
vmovaps %xmm4, 0x2a0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x2a0(%rsp), %xmm4
vmovss 0x38(%rsp), %xmm19
vmovss 0x3c(%rsp), %xmm17
vmovaps 0x2b0(%rsp), %xmm15
vmovss 0x40(%rsp), %xmm5
vmovaps 0xc0(%rsp), %xmm14
vmovss 0x24(%rsp), %xmm13
vmovss 0x14(%rsp), %xmm7
vmovaps 0x100(%rsp), %xmm12
vmovaps 0x1a0(%rsp), %xmm18
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss 0x2d43f8(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x60(%rsp), %xmm10
vmovaps 0xa0(%rsp), %xmm8
vmovss 0x2a3ef9(%rip), %xmm2 # 0x1ef09dc
vmovaps %xmm2, %xmm1
vmovaps 0x80(%rsp), %xmm3
vfmadd213ss %xmm10, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm1) + xmm10
vfmadd231ss %xmm2, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm2) + xmm3
vbroadcastss %xmm10, %xmm2
vmulps 0x200(%rsp), %xmm2, %xmm2
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x240(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x220(%rsp), %xmm3, %xmm1 # xmm1 = (xmm1 * mem) + xmm3
vmovaps 0x2e0(%rsp), %xmm2
vfmadd132ps 0x260(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vbroadcastss 0x2d0(%rsp), %xmm1
vmulps %xmm1, %xmm2, %xmm1
vdpps $0x7f, %xmm2, %xmm12, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm12, %xmm2
vsubps %xmm2, %xmm1, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x2c0(%rsp), %xmm1, %xmm3
vmulss 0x48(%rsp), %xmm8, %xmm1
vmovss 0x44(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2d4334(%rip){1to4}, %xmm12, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm4, %xmm2
vmovaps 0xd0(%rsp), %xmm11
vdpps $0x7f, %xmm11, %xmm3, %xmm4
vdivss %xmm5, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm9, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm13) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x1f0(%rsp), %xmm7
vdpps $0x7f, %xmm11, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm9, %xmm3
vmulss %xmm15, %xmm19, %xmm2
vmulss %xmm15, %xmm15, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss %xmm2, %xmm17, %xmm6
vdpps $0x7f, %xmm7, %xmm9, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm3 # xmm3 = -(xmm14 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm14, %xmm7 # xmm7 = -(xmm14 * xmm5) + xmm7
vpermilps $0xff, 0x2f0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm12, %xmm12, %xmm0 # xmm0 = xmm12[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm14, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm8, %xmm8
vbroadcastss 0x2d4273(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
movb $0x1, %al
jbe 0x1c4ccad
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x280(%rsp), %xmm3
vfmadd231ss 0x2a5241(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c4ccad
vaddss 0x290(%rsp), %xmm8, %xmm8
vucomiss 0x5c(%rsp), %xmm8
jb 0x1c4cca8
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x200(%rcx,%rax,4), %xmm4
vucomiss %xmm8, %xmm4
jae 0x1c4ccc3
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c4cfdb
decq %r15
jne 0x1c4c712
jmp 0x1c4cfd8
xorl %eax, %eax
vucomiss 0x29ed57(%rip), %xmm10 # 0x1eeba24
jb 0x1c4ccaa
vmovss 0x29fa3d(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c4ccaa
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x29fa22(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x29fa1c(%rip), %xmm18, %xmm3 # 0x1eec71c
movq 0x50(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x168(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x240(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c4cca8
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c4cd52
cmpq $0x0, 0x48(%r14)
jne 0x1c4cd52
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c4ccad
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm9, %xmm1
vfmadd213ps %xmm12, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm12
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm3 # xmm3 = xmm12[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x50(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %zmm1
vbroadcastss 0x2c594f(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x2d4117(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x880(%rsp)
vmovaps %zmm3, 0x8c0(%rsp)
vmovaps %zmm0, 0x900(%rsp)
vmovaps %zmm1, 0x940(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x840(%rsp), %zmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovdqa64 0x800(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa00(%rsp)
movq 0x140(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm8, 0x200(%rcx,%rax,4)
vmovaps 0x7c0(%rsp), %zmm0
vmovaps %zmm0, 0x340(%rsp)
leaq 0x340(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x180(%rsp)
movq %rcx, 0x188(%rsp)
leaq 0x880(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x10, 0x198(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovss %xmm4, 0x80(%rsp)
je 0x1c4cf17
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2d3fad(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c4cfb4
movq 0x50(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c4cf7c
testb $0x2, (%rcx)
jne 0x1c4cf4d
testb $0x40, 0x3e(%r14)
je 0x1c4cf7c
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x80(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm10
vbroadcastss 0x2d3f48(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x188(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x29fbe1(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %r14b
jmp 0x1c4cfb7
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c4cd4b
movq 0x28(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm4, 0x200(%rcx,%rax,4)
jmp 0x1c4cd4b
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %bl
movq 0x28(%rsp), %rdx
movq 0x18(%rsp), %rax
vmovaps 0x3c0(%rsp), %ymm0
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c4c628
vmovdqa 0x420(%rsp), %ymm1
vpcmpltd 0x3a0(%rsp), %ymm1, %k1
vmovaps 0x780(%rsp), %ymm0
vpcmpltd 0x500(%rsp), %ymm1, %k2
vmovaps 0x320(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
movq 0x18(%rsp), %rax
vbroadcastss 0x200(%rax,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0xf0(%rsp), %ecx
andb %al, %cl
vmovaps 0x4c0(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x34(%rsp), %esi
andb %al, %sil
orb %cl, %sil
je 0x1c4d0fc
movl %r13d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0xae0(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0xb00(%rsp,%rax)
vmovaps 0x130(%rsp), %xmm2
vmovlps %xmm2, 0xb20(%rsp,%rax)
movq 0x158(%rsp), %r15
leal 0x1(%r15), %ecx
movl %ecx, 0xb28(%rsp,%rax)
incl %r13d
movq 0x150(%rsp), %r9
vbroadcastss 0x29f62f(%rip), %ymm3 # 0x1eec714
movb 0x13(%rsp), %r10b
movq 0x148(%rsp), %r11
movq 0x160(%rsp), %r14
jmp 0x1c4d133
movq 0x150(%rsp), %r9
vbroadcastss 0x29f607(%rip), %ymm3 # 0x1eec714
movb 0x13(%rsp), %r10b
movq 0x148(%rsp), %r11
movq 0x160(%rsp), %r14
movq 0x158(%rsp), %r15
vmovaps 0x130(%rsp), %xmm2
testl %r13d, %r13d
je 0x1c4d304
leal -0x1(%r13), %r8d
leaq (%r8,%r8,2), %rcx
shlq $0x5, %rcx
vmovaps 0xb00(%rsp,%rcx), %ymm0
movzbl 0xae0(%rsp,%rcx), %esi
vaddps 0x320(%rsp), %ymm0, %ymm1
movq 0x18(%rsp), %rax
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %eax
andl %esi, %eax
je 0x1c4d22c
kmovd %eax, %k1
vbroadcastss 0x29e894(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %al, %sil
je 0x1c4d1c4
movzbl %sil, %edi
jmp 0x1c4d1c7
movzbl %al, %edi
leaq (%rsp,%rcx), %rsi
addq $0xae0, %rsi # imm = 0xAE0
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r15d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %eax, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c4d202
movl %r13d, %r8d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x2d3d06(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x880(%rsp)
vmovsd 0x880(%rsp,%rcx,4), %xmm2
movl %r8d, %r13d
testb %al, %al
je 0x1c4d133
vmovaps 0x700(%rsp), %ymm16
vmovaps 0x6e0(%rsp), %ymm17
vmovaps 0x6c0(%rsp), %ymm18
vmovaps 0x6a0(%rsp), %ymm19
vmovaps 0x680(%rsp), %ymm20
vmovaps 0x660(%rsp), %ymm22
vmovaps 0x640(%rsp), %ymm23
vmovaps 0x620(%rsp), %ymm24
vmovaps 0x600(%rsp), %ymm25
vmovaps 0x5e0(%rsp), %ymm27
vmovaps 0x5c0(%rsp), %ymm29
vmovaps 0x5a0(%rsp), %ymm31
vmovaps 0x580(%rsp), %ymm21
vmovaps 0x560(%rsp), %ymm30
vmovaps 0x540(%rsp), %ymm6
jmp 0x1c4af71
vcmpleps 0x2d3c40(%rip), %ymm4, %k2 # 0x1f20f00
vbroadcastss 0x29f8bb(%rip), %ymm4 # 0x1eecb84
vbroadcastss 0x29e74d(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm4, %ymm16, %ymm3 {%k2}
vmovaps %ymm3, %ymm30 {%k1}
vblendmps %ymm16, %ymm4, %ymm3 {%k2}
kmovd %k2, %ecx
vmovaps %ymm3, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %al, %sil
movl %esi, %eax
jmp 0x1c4b4dd
testb $0x1, %bl
jne 0x1c4d334
movq 0x18(%rsp), %rax
vmovaps 0x520(%rsp), %ymm0
vcmpleps 0x200(%rax,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
setne %r10b
jne 0x1c4ac52
andb $0x1, %r10b
movl %r10d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersector1<8>::intersect_t<embree::avx512::RibbonCurve1Intersector1<embree::BSplineCurveT, 8>, embree::avx512::Intersect1EpilogMU<8, true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Intersect a single ray with a block of up to M quantized curve primitives.
// First performs a vectorized bounds/pre-test across all M curves
// (CurveNiIntersector1<M>::intersect), producing a lane mask of candidate
// curves plus their entry distances tNear; then iterates the surviving
// lanes scalar-wise, running the full per-curve Intersector on each.
//
// pre:     per-ray precalculations (ray-space transform etc.)
// ray:     the ray; Epilog may commit a hit and shorten ray.tfar
// context: query context giving access to the scene
// prim:    the packed CurveNv<M> primitive block
static __forceinline void intersect_t(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Vectorized coarse test over all M curves; valid marks candidate lanes.
vbool<M> valid = CurveNiIntersector1<M>::intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Process candidate lanes one at a time, nearest-first by bit order.
while (mask)
{
// bscf: extract index of lowest set bit and clear it from mask.
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i from the packed block.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Peek at the next one or two candidate lanes (i1, i2) without
// consuming them, so their control points can be prefetched while
// the current curve is being intersected below.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
// bsf: index of lowest set bit (non-destructive peek).
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Full-precision curve intersection; Epilog records the hit
// (geomID/primID) and may reduce ray.tfar.
Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,Epilog(ray,context,geomID,primID));
// Re-filter remaining candidates against the (possibly shortened)
// ray.tfar so lanes now behind the committed hit are dropped early.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x620, %rsp # imm = 0x620
movq %rcx, %r9
movzbl 0x1(%rcx), %r8d
leaq (%r8,%r8,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r9,%rax), %xmm0
movq %rdx, %r10
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps 0x10(%rsi), %xmm0, %xmm0
vpmovsxbd 0x6(%r9,%r8,4), %ymm1
vpmovsxbd 0x6(%r9,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%r8,%r8,2), %rdx
vpmovsxbd 0x6(%r9,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%r8,%rcx,2), %r11
vpmovsxbd 0x6(%r9,%r11), %ymm1
leal (,%rdx,4), %r11d
vpmovsxbd 0x6(%r9,%r11), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %r8, %r11
vpmovsxbd 0x6(%r9,%r11), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%r8,%r8,8), %r11
leal (%r11,%r11), %ebx
vpmovsxbd 0x6(%r9,%rbx), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %r8, %rbx
vpmovsxbd 0x6(%r9,%rbx), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r9,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2c52de(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2d3aac(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2d3a21(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x2a3b33(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x29f221(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%r8,8), %rbx
subq %r8, %rbx
vpmovsxwd 0x6(%r9,%rbx), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r9,%r11), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%r8,%r8), %r11
addq %r8, %rcx
shlq $0x3, %rdx
subq %r8, %rdx
vpbroadcastd %r8d, %ymm7
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %r11, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r9,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc(%rsi){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2d292d(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x20(%rsi){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2d2909(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x30d30b(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x5e0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c4f452
leaq (%r9,%rax), %r13
addq $0x6, %r13
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %r13
leaq 0x4de4da(%rip), %r8 # 0x212bb28
leaq 0x4e08f3(%rip), %r11 # 0x212df48
vmovaps %ymm6, 0xc0(%rsp)
vmovaps %ymm3, 0x60(%rsp)
vmovaps %ymm21, 0x1e0(%rsp)
vmovaps %ymm20, 0x200(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r9,%rax,4), %ecx
movl %ecx, 0x10(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r9), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x38(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%r13,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c4d6e2
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%r13,%rdx)
prefetcht0 0x40(%r13,%rdx)
testq %rcx, %rcx
je 0x1c4d6e2
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r13,%rcx)
prefetcht1 0x40(%r13,%rcx)
vmovups 0x10(%r13,%rax), %xmm13
vmovups 0x20(%r13,%rax), %xmm27
vmovups 0x30(%r13,%rax), %xmm23
movl 0x248(%rbx), %edx
vmovaps (%rsi), %xmm1
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%rdi), %xmm4
vmovaps 0x20(%rdi), %xmm5
vmovaps 0x30(%rdi), %xmm6
vmulps %xmm6, %xmm0, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %r12
vmovups (%r8,%r12), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2c4f3e(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r8,%r12), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r8,%r12), %ymm15
vbroadcastss %xmm10, %ymm30
vpermps %ymm10, %ymm1, %ymm29
vmovups 0xd8c(%r8,%r12), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm31
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm31, %ymm4
vfmadd231ps %ymm30, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm30) + ymm5
vfmadd231ps %ymm29, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm29) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
vmovups (%r11,%r12), %ymm2
vmovups 0x484(%r11,%r12), %ymm17
vmovups 0x908(%r11,%r12), %ymm18
vmovups 0xd8c(%r11,%r12), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm31, %ymm6
vfmadd231ps %ymm30, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm30) + ymm7
vfmadd231ps %ymm29, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm29) + ymm6
vmovaps %ymm21, 0x120(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x440(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x240(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x140(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm26
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm26, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x520(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x540(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x560(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x580(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x2a0(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm26, 0x280(%rsp)
vfmadd231ps %ymm26, %ymm26, %ymm21 # ymm21 = (ymm26 * ymm26) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x40(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2d351e(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x190(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x180(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x170(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %edx, %xmm23, %xmm12
vmovaps %xmm12, 0x2c0(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2d350d(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2a35a7(%rip), %xmm3, %xmm27 # 0x1ef0fe4
vbroadcastss 0x2d3496(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0xc(%rsi), %xmm8
vmovaps %ymm30, 0x5c0(%rsp)
vmovaps %ymm29, 0x5a0(%rsp)
vmovaps %ymm20, 0x500(%rsp)
vmovaps %ymm21, 0x4e0(%rsp)
vmovaps %ymm22, 0x4c0(%rsp)
vmovaps %ymm3, 0x4a0(%rsp)
je 0x1c4e1d6
vmovaps %xmm8, 0x50(%rsp)
vmovaps %xmm27, 0x260(%rsp)
vmovaps %ymm3, %ymm27
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm27, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r8,%r12), %ymm3
vmovups 0x1694(%r8,%r12), %ymm10
vmovups 0x1b18(%r8,%r12), %ymm11
vmovups 0x1f9c(%r8,%r12), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm31, %ymm8
vmulps %ymm12, %ymm27, %ymm12
vfmadd231ps %ymm30, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm30) + ymm9
vfmadd231ps %ymm29, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm29) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x120(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x440(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x240(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x140(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%r11,%r12), %ymm10
vmovups 0x1b18(%r11,%r12), %ymm11
vmovups 0x1f9c(%r11,%r12), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x1c0(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm31, 0x1a0(%rsp)
vmulps %ymm13, %ymm31, %ymm14
vmulps %ymm13, %ymm27, %ymm13
vmovaps 0x260(%rsp), %xmm27
vfmadd231ps %ymm30, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm30) + ymm3
vfmadd231ps %ymm29, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm29) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%r11,%r12), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2d32c3(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm27, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x280(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x2a0(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2d3249(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm23, %xmm23, %xmm23
vfmadd213ps %ymm23, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm23
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x29ea7c(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x29ea5a(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm23, %ymm9, %ymm13
vfmadd213ps %ymm23, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm23
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm23, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm13) + ymm3
vcmpleps %ymm23, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm23, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x200(%rsp), %ymm20
vmovaps 0x1e0(%rsp), %ymm21
vmovaps %ymm17, %ymm22
je 0x1c4efcf
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x29e85c(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x50(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x1a0(%rsp), %ymm31
je 0x1c4f002
vcmpneqps %ymm23, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
je 0x1c4f01f
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x29e7dd(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x420(%rsp)
movzbl %al, %r15d
vmovaps %ymm2, %ymm3
testw %r15w, %r15w
je 0x1c4e213
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r15b
je 0x1c4e213
vbroadcastss 0x2a2a36(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x420(%rsp), %ymm1
vfmadd132ps 0x2a304f(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x2e0(%rsp)
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm3, 0x320(%rsp)
movl $0x0, 0x340(%rsp)
movl %edx, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x190(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x170(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movb %r15b, 0x390(%rsp)
movl 0x24(%rsi), %eax
testl %eax, 0x34(%rbx)
je 0x1c4e213
vaddps 0x2d2f06(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x29e6d2(%rip), %xmm1 # 0x1eec714
vdivss 0x2c0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm3, 0x3e0(%rsp)
kmovd %r15d, %k1
vbroadcastss 0x29d99a(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r15b, %al
movzbl %al, %eax
movzbl %r15b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c4f036
cmpq $0x0, 0x40(%rbx)
jne 0x1c4f036
vmovss 0x3a0(%rsp,%rcx,4), %xmm0
vmovss 0x3c0(%rsp,%rcx,4), %xmm1
vmovss 0x29e61c(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmovaps %ymm3, %ymm29
vxorps 0x2d2db4(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29ea70(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %ymm6, %ymm30
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29ea44(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x170(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x180(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vmovaps %ymm30, %ymm6
vfmadd132ps 0x190(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vmovss 0x3e0(%rsp,%rcx,4), %xmm3
vmovss %xmm3, 0x20(%rsi)
vmovaps %ymm29, %ymm3
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovlps %xmm2, 0x30(%rsi)
vextractps $0x2, %xmm2, 0x38(%rsi)
vmovss %xmm0, 0x3c(%rsi)
vmovss %xmm1, 0x40(%rsi)
movl 0x10(%rsp), %eax
movl %eax, 0x44(%rsi)
movq 0x38(%rsp), %rax
movl %eax, 0x48(%rsi)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c(%rsi)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%rsi)
jmp 0x1c4e213
vxorps %xmm23, %xmm23, %xmm23
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x200(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x240(%rsp), %ymm22
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm19
cmpl $0x9, %edx
jge 0x1c4e23b
vmovaps 0x5e0(%rsp), %ymm0
vcmpleps 0x20(%rsi){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c4d655
jmp 0x1c4f452
vpbroadcastd %edx, %ymm0
vmovdqa %ymm0, 0x2a0(%rsp)
vbroadcastss %xmm27, %ymm0
vmovaps %ymm0, 0x280(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x260(%rsp)
vmovss 0x29e4a5(%rip), %xmm0 # 0x1eec714
vdivss 0x2c0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
movl $0x8, %ebx
vmovaps %ymm6, 0xc0(%rsp)
vmovaps %ymm3, 0x60(%rsp)
vpbroadcastd %ebx, %ymm0
vpor 0x30c678(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x2a0(%rsp), %ymm0, %k1
leaq (%r12,%r8), %rcx
vmovups (%rcx,%rbx,4), %ymm3
vmovups 0x484(%rcx,%rbx,4), %ymm10
vmovups 0x908(%rcx,%rbx,4), %ymm11
vmovups 0xd8c(%rcx,%rbx,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm31, %ymm4
vmovaps 0x520(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x5c0(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm26) + ymm5
vmovaps 0x5a0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm19, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm19) + ymm5
vmovaps 0x440(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x560(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%r12,%r11), %rax
vmovups (%rax,%rbx,4), %ymm2
vmovups 0x484(%rax,%rbx,4), %ymm13
vmovaps 0x580(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%rbx,4), %ymm14
vmovups 0xd8c(%rax,%rbx,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm31, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm26, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm26) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm19, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm19) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm19, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm19
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c4eb3c
vmovaps %ymm25, %ymm16
vmovaps %ymm27, %ymm25
vmovaps %ymm26, %ymm27
vmovaps 0x4a0(%rsp), %ymm26
vmulps %ymm15, %ymm26, %ymm15
vmovaps 0x4c0(%rsp), %ymm30
vfmadd213ps %ymm15, %ymm30, %ymm14 # ymm14 = (ymm30 * ymm14) + ymm15
vmovaps 0x4e0(%rsp), %ymm29
vfmadd213ps %ymm14, %ymm29, %ymm13 # ymm13 = (ymm29 * ymm13) + ymm14
vmovaps 0x500(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm26, %ymm12
vfmadd213ps %ymm12, %ymm30, %ymm11 # ymm11 = (ymm30 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm29, %ymm10 # ymm10 = (ymm29 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%rbx,4), %ymm12
vmovups 0x1694(%rcx,%rbx,4), %ymm13
vmovups 0x1b18(%rcx,%rbx,4), %ymm14
vmovups 0x1f9c(%rcx,%rbx,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm31, %ymm10
vmulps %ymm15, %ymm26, %ymm15
vfmadd231ps %ymm27, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm27) + ymm11
vfmadd231ps %ymm25, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm25) + ymm10
vfmadd231ps %ymm14, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm16, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm16) + ymm10
vfmadd231ps %ymm13, %ymm29, %ymm15 # ymm15 = (ymm29 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm19, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm19) + ymm10
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%rbx,4), %ymm13
vmovups 0x1b18(%rax,%rbx,4), %ymm14
vmovups 0x1f9c(%rax,%rbx,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm31, %ymm12
vmulps %ymm16, %ymm26, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vmovups 0x1694(%rax,%rbx,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x440(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm29, %ymm16 # ymm16 = (ymm29 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2d2963(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x280(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2d28ef(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm23, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm23
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x29e126(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x29e104(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm23, %ymm14, %ymm13
vfmadd213ps %ymm23, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm23
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm23, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm23, %ymm6 # ymm6 = (ymm23 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm23, %ymm6 # ymm6 = (ymm23 * ymm13) + ymm6
vcmpleps %ymm23, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm23, %ymm3 # ymm3 = (ymm23 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm23, %ymm4 # ymm4 = (ymm23 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm23, %ymm13, %k0 {%k1}
kortestb %k0, %k0
je 0x1c4eb68
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x29df19(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x260(%rsp), %ymm2, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c4eb68
vcmpneqps %ymm23, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm19
je 0x1c4eb94
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x29dea3(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x400(%rsp)
movzbl %al, %ecx
vmovaps %ymm2, %ymm21
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
testw %cx, %cx
je 0x1c4eb57
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %cl
je 0x1c4eb57
vbroadcastss 0x2a20eb(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x400(%rsp), %ymm1
vfmadd132ps 0x2a2704(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x2e0(%rsp)
vmovaps %ymm1, 0x400(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm21, 0x320(%rsp)
movl %ebx, 0x340(%rsp)
movl %edx, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x190(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x180(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x170(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movb %cl, 0x390(%rsp)
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movl %ecx, %r15d
movq 0x38(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x24(%rsi), %eax
movq %rcx, 0x50(%rsp)
testl %eax, 0x34(%rcx)
je 0x1c4eb57
vaddps 0x2d25a5(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %ebx, %xmm22, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x2c0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x400(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
kmovd %r15d, %k1
vbroadcastss 0x29d03c(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r15b, %al
movzbl %al, %eax
movl %r15d, 0xe0(%rsp)
movzbl %r15b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r15d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c4eb9b
movq 0x50(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c4eb9b
vmovss 0x3a0(%rsp,%r15,4), %xmm0
vmovss 0x3c0(%rsp,%r15,4), %xmm1
vmovss 0x29dcae(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2d244c(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29e108(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29e0e2(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x170(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x180(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x190(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vmovss 0x3e0(%rsp,%r15,4), %xmm3
vmovss %xmm3, 0x20(%rsi)
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovlps %xmm2, 0x30(%rsi)
vextractps $0x2, %xmm2, 0x38(%rsi)
vmovss %xmm0, 0x3c(%rsi)
vmovss %xmm1, 0x40(%rsi)
movl 0x10(%rsp), %eax
movl %eax, 0x44(%rsi)
movq 0x38(%rsp), %rax
movl %eax, 0x48(%rsi)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c(%rsi)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%rsi)
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
jmp 0x1c4eb57
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps %ymm19, %ymm18
vmovaps %ymm24, %ymm19
addq $0x8, %rbx
cmpl %ebx, %edx
jg 0x1c4e29a
jmp 0x1c4e218
xorl %ecx, %ecx
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm19
jmp 0x1c4e8a8
xorl %ecx, %ecx
jmp 0x1c4e899
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x490(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x470(%rsp)
vmovaps %ymm20, 0x200(%rsp)
vmovaps %ymm21, 0x1e0(%rsp)
movl 0xe0(%rsp), %ecx
movq %r9, 0x30(%rsp)
movq %r10, 0x28(%rsp)
movq %rsi, 0x20(%rsp)
movq %rdi, 0x18(%rsp)
movl %edx, 0xc(%rsp)
vmovaps %ymm28, 0x1c0(%rsp)
vmovaps %ymm31, 0x1a0(%rsp)
movl %ecx, 0xe0(%rsp)
vmovss 0x3a0(%rsp,%r15,4), %xmm0
vmovss 0x3c0(%rsp,%r15,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x220(%rsp)
vmovss 0x3e0(%rsp,%r15,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%r10), %rax
movq %rax, 0x230(%rsp)
vmovss 0x29dab8(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2d2256(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29df12(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29deec(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x470(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x480(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x490(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x10(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x38(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x14(%rsp) # imm = 0xFFFFFFFF
leaq 0x14(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x50(%rsp), %rax
movq 0x18(%rax), %rcx
movq %rcx, 0x98(%rsp)
movq 0x230(%rsp), %rax
movq %rax, 0xa0(%rsp)
movq %rsi, 0xa8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, 0xb8(%rsp)
movq 0x50(%rsp), %rax
movq 0x40(%rax), %rax
testq %rax, %rax
je 0x1c4ee1d
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x4df157(%rip), %r11 # 0x212df48
leaq 0x4dcd30(%rip), %r8 # 0x212bb28
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c4ef21
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c4eec2
testb $0x2, (%rcx)
jne 0x1c4ee3e
movq 0x50(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c4eeb5
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x4df0ae(%rip), %r11 # 0x212df48
leaq 0x4dcc87(%rip), %r8 # 0x212bb28
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c4ef21
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0xc(%rsp), %edx
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c4ef2f
vmovss 0x220(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
movzbl 0xe0(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x20(%rsi){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %ecx
ktestb %k1, %k0
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
je 0x1c4efc2
kmovd %ecx, %k1
vbroadcastss 0x29caa1(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %cl, %al
movzbl %al, %r15d
movl %ecx, %eax
movzbl %al, %ecx
cmovnel %r15d, %ecx
tzcntl %ecx, %r15d
movl %eax, %ecx
testb %cl, %cl
jne 0x1c4ec10
jmp 0x1c4eb57
xorl %r15d, %r15d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x50(%rsp), %xmm8
jmp 0x1c4df5c
xorl %r15d, %r15d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x40(%rsp), %xmm7
jmp 0x1c4df5c
xorl %r15d, %r15d
vmovaps 0xc0(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
jmp 0x1c4df5c
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x220(%rsp)
movq %r9, 0x30(%rsp)
movq %r10, 0x28(%rsp)
movq %rsi, 0x20(%rsp)
movq %rdi, 0x18(%rsp)
vmovaps %ymm6, 0xc0(%rsp)
vmovaps %ymm3, 0x60(%rsp)
movl %edx, 0xc(%rsp)
vmovss 0x3a0(%rsp,%rcx,4), %xmm0
vmovss 0x3c0(%rsp,%rcx,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x280(%rsp)
movq %rcx, 0x2a0(%rsp)
vmovss 0x3e0(%rsp,%rcx,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%r10), %rax
vmovss 0x29d63f(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2d1ddd(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29da99(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29da73(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x220(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x230(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0xe0(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x10(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x38(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x14(%rsp) # imm = 0xFFFFFFFF
leaq 0x14(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %rsi, 0xa8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x1, 0xb8(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
je 0x1c4f292
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x260(%rsp), %xmm27
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x4dece2(%rip), %r11 # 0x212df48
leaq 0x4dc8bb(%rip), %r8 # 0x212bb28
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c4f3a3
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c4f344
testb $0x2, (%rcx)
jne 0x1c4f2b2
testb $0x40, 0x3e(%rbx)
je 0x1c4f337
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x260(%rsp), %xmm27
vmovaps 0x1a0(%rsp), %ymm31
vmovaps 0x1c0(%rsp), %ymm28
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
movl 0xc(%rsp), %edx
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm20
vxorps %xmm23, %xmm23, %xmm23
leaq 0x4dec2c(%rip), %r11 # 0x212df48
leaq 0x4dc805(%rip), %r8 # 0x212bb28
movq 0x18(%rsp), %rdi
movq 0x20(%rsp), %rsi
movq 0x28(%rsp), %r10
movq 0x30(%rsp), %r9
movq 0x90(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c4f3a3
movq 0xa8(%rsp), %rax
movq 0xb0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0xc(%rsp), %edx
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c4f3b1
vmovss 0x280(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
vmovaps 0x60(%rsp), %ymm3
movl $0x1, %eax
movq 0x2a0(%rsp), %rcx
shlxl %ecx, %eax, %eax
kmovd %eax, %k0
movzbl %r15b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x20(%rsi){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r15d
ktestb %k1, %k0
vmovaps 0xc0(%rsp), %ymm6
je 0x1c4f444
kmovd %r15d, %k1
vbroadcastss 0x29c61a(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r15b, %al
movzbl %al, %eax
movzbl %r15b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
testb %r15b, %r15b
jne 0x1c4f093
jmp 0x1c4e213
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersector1<8>::occluded_t<embree::avx512::RibbonCurve1Intersector1<embree::BSplineCurveT, 8>, embree::avx512::Occluded1EpilogMU<8, true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Occlusion (shadow-ray) query against a block of up to M curve primitives.
// Returns true as soon as ANY curve in `prim` occludes `ray`; returns false
// only after every candidate lane has been tested and rejected.
//
// pre     - per-ray precomputed data forwarded to the per-curve intersector
// ray     - the (single) shadow ray; ray.tfar may shrink during traversal
// context - query context; provides the scene used to resolve geomID
// prim    - quantized block of M curves (control points + IDs)
static __forceinline bool occluded_t(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
// Broad phase: test the ray against all M curve bounds at once.
// `valid` marks lanes whose bounds the ray enters; `tNear` holds the
// corresponding entry distances (used below to re-cull after tfar shrinks).
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersector1<M>::intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
// bscf: returns the lowest set lane index and clears that bit in `mask`
// (bit-scan-forward + clear, judging by how mask/mask1 are consumed below).
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
// NOTE: geomID is block-wide (no [i]); primID is per-lane — the block
// stores one geometry with several curve segments. TODO confirm layout.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Narrow phase input: the four control points of curve i.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch of the NEXT one or two candidate curves so their
// control points are in cache by the time the loop reaches them.
// mask1 peeks ahead without disturbing the loop's own `mask`.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Narrow phase: exact curve test. For occlusion any single hit suffices,
// so bail out immediately on success.
if (Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,Epilog(ray,context,geomID,primID)))
return true;
// The epilog may have shortened ray.tfar; drop remaining lanes whose
// bounds-entry distance now lies beyond the ray.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x660, %rsp # imm = 0x660
movq %rcx, %r9
movq %rdi, 0x58(%rsp)
movzbl 0x1(%rcx), %ecx
leaq (%rcx,%rcx,4), %r10
leaq (%r10,%r10,4), %rax
vbroadcastss 0x12(%r9,%rax), %xmm0
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%rax), %xmm1, %xmm1
vmulps 0x10(%rsi), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%r9,%rcx,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%r9,%r10), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rcx,%rcx,2), %r11
vpmovsxbd 0x6(%r9,%r11,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rcx,%r10,2), %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%r11,4), %edi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rcx, %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rcx,%rcx,8), %rdi
leal (%rdi,%rdi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm0
addq %rcx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %r10d
vpmovsxbd 0x6(%r9,%r10), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x2c31c5(%rip), %ymm16 # 0x1f12704
vpermps %ymm2, %ymm16, %ymm15
vbroadcastss 0x2d198d(%rip), %ymm17 # 0x1f20edc
vpermps %ymm2, %ymm17, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vpermps %ymm3, %ymm17, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x2d1901(%rip), %ymm8 # 0x1f20ec4
vandps %ymm4, %ymm8, %ymm5
vbroadcastss 0x2a1a18(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm1, %ymm8, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm0, %ymm8, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x29d106(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r8
subq %rcx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rdi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rcx,%rcx), %rdi
addq %rcx, %r10
shlq $0x3, %r11
subq %rcx, %r11
movl %ecx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %rdi, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%r10), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%r11), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%rsi){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2d081b(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%rsi){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2d07f7(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %ecx, %ymm1
vpcmpgtd 0x30b1f3(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x620(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r10b
je 0x1c511b6
leaq (%r9,%rax), %r14
addq $0x6, %r14
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
addq $0x10, %r14
leaq 0x4dc3be(%rip), %r11 # 0x212bb28
movq 0x58(%rsp), %r8
vmovaps %ymm6, 0xa0(%rsp)
vmovaps %ymm2, 0x80(%rsp)
vmovaps %ymm21, 0x200(%rsp)
vmovaps %ymm20, 0x220(%rsp)
tzcntq %r12, %rax
blsrq %r12, %r12
movl 0x6(%r9,%rax,4), %ecx
movl %ecx, 0x3c(%rsp)
shll $0x6, %eax
movq %r12, %rcx
movl 0x2(%r9), %ebx
movq (%rdx), %rdi
movq 0x1e8(%rdi), %rdi
movq %rbx, 0x78(%rsp)
movq (%rdi,%rbx,8), %r15
vmovups (%r14,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c4f7fc
andq %r12, %rcx
tzcntq %r12, %rdi
shll $0x6, %edi
prefetcht0 (%r14,%rdi)
prefetcht0 0x40(%r14,%rdi)
testq %rcx, %rcx
je 0x1c4f7fc
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r14,%rcx)
prefetcht1 0x40(%r14,%rcx)
vmovups 0x10(%r14,%rax), %xmm13
vmovups 0x20(%r14,%rax), %xmm30
vmovups 0x30(%r14,%rax), %xmm23
movl 0x248(%r15), %r13d
vmovaps (%rsi), %xmm1
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%r8), %xmm4
vmovaps 0x20(%r8), %xmm5
vmovaps 0x30(%r8), %xmm6
vmulps %xmm6, %xmm0, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm30, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm6, %xmm2, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r13d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
vmovups (%r11,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2c2e20(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r11,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r11,%rbx), %ymm15
vbroadcastss %xmm10, %ymm26
vpermps %ymm10, %ymm1, %ymm29
vmovups 0xd8c(%r11,%rbx), %ymm16
vbroadcastss %xmm11, %ymm27
vpermps %ymm11, %ymm1, %ymm28
vmulps %ymm16, %ymm27, %ymm5
vmulps %ymm16, %ymm28, %ymm4
vfmadd231ps %ymm26, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm26) + ymm5
vfmadd231ps %ymm29, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm29) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x4de5e6(%rip), %rdi # 0x212df48
vmovups (%rdi,%rbx), %ymm2
vmovups 0x484(%rdi,%rbx), %ymm17
vmovups 0x908(%rdi,%rbx), %ymm18
vmovups 0xd8c(%rdi,%rbx), %ymm19
vmulps %ymm19, %ymm27, %ymm7
vmulps %ymm19, %ymm28, %ymm6
vfmadd231ps %ymm26, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm26) + ymm7
vfmadd231ps %ymm29, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm29) + ymm6
vmovaps %ymm21, 0x160(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x4a0(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x260(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x240(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm30, %xmm30, %xmm0 # xmm0 = xmm30[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x520(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x540(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x560(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x580(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x120(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x1e0(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x40(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2d13fa(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x2b0(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm30, %xmm3
vmovaps %xmm30, 0x2a0(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x290(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r13d, %xmm23, %xmm12
vmovaps %xmm12, 0x60(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2d13ec(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x2a1488(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x2d1377(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0xc(%rsi), %xmm9
vmovaps %ymm26, 0x600(%rsp)
vmovaps %ymm29, 0x5e0(%rsp)
vmovaps %ymm27, 0x5c0(%rsp)
vmovaps %ymm28, 0x5a0(%rsp)
vmovaps %ymm20, 0x500(%rsp)
vmovaps %ymm21, 0x4e0(%rsp)
vmovaps %ymm22, 0x4c0(%rsp)
je 0x1c50512
vmovaps %xmm9, 0x1c0(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r11,%rbx), %ymm3
vmovups 0x1694(%r11,%rbx), %ymm10
vmovups 0x1b18(%r11,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%r11,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm27, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm28, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm26, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm26) + ymm9
vfmadd231ps %ymm29, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm29) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x160(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x4a0(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm23) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x260(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x240(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdi,%rbx), %ymm10
vmovups 0x1b18(%rdi,%rbx), %ymm11
vmovups 0x1f9c(%rdi,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmulps %ymm13, %ymm27, %ymm3
vmulps %ymm13, %ymm28, %ymm14
vmovaps %ymm15, 0x180(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm26, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm26) + ymm3
vfmadd231ps %ymm29, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm29) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdi,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm23, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm23) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2d11b1(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm26
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x1e0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x120(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2d1131(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm28, %xmm28, %xmm28
vfmadd213ps %ymm28, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm28
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x29c964(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x29c942(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm28, %ymm9, %ymm13
vfmadd213ps %ymm28, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm28
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm28, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm13) + ymm3
vcmpleps %ymm28, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm28, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x220(%rsp), %ymm20
vmovaps 0x200(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm26, %xmm14
je 0x1c51123
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm28, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm28) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x29c738(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x1c0(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c51154
vcmpneqps %ymm28, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm8
je 0x1c5117c
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x29c6bd(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x440(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c5050e
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r8){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c5050e
vbroadcastss 0x2a091d(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x440(%rsp), %ymm1
vfmadd132ps 0x2a0f36(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x2e0(%rsp)
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm2, 0x320(%rsp)
movl $0x0, 0x340(%rsp)
movl %r13d, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x2b0(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x2a0(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x290(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movb %al, 0x390(%rsp)
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%r15)
je 0x1c5050e
movq 0x10(%rdx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c50164
movb $0x1, %cl
cmpq $0x0, 0x48(%r15)
je 0x1c50559
vmovaps %ymm6, 0xa0(%rsp)
vaddps 0x2d0dcb(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x29c597(%rip), %xmm1 # 0x1eec714
vdivss 0x60(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm2, 0x80(%rsp)
vmovaps %ymm2, 0x3e0(%rsp)
movzbl %al, %eax
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x1b0(%rsp)
movq %rax, 0x1e0(%rsp)
tzcntq %rax, %rax
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x1a0(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x150(%rsp)
movb $0x1, %cl
vmovaps %xmm14, 0x2c0(%rsp)
movq %r9, 0x50(%rsp)
movq %rdx, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movb %r10b, 0x1b(%rsp)
movl %ecx, 0x120(%rsp)
vmovss 0x3a0(%rsp,%rax,4), %xmm0
vmovss 0x3c0(%rsp,%rax,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x20(%rsp)
movq %rax, 0x400(%rsp)
vmovss 0x3e0(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%rdx), %rax
vmovss 0x29c4b0(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2d0c4e(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29c90a(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29c8e4(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x150(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x1a0(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x1b0(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x3c(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x1c(%rsp) # imm = 0xFFFFFFFF
leaq 0x1c(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x18(%r15), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %rsi, 0xd8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x1, 0xe8(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
je 0x1c5041f
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x2c0(%rsp), %xmm14
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x220(%rsp), %ymm20
vxorps %xmm28, %xmm28, %xmm28
leaq 0x4ddb5a(%rip), %rdi # 0x212df48
leaq 0x4db733(%rip), %r11 # 0x212bb28
movb 0x1b(%rsp), %r10b
movq 0x58(%rsp), %r8
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0x50(%rsp), %r9
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c504d4
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c51195
testb $0x2, (%rcx)
jne 0x1c50440
testb $0x40, 0x3e(%r15)
je 0x1c504c3
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x1c0(%rsp), %xmm9
vmovaps 0x2c0(%rsp), %xmm14
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x220(%rsp), %ymm20
vxorps %xmm28, %xmm28, %xmm28
leaq 0x4ddaa5(%rip), %rdi # 0x212df48
leaq 0x4db67e(%rip), %r11 # 0x212bb28
movb 0x1b(%rsp), %r10b
movq 0x58(%rsp), %r8
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0x50(%rsp), %r9
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x1c51195
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movq 0x400(%rsp), %rax
movq 0x1e0(%rsp), %rcx
btcq %rax, %rcx
movq %rcx, 0x1e0(%rsp)
tzcntq %rcx, %rax
setae %cl
jae 0x1c5021e
jmp 0x1c5119c
xorl %ecx, %ecx
jmp 0x1c50559
xorl %ecx, %ecx
vxorps %xmm28, %xmm28, %xmm28
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
vmovaps %xmm12, %xmm14
cmpl $0x9, %r13d
jge 0x1c5058f
testb $0x1, %cl
jne 0x1c511b6
vmovaps 0x620(%rsp), %ymm0
vcmpleps 0x20(%rsi){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r12d
setne %r10b
jne 0x1c4f76f
jmp 0x1c511b6
vmovaps %ymm8, 0x180(%rsp)
vpbroadcastd %r13d, %ymm0
vmovdqa %ymm0, 0x1e0(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovss 0x29c149(%rip), %xmm0 # 0x1eec714
vdivss 0x60(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
movl $0x8, %r15d
vmovaps %ymm6, 0xa0(%rsp)
vmovaps %ymm2, 0x80(%rsp)
movl %ecx, 0x120(%rsp)
vpbroadcastd %r15d, %ymm0
vpor 0x30a314(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x1e0(%rsp), %ymm0, %k1
leaq (%rbx,%r11), %rcx
vmovups (%rcx,%r15,4), %ymm3
vmovups 0x484(%rcx,%r15,4), %ymm10
vmovups 0x908(%rcx,%r15,4), %ymm11
vmovups 0xd8c(%rcx,%r15,4), %ymm12
vmovaps 0x5c0(%rsp), %ymm19
vmulps %ymm12, %ymm19, %ymm5
vmovaps 0x5a0(%rsp), %ymm29
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x520(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x600(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm26) + ymm5
vmovaps 0x5e0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x4a0(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x560(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdi), %rax
vmovups (%rax,%r15,4), %ymm2
vmovups 0x484(%rax,%r15,4), %ymm13
vmovaps 0x580(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r15,4), %ymm14
vmovups 0xd8c(%rax,%r15,4), %ymm15
vmulps %ymm15, %ymm19, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm26, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm26) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c5100f
vmovaps %ymm23, %ymm16
vmovaps 0x180(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x4c0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x4e0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x500(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r15,4), %ymm12
vmovups 0x1694(%rcx,%r15,4), %ymm13
vmovups 0x1b18(%rcx,%r15,4), %ymm14
vmovups 0x1f9c(%rcx,%r15,4), %ymm15
vmulps %ymm15, %ymm19, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm26, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm26) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm19, %ymm17
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r15,4), %ymm13
vmovups 0x1b18(%rax,%r15,4), %ymm14
vmovups 0x1f9c(%rax,%r15,4), %ymm16
vmulps %ymm16, %ymm17, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm27, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm27) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r15,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2d05e4(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x1c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2d0570(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm28, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm28
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x29bda7(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x29bd85(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm28, %ymm14, %ymm13
vfmadd213ps %ymm28, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm28
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm28, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm28, %ymm6 # ymm6 = (ymm28 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm28, %ymm6 # ymm6 = (ymm28 * ymm13) + ymm6
vcmpleps %ymm28, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm28, %ymm4 # ymm4 = (ymm28 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm28, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c5105b
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm28, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm28) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x29bb94(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x400(%rsp), %ymm2, %k1
vcmpleps 0x20(%rsi){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl 0x120(%rsp), %ecx
je 0x1c51089
vcmpneqps %ymm28, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm17
je 0x1c510b0
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x29bb1f(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x420(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
testw %ax, %ax
je 0x1c51049
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r8){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c5102d
vbroadcastss 0x29fd64(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x420(%rsp), %ymm1
vfmadd132ps 0x2a037d(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x2e0(%rsp)
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm21, 0x320(%rsp)
movl %r15d, 0x340(%rsp)
movl %r13d, 0x344(%rsp)
vmovaps %xmm7, 0x350(%rsp)
vmovaps 0x2b0(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x2a0(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0x290(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
movl %eax, 0x20(%rsp)
movb %al, 0x390(%rsp)
movq (%rdx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0x78(%rsp), %rax
movq (%rcx,%rax,8), %rax
movl 0x24(%rsi), %ecx
testl %ecx, 0x34(%rax)
je 0x1c51036
movq 0x10(%rdx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c50d32
movb $0x1, %cl
movl %ecx, 0x60(%rsp)
cmpq $0x0, 0x48(%rax)
je 0x1c5103e
movq %rax, 0x150(%rsp)
movb %r10b, 0x1b(%rsp)
movq %r9, 0x50(%rsp)
vmovaps %ymm20, 0x220(%rsp)
vaddps 0x2d01ea(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r15d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x2c0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm21, 0x200(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
movzbl 0x20(%rsp), %r8d
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x490(%rsp)
tzcntq %r8, %rcx
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x470(%rsp)
movb $0x1, %al
movq %rdx, 0x30(%rsp)
movq %rsi, 0x28(%rsp)
movl %eax, 0x60(%rsp)
vmovss 0x3a0(%rsp,%rcx,4), %xmm0
vmovss 0x3c0(%rsp,%rcx,4), %xmm1
vmovss 0x20(%rsi), %xmm2
vmovss %xmm2, 0x1a0(%rsp)
movq %rcx, 0x1b0(%rsp)
vmovss 0x3e0(%rsp,%rcx,4), %xmm2
vmovss %xmm2, 0x20(%rsi)
movq 0x8(%rdx), %rax
vmovss 0x29b8e8(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2d0086(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29bd42(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29bd1c(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x470(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x480(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x490(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovlps %xmm2, 0xf0(%rsp)
vextractps $0x2, %xmm2, 0xf8(%rsp)
vmovss %xmm0, 0xfc(%rsp)
vmovss %xmm1, 0x100(%rsp)
movl 0x3c(%rsp), %ecx
movl %ecx, 0x104(%rsp)
movq 0x78(%rsp), %rcx
movl %ecx, 0x108(%rsp)
movl (%rax), %ecx
movl %ecx, 0x10c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0x110(%rsp)
movl $0xffffffff, 0x1c(%rsp) # imm = 0xFFFFFFFF
leaq 0x1c(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x150(%rsp), %rdi
movq 0x18(%rdi), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %rsi, 0xd8(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x1, 0xe8(%rsp)
movq 0x48(%rdi), %rax
testq %rax, %rax
movq %r8, 0x20(%rsp)
je 0x1c50f8b
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x20(%rsp), %r8
vmovaps 0x40(%rsp), %xmm7
movq 0x28(%rsp), %rsi
movq 0x30(%rsp), %rdx
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c50fe2
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c510b7
testb $0x2, (%rcx)
jne 0x1c50faf
movq 0x150(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c50fbc
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
movq 0x30(%rsp), %rdx
movq 0x28(%rsp), %rsi
vmovaps 0x40(%rsp), %xmm7
movq 0x20(%rsp), %r8
jne 0x1c510b7
vmovss 0x1a0(%rsp), %xmm0
vmovss %xmm0, 0x20(%rsi)
movq 0x1b0(%rsp), %rax
btcq %rax, %r8
tzcntq %r8, %rcx
setae %al
jae 0x1c50de6
jmp 0x1c510bb
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
movl 0x120(%rsp), %ecx
jmp 0x1c51049
movl $0x0, 0x60(%rsp)
movl 0x120(%rsp), %ecx
orb 0x60(%rsp), %cl
addq $0x8, %r15
cmpl %r15d, %r13d
jg 0x1c505f7
jmp 0x1c5055f
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm17
movl 0x120(%rsp), %ecx
jmp 0x1c50c2f
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm17
jmp 0x1c50c2f
xorl %eax, %eax
jmp 0x1c50c1d
movl 0x60(%rsp), %eax
andb $0x1, %al
movl %eax, 0x60(%rsp)
movq 0x50(%rsp), %r9
movq 0x58(%rsp), %r8
movb 0x1b(%rsp), %r10b
leaq 0x4daa51(%rip), %r11 # 0x212bb28
leaq 0x4dce6a(%rip), %rdi # 0x212df48
vxorps %xmm28, %xmm28, %xmm28
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x220(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x200(%rsp), %ymm21
vmovaps 0x260(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
jmp 0x1c5103e
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %xmm9
jmp 0x1c50077
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
vmovaps 0x40(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm8
jmp 0x1c50077
xorl %eax, %eax
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
jmp 0x1c50077
movl 0x120(%rsp), %ecx
andb $0x1, %cl
vmovaps 0xa0(%rsp), %ymm6
vmovaps 0x80(%rsp), %ymm2
jmp 0x1c50559
andb $0x1, %r10b
movl %r10d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 4>::intersect_t<embree::avx512::RibbonCurve1IntersectorK<embree::BSplineCurveT, 4, 8>, embree::avx512::Intersect1KEpilogMU<8, 4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
/// Intersects one active ray lane (index k) of a K-wide ray packet against a
/// block of M curve segments stored in a CurveNv primitive.
///
/// @param pre      per-ray precomputed data, forwarded to the curve intersector
/// @param ray      the K-wide ray/hit packet; lane k is tested and may be updated
/// @param k        index of the active ray lane within the packet
/// @param context  query context; used here to look up geometry via context->scene
/// @param prim     the CurveNv curve block being tested
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Broad phase: test ray k against all M curves of the block at once.
// 'valid' marks candidate curves, 'tNear' holds their entry distances.
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Iterate over the candidate curves, lowest set bit first.
size_t mask = movemask(valid);
while (mask)
{
// bscf: take the index of the lowest set bit and clear it from 'mask'
// (presumably "bit scan and clear forward" -- verify against embree's bit utils).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of curve i.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Hide memory latency of the narrow-phase test below: prefetch the
// control points of the next candidate into L1, and of the one after
// that into L2, before doing the expensive per-curve intersection.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Narrow phase: exact curve intersection; the Epilog functor handles hit
// acceptance/reporting (filter callbacks, updating ray.tfar) on success.
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// A committed hit may have shortened ray.tfar[k]; drop remaining
// candidates whose conservative entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x6c0, %rsp # imm = 0x6C0
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0xa0(%rsp)
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x40(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x10(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2c142a(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2cfbf8(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2cfb6d(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x29fc7f(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x29b36d(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x30(%r12,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2cea79(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x80(%r12,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2cea54(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x309456(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x680(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c53598
leaq (%r8,%rax), %rsi
addq $0x6, %rsi
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %rsi
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0xa0(%rsp), %rcx
leaq (%rcx,%rax), %rdi
addq $0x10, %rdi
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x2f0(%rsp)
leaq 0x4da5f0(%rip), %r13 # 0x212bb28
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
vmovaps %ymm21, 0x180(%rsp)
vmovaps %ymm20, 0x1a0(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x4(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r8), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x28(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%rsi,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c515bb
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%rsi,%rdx)
prefetcht0 0x40(%rsi,%rdx)
testq %rcx, %rcx
je 0x1c515bb
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%rsi,%rcx)
prefetcht1 0x40(%rsi,%rcx)
vmovups 0x10(%rsi,%rax), %xmm13
vmovups 0x20(%rsi,%rax), %xmm26
vmovups 0x30(%rsi,%rax), %xmm23
movq %rbx, 0x240(%rsp)
movl 0x248(%rbx), %edx
vmovss (%r12,%r15,4), %xmm0
vinsertps $0x1c, 0x10(%r12,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r12,%r15,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm26, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
movl %edx, %ecx
vmovups (%r13,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2c104a(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r13,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r13,%rbx), %ymm15
vbroadcastss %xmm10, %ymm31
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%r13,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm29
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm29, %ymm4
vfmadd231ps %ymm31, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm31) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x4dc810(%rip), %rdx # 0x212df48
vmovups (%rdx,%rbx), %ymm2
vmovups 0x484(%rdx,%rbx), %ymm17
vmovups 0x908(%rdx,%rbx), %ymm18
vmovups 0xd8c(%rdx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm29, %ymm6
vfmadd231ps %ymm31, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm31) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0xe0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x500(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm27
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm27, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm26, %xmm26, %xmm0 # xmm0 = xmm26[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x5c0(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x5e0(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x600(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x620(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x260(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm27, 0x2c0(%rsp)
vfmadd231ps %ymm27, %ymm27, %ymm21 # ymm21 = (ymm27 * ymm27) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x30(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2cf624(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x130(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm26, %xmm3
vmovaps %xmm26, 0x120(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x110(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %ecx, %xmm23, %xmm12
vmovaps %xmm12, 0x100(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2cf613(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x29f6ad(%rip), %xmm3, %xmm26 # 0x1ef0fe4
vbroadcastss 0x2cf59c(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0x30(%r12,%r15,4), %xmm8
vmovaps %ymm31, 0x660(%rsp)
vmovaps %ymm30, 0x640(%rsp)
vmovaps %ymm20, 0x5a0(%rsp)
vmovaps %ymm21, 0x580(%rsp)
vmovaps %ymm22, 0x560(%rsp)
vmovaps %ymm3, 0x540(%rsp)
je 0x1c52142
vmovaps %xmm8, 0x220(%rsp)
vmovaps %xmm26, 0x80(%rsp)
vmovaps %ymm3, %ymm26
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm26, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r13,%rbx), %ymm3
vmovups 0x1694(%r13,%rbx), %ymm10
vmovups 0x1b18(%r13,%rbx), %ymm11
vmovups 0x1f9c(%r13,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm29, %ymm8
vmulps %ymm12, %ymm26, %ymm12
vfmadd231ps %ymm31, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm31) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0xe0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x500(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovups 0x1f9c(%rdx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x160(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm29, 0x140(%rsp)
vmulps %ymm13, %ymm29, %ymm14
vmulps %ymm13, %ymm26, %ymm13
vmovaps 0x80(%rsp), %xmm26
vfmadd231ps %ymm31, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm31) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2cf3c8(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm26, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x2c0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x260(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2cf34e(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm27, %xmm27, %xmm27
vfmadd213ps %ymm27, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm27
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x29ab81(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x29ab5f(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm27, %ymm9, %ymm13
vfmadd213ps %ymm27, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm27
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm27, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm13) + ymm3
vcmpleps %ymm27, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x180(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
je 0x1c53060
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x29a95b(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x220(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x160(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm29
je 0x1c53093
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
je 0x1c530ad
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x29a8d8(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4e0(%rsp)
movzbl %al, %r13d
vmovaps %ymm2, %ymm3
testw %r13w, %r13w
je 0x1c52139
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xa0(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r13b
je 0x1c52139
vbroadcastss 0x29eb28(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4e0(%rsp), %ymm1
vfmadd132ps 0x29f141(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm1, 0x4e0(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
movl $0x0, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm7, 0x370(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %r13b, 0x3b0(%rsp)
movl 0x90(%r12,%r15,4), %eax
movq 0x240(%rsp), %r9
testl %eax, 0x34(%r9)
je 0x1c52139
movl %ecx, (%rsp)
vaddps 0x2cefe7(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x29a7b3(%rip), %xmm1 # 0x1eec714
vdivss 0x100(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm3, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x299a7b(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
jne 0x1c530c1
movq 0x240(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c530c1
vmovss 0x3c0(%rsp,%r11,4), %xmm0
vmovss 0x3e0(%rsp,%r11,4), %xmm1
vmovss 0x29a6e6(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2cee84(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29ab40(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29ab1a(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x110(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x120(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vmovaps 0x60(%rsp), %ymm6
vfmadd132ps 0x130(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovss 0x400(%rsp,%r11,4), %xmm3
vmovss %xmm3, 0x80(%r12,%r15,4)
vmovaps 0x40(%rsp), %ymm3
vmovss %xmm2, 0xc0(%r12,%r15,4)
vextractps $0x1, %xmm2, 0xd0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0xe0(%r12,%r15,4)
vmovss %xmm0, 0xf0(%r12,%r15,4)
vmovss %xmm1, 0x100(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x110(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x120(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r12,%r15,4)
movl (%rsp), %ecx
leaq 0x4d99e8(%rip), %r13 # 0x212bb28
jmp 0x1c5217c
vxorps %xmm27, %xmm27, %xmm27
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0xe0(%rsp), %ymm17
cmpl $0x9, %ecx
jge 0x1c521a5
vmovaps 0x680(%rsp), %ymm0
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c51538
jmp 0x1c53598
vpbroadcastd %ecx, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss %xmm26, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovss 0x29a53b(%rip), %xmm0 # 0x1eec714
vdivss 0x100(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x100(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x210(%rsp)
movl $0x8, %r9d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
movl %ecx, (%rsp)
vpbroadcastd %r9d, %ymm0
vpor 0x3086e6(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x260(%rsp), %ymm0, %k1
leaq (%rbx,%r13), %rcx
vmovups (%rcx,%r9,4), %ymm3
vmovups 0x484(%rcx,%r9,4), %ymm10
vmovups 0x908(%rcx,%r9,4), %ymm11
vmovups 0xd8c(%rcx,%r9,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x5c0(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x660(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x640(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm26) + ymm4
vmovaps 0x5e0(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x500(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x600(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdx), %rax
vmovups (%rax,%r9,4), %ymm2
vmovups 0x484(%rax,%r9,4), %ymm13
vmovaps 0x620(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r9,4), %ymm14
vmovups 0xd8c(%rax,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm26, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm26) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c52b33
vmovaps %ymm23, %ymm16
vmovaps 0x540(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x560(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x580(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x5a0(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r9,4), %ymm12
vmovups 0x1694(%rcx,%r9,4), %ymm13
vmovups 0x1b18(%rcx,%r9,4), %ymm14
vmovups 0x1f9c(%rcx,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm26, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm26) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm26, %ymm25
vmovaps %ymm19, %ymm26
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r9,4), %ymm13
vmovups 0x1b18(%rax,%r9,4), %ymm14
vmovups 0x1f9c(%rax,%r9,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r9,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x500(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2ce9bc(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x2c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2ce948(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm27, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm27
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x29a17f(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x29a15d(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm27, %ymm14, %ymm13
vfmadd213ps %ymm27, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm27
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm27, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm13) + ymm6
vcmpleps %ymm27, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c52b60
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x299f6c(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x240(%rsp), %ymm2, %k1
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl (%rsp), %ecx
je 0x1c52b7a
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
vmovaps 0xe0(%rsp), %ymm17
je 0x1c52b9c
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x299efa(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4c0(%rsp)
movzbl %al, %r11d
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
testw %r11w, %r11w
je 0x1c52b4e
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xa0(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %r11b
je 0x1c52b4e
movl %r11d, %eax
vbroadcastss 0x29e136(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4c0(%rsp), %ymm1
vfmadd132ps 0x29e74f(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x300(%rsp)
vmovaps %ymm1, 0x4c0(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm21, 0x340(%rsp)
movl %r9d, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm7, 0x370(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %al, 0x3b0(%rsp)
movl %r11d, %r13d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x28(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x90(%r12,%r15,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c52b27
vaddps 0x2ce5ef(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r9d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x220(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm21, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x299086(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movq %rcx, 0x80(%rsp)
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r13d
movq 0x80(%rsp), %rcx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c52ba4
cmpq $0x0, 0x40(%rcx)
jne 0x1c52ba4
vmovss 0x3c0(%rsp,%r13,4), %xmm0
vmovss 0x3e0(%rsp,%r13,4), %xmm1
vmovss 0x299cf5(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2ce493(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x29a14f(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29a129(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x110(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x120(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x130(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm2) + xmm4
vmovss 0x400(%rsp,%r13,4), %xmm3
vmovss %xmm3, 0x80(%r12,%r15,4)
vmovss %xmm2, 0xc0(%r12,%r15,4)
vextractps $0x1, %xmm2, 0xd0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0xe0(%r12,%r15,4)
vmovss %xmm0, 0xf0(%r12,%r15,4)
vmovss %xmm1, 0x100(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x110(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x120(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r12,%r15,4)
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
leaq 0x4d8ffa(%rip), %r13 # 0x212bb28
movl (%rsp), %ecx
jmp 0x1c52b4e
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
movl (%rsp), %ecx
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
addq $0x8, %r9
cmpl %r9d, %ecx
jg 0x1c5222c
jmp 0x1c52181
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
movl (%rsp), %ecx
jmp 0x1c52b8f
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
vmovaps 0xe0(%rsp), %ymm17
jmp 0x1c5284f
xorl %r11d, %r11d
jmp 0x1c52843
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x530(%rsp)
vmovaps %ymm20, 0x1a0(%rsp)
vmovaps %ymm21, 0x180(%rsp)
movq %r9, 0xd8(%rsp)
movl %r11d, %eax
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovaps %ymm28, 0x160(%rsp)
vmovaps %ymm29, 0x140(%rsp)
movl %eax, 0x200(%rsp)
vmovss 0x80(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x1f0(%rsp)
vmovss 0x400(%rsp,%r13,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r13,4), %xmm1
vbroadcastss 0x3e0(%rsp,%r13,4), %xmm2
vmovss %xmm0, 0x80(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x299aad(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2ce24b(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x299f07(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm1, %xmm5
vfnmsub213ss %xmm4, %xmm1, %xmm5 # xmm5 = -(xmm1 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmulss %xmm1, %xmm1, %xmm4
vmovss 0x299ee1(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x530(%rsp), %xmm4, %xmm4
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1d0(%rsp), %xmm4, %xmm0 # xmm0 = (xmm0 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x1e0(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm4
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x430(%rsp)
vmovaps %xmm4, 0x440(%rsp)
vmovaps %xmm0, 0x450(%rsp)
vmovaps %xmm1, 0x460(%rsp)
vmovaps %xmm2, 0x470(%rsp)
vmovaps 0x210(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovdqa 0x100(%rsp), %xmm0
vmovdqa %xmm0, 0x490(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
movq %rcx, %r11
leaq 0x4a0(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x4a0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0xa8(%rsp)
movq 0x18(%r11), %rcx
movq %rcx, 0xb0(%rsp)
movq %rax, 0xb8(%rsp)
movq %r12, 0xc0(%rsp)
leaq 0x430(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl $0x4, 0xd0(%rsp)
movq 0x40(%r11), %rax
testq %rax, %rax
je 0x1c52e4b
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd8(%rsp), %r9
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4db111(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c52fb1
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c52f18
testb $0x2, (%rcx)
jne 0x1c52e98
movq 0x80(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c52f18
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd8(%rsp), %r9
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4db044(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c52fb1
movq 0xc0(%rsp), %rax
movq 0xc8(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c52fc4
vmovd 0x1f0(%rsp), %xmm0
vmovd %xmm0, 0x80(%r12,%r15,4)
movl $0x1, %eax
shlxl %r13d, %eax, %eax
kmovd %eax, %k0
movzbl 0x200(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %eax
ktestb %k1, %k0
je 0x1c5304b
kmovd %eax, %k1
vbroadcastss 0x298a1a(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
movl %eax, %r11d
kmovd %k0, %eax
andb %r11b, %al
movzbl %al, %eax
movzbl %r11b, %ecx
cmovnel %eax, %ecx
movl %r11d, %eax
tzcntl %ecx, %r13d
testb %al, %al
movq 0x80(%rsp), %rcx
jne 0x1c52c19
jmp 0x1c52b27
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x160(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x220(%rsp), %xmm8
jmp 0x1c51e61
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm7
jmp 0x1c51e61
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
jmp 0x1c51e61
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x210(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x200(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovss 0x80(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x2c0(%rsp)
vmovss 0x400(%rsp,%r11,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r11,4), %xmm1
vbroadcastss 0x3e0(%rsp,%r11,4), %xmm2
vmovss %xmm0, 0x80(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x29959b(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2cdd39(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x2999f5(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm1, %xmm5
vfnmsub213ss %xmm4, %xmm1, %xmm5 # xmm5 = -(xmm1 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmulss %xmm1, %xmm1, %xmm4
vmovss 0x2999cf(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x1d0(%rsp), %xmm4, %xmm4
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1e0(%rsp), %xmm4, %xmm0 # xmm0 = (xmm0 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x1f0(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm4
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x430(%rsp)
vmovaps %xmm4, 0x440(%rsp)
vmovaps %xmm0, 0x450(%rsp)
vmovaps %xmm1, 0x460(%rsp)
vmovaps %xmm2, 0x470(%rsp)
vmovaps 0x200(%rsp), %xmm0
vmovaps %xmm0, 0x480(%rsp)
vmovdqa 0x210(%rsp), %xmm0
vmovdqa %xmm0, 0x490(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
leaq 0x4a0(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x4a0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x90(%rsp)
leaq 0x90(%rsp), %rcx
movq %rcx, 0xa8(%rsp)
movq 0x240(%rsp), %r9
movq 0x18(%r9), %rcx
movq %rcx, 0xb0(%rsp)
movq %rax, 0xb8(%rsp)
movq %r12, 0xc0(%rsp)
leaq 0x430(%rsp), %rax
movq %rax, 0xc8(%rsp)
movl $0x4, 0xd0(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movq %r11, 0x260(%rsp)
je 0x1c5337f
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm26
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4dabdd(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c534f6
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c5345d
testb $0x2, (%rcx)
jne 0x1c533cc
movq 0x240(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c5345d
leaq 0xa8(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm26
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4daaff(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x90(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c534f6
movq 0xc0(%rsp), %rax
movq 0xc8(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c53509
vmovd 0x2c0(%rsp), %xmm0
vmovd %xmm0, 0x80(%r12,%r15,4)
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
movzbl %r13b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x80(%r12,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r13d
ktestb %k1, %k0
je 0x1c53587
kmovd %r13d, %k1
vbroadcastss 0x2984d8(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
testb %r13b, %r13b
movl (%rsp), %ecx
jne 0x1c53132
jmp 0x1c52139
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 4>::occluded_t<embree::avx512::RibbonCurve1IntersectorK<embree::BSplineCurveT, 4, 8>, embree::avx512::Occluded1KEpilogMU<8, 4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Shadow-ray (occlusion) test of ray lane k against a packet of up to M curve
// primitives. Returns true as soon as ANY candidate curve occludes the ray;
// returns false when every candidate has been rejected.
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Coarse packet-level test: produces a lane mask of curves whose bounds the
// ray hits, plus per-lane entry distances in tNear.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Iterate candidate curves in lane order, consuming one set bit per pass.
while (mask)
{
// bscf: index of lowest set bit; also clears that bit in mask
// (presumably "bit-scan and clear forward" — the loop relies on mask
// shrinking each iteration to terminate).
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1); // statistics counter (no-op unless stats enabled)
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control points of candidate curve i.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch of the NEXT one/two candidates' vertex data so their
// loads overlap the (expensive) precise intersection below. mask already
// has bit i cleared; mask1 peels the following candidate i1.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
// Second-next candidate: colder, so only pull into L2.
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Precise curve intersection; the Epilog commits the occlusion result
// (and presumably runs intersection filters — confirm against Epilog
// implementation). First confirmed hit ends the whole query.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
return true;
// Re-cull the remaining candidates against the ray's current tfar:
// a filter callback inside the epilog may have shortened the ray, so
// lanes whose entry distance now lies beyond tfar are dropped.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x720, %rsp # imm = 0x720
movq %r8, %r9
movq %rdx, %r14
movq %rsi, %r15
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rsi
leaq (%rsi,%rsi,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%r15,%r14,4), %xmm1
vmovss 0x40(%r15,%r14,4), %xmm2
vinsertps $0x10, 0x10(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x28(%rsp)
vinsertps $0x20, 0x60(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rdx,%rdx,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rdx,%rsi,2), %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %r10
leal (%r10,%r10), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2bf052(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2cd81b(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2cd78a(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm4, %ymm5
vbroadcastss 0x29d8a1(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x298f8f(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r8
subq %rdx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%r10), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rdx,%rdx), %r10
addq %rdx, %rsi
shlq $0x3, %rcx
subq %rdx, %rcx
movl %edx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rsi), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x30(%r15,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2cc69f(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x80(%r15,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2cc67a(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %edx, %ymm1
vpcmpgtd 0x307076(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x6e0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %sil
je 0x1c55586
leaq (%r9,%rax), %r12
addq $0x6, %r12
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
addq $0x10, %r12
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r10
addq $0x10, %r10
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x2c0(%rsp)
vmovaps %ymm5, 0x80(%rsp)
vmovaps %ymm2, 0x60(%rsp)
vmovaps %ymm20, 0x260(%rsp)
tzcntq %r13, %rax
blsrq %r13, %r13
movl 0x6(%r9,%rax,4), %r11d
shll $0x6, %eax
movq %r13, %rcx
movl 0x2(%r9), %r8d
movq 0x28(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq %r8, 0xc8(%rsp)
movq (%rdx,%r8,8), %r8
vmovups (%r12,%rax), %xmm2
subq $0x1, %rcx
jb 0x1c53992
andq %r13, %rcx
tzcntq %r13, %rdx
shll $0x6, %edx
prefetcht0 (%r12,%rdx)
prefetcht0 0x40(%r12,%rdx)
testq %rcx, %rcx
je 0x1c53992
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r12,%rcx)
prefetcht1 0x40(%r12,%rcx)
vmovups 0x10(%r12,%rax), %xmm13
vmovups 0x20(%r12,%rax), %xmm27
vmovups 0x30(%r12,%rax), %xmm23
movq %r8, 0x220(%rsp)
movl 0x248(%r8), %r8d
vmovss (%r15,%r14,4), %xmm0
vinsertps $0x1c, 0x10(%r15,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r15,%r14,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm2, %xmm0
vmovaps %xmm2, %xmm7
vmovaps %xmm2, 0x30(%rsp)
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r10), %xmm4
vmovaps 0x10(%r10), %xmm5
vmovaps 0x20(%r10), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm7, %xmm7, %xmm0 # xmm0 = xmm7[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r8d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
leaq 0x4d809d(%rip), %rdx # 0x212bb28
vmovups (%rdx,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2bec64(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%rdx,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm28
vmovups 0x908(%rdx,%rbx), %ymm15
vbroadcastss %xmm10, %ymm31
vpermps %ymm10, %ymm1, %ymm29
vmovups 0xd8c(%rdx,%rbx), %ymm16
vbroadcastss %xmm11, %ymm30
vpermps %ymm11, %ymm1, %ymm26
vmulps %ymm16, %ymm30, %ymm5
vmulps %ymm16, %ymm26, %ymm4
vfmadd231ps %ymm31, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm31) + ymm5
vfmadd231ps %ymm29, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm29) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm28, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm28) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x4da42a(%rip), %rcx # 0x212df48
vmovups (%rcx,%rbx), %ymm2
vmovups 0x484(%rcx,%rbx), %ymm17
vmovups 0x908(%rcx,%rbx), %ymm18
vmovups 0xd8c(%rcx,%rbx), %ymm19
vmulps %ymm19, %ymm30, %ymm7
vmulps %ymm19, %ymm26, %ymm6
vfmadd231ps %ymm31, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm31) + ymm7
vfmadd231ps %ymm29, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm29) + ymm6
vmovaps %ymm21, 0x180(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vfmadd231ps %ymm28, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm28) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm24
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm24, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x5e0(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x600(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x620(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x640(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x100(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vfmadd231ps %ymm24, %ymm24, %ymm21 # ymm21 = (ymm24 * ymm24) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vblendps $0x8, 0x30(%rsp), %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],mem[3]
vbroadcastss 0x2cd25b(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x2f0(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x2e0(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x2d0(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r8d, %xmm23, %xmm12
vmovaps %xmm12, 0x170(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2cd24a(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x29d2e6(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x2cd1d5(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm21
vpermps %ymm9, %ymm3, %ymm20
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0x30(%r15,%r14,4), %xmm9
vmovaps %ymm30, 0x680(%rsp)
vmovaps %ymm26, 0x660(%rsp)
vmovaps %ymm28, 0x560(%rsp)
vmovaps %ymm31, 0x6c0(%rsp)
vmovaps %ymm29, 0x6a0(%rsp)
vmovaps %ymm21, 0x5c0(%rsp)
vmovaps %ymm20, 0x5a0(%rsp)
vmovaps %ymm22, 0x580(%rsp)
je 0x1c547d9
vmovaps %xmm9, 0x200(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm20, %ymm17 # ymm17 = (ymm20 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm21, %ymm2 # ymm2 = (ymm21 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm20, %ymm14 # ymm14 = (ymm20 * ymm14) + ymm15
vmovups 0x1210(%rdx,%rbx), %ymm3
vmovups 0x1694(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%rdx,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm21, %ymm25 # ymm25 = (ymm21 * ymm25) + ymm14
vmulps %ymm12, %ymm30, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm26, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm31, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm31) + ymm9
vfmadd231ps %ymm29, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm29) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x180(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vfmadd231ps %ymm28, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm28) + ymm8
vfmadd231ps %ymm10, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rcx,%rbx), %ymm10
vmovups 0x1b18(%rcx,%rbx), %ymm11
vmovups 0x1f9c(%rcx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm3) + ymm12
vmulps %ymm13, %ymm30, %ymm3
vmulps %ymm13, %ymm26, %ymm14
vmovaps %ymm15, 0x1a0(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm31, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm31) + ymm3
vfmadd231ps %ymm29, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm29) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rcx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm28, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm28) + ymm14
vfmadd231ps %ymm11, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm10) + ymm13
vbroadcastss 0x2cd011(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm23
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm24, %ymm9 {%k2}
vmovaps 0x100(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm24, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2ccf99(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm26, %xmm26, %xmm26
vfmadd213ps %ymm26, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm26
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2987cc(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2987aa(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm26, %ymm9, %ymm13
vfmadd213ps %ymm26, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm26
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm26, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm26, %ymm3 # ymm3 = (ymm26 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm26, %ymm3 # ymm3 = (ymm26 * ymm13) + ymm3
vcmpleps %ymm26, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm26, %ymm3 # ymm3 = (ymm26 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm26, %ymm3 # ymm3 = (ymm26 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm26, %ymm4 # ymm4 = (ymm26 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm26, %ymm4 # ymm4 = (ymm26 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm26, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x260(%rsp), %ymm20
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm23, %xmm14
je 0x1c5550f
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm26, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm26) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vbroadcastss 0x2985a9(%rip), %ymm12 # 0x1eec714
vfnmadd213ps %ymm12, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + ymm12
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x200(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x80(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c5553d
vcmpneqps %ymm26, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm8
je 0x1c55562
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vsubps %ymm3, %ymm12, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm12, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x520(%rsp)
movzbl %al, %eax
testw %ax, %ax
vmovaps 0x240(%rsp), %ymm21
je 0x1c5481f
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c54823
movl %r11d, 0x100(%rsp)
vbroadcastss 0x29c780(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x520(%rsp), %ymm1
vfmadd132ps 0x29cd99(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x3e0(%rsp)
vmovaps %ymm1, 0x520(%rsp)
vmovaps %ymm1, 0x400(%rsp)
vmovaps %ymm2, 0x420(%rsp)
movl $0x0, 0x440(%rsp)
movl %r8d, 0x444(%rsp)
vmovaps %xmm7, 0x450(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x460(%rsp)
vmovaps 0x2e0(%rsp), %xmm0
vmovaps %xmm0, 0x470(%rsp)
vmovdqa 0x2d0(%rsp), %xmm0
vmovdqa %xmm0, 0x480(%rsp)
movb %al, 0x490(%rsp)
movl 0x90(%r15,%r14,4), %ecx
movq 0x220(%rsp), %r11
testl %ecx, 0x34(%r11)
je 0x1c5482e
movq 0x28(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c5431c
movb $0x1, %r11b
movq 0x220(%rsp), %rcx
cmpq $0x0, 0x48(%rcx)
je 0x1c54831
vaddps 0x2ccc1c(%rip), %ymm5, %ymm0 # 0x1f20f40
vmovss 0x2983e8(%rip), %xmm1 # 0x1eec714
vdivss 0x170(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x520(%rsp), %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps %ymm2, 0x4e0(%rsp)
movzbl %al, %ecx
tzcntq %rcx, %r11
movq 0xc8(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x160(%rsp)
vmovaps 0x460(%rsp), %xmm0
vmovaps %xmm0, 0x150(%rsp)
movl 0x100(%rsp), %eax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x140(%rsp)
movq %r11, %rax
vmovaps 0x470(%rsp), %xmm0
vmovaps %xmm0, 0x130(%rsp)
vmovaps 0x480(%rsp), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
movb $0x1, %r11b
vmovaps %xmm14, 0x120(%rsp)
movq %r9, 0x58(%rsp)
movq %rdi, 0x50(%rsp)
movb %sil, 0x1f(%rsp)
movq %r10, 0x48(%rsp)
vmovaps %ymm5, 0x80(%rsp)
vmovaps %ymm2, 0x60(%rsp)
movl %r8d, 0x24(%rsp)
movq %rcx, 0x320(%rsp)
movl %r11d, 0x20(%rsp)
vmovss 0x80(%r15,%r14,4), %xmm10
vmovss 0x4e0(%rsp,%rax,4), %xmm0
vbroadcastss 0x4a0(%rsp,%rax,4), %xmm1
movq %rax, 0x1e0(%rsp)
vbroadcastss 0x4c0(%rsp,%rax,4), %xmm2
vmovss %xmm0, 0x80(%r15,%r14,4)
movq 0x28(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2982b9(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2cca57(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x298713(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm1, %xmm5
vfnmsub213ss %xmm4, %xmm1, %xmm5 # xmm5 = -(xmm1 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmulss %xmm1, %xmm1, %xmm4
vmovss 0x2986ed(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x1d0(%rsp), %xmm4, %xmm4
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x130(%rsp), %xmm4, %xmm0 # xmm0 = (xmm0 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x150(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm4
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x350(%rsp)
vmovaps %xmm4, 0x360(%rsp)
vmovaps %xmm0, 0x370(%rsp)
vmovaps %xmm1, 0x380(%rsp)
vmovaps %xmm2, 0x390(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovdqa 0x160(%rsp), %xmm0
vmovdqa %xmm0, 0x3b0(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
leaq 0x3c0(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vmovaps 0x2c0(%rsp), %xmm0
vmovaps %xmm0, 0xb0(%rsp)
leaq 0xb0(%rsp), %rcx
movq %rcx, 0xd0(%rsp)
movq 0x220(%rsp), %r11
movq 0x18(%r11), %rcx
movq %rcx, 0xd8(%rsp)
movq %rax, 0xe0(%rsp)
movq %r15, 0xe8(%rsp)
leaq 0x350(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x4, 0xf8(%rsp)
movq 0x48(%r11), %rax
testq %rax, %rax
movq %rdi, %r11
vmovss %xmm10, 0x300(%rsp)
je 0x1c54665
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x300(%rsp), %xmm10
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x120(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %ymm8
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
movl 0x24(%rsp), %r8d
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x260(%rsp), %ymm20
vxorps %xmm26, %xmm26, %xmm26
leaq 0x4d74df(%rip), %rdx # 0x212bb28
movq 0x48(%rsp), %r10
movb 0x1f(%rsp), %sil
vmovaps 0x240(%rsp), %ymm21
movq 0x50(%rsp), %r11
movq 0x58(%rsp), %r9
vmovdqa 0xb0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c54795
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
je 0x1c5474e
testb $0x2, (%rcx)
jne 0x1c546ba
movq 0x220(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c5474e
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x300(%rsp), %xmm10
vmovaps 0x200(%rsp), %xmm9
vmovaps 0x120(%rsp), %xmm14
vmovaps 0x1a0(%rsp), %ymm8
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
movl 0x24(%rsp), %r8d
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm5
vxorps %xmm26, %xmm26, %xmm26
leaq 0x4d73f6(%rip), %rdx # 0x212bb28
movq 0x48(%rsp), %r10
movb 0x1f(%rsp), %sil
vmovaps 0x240(%rsp), %ymm21
movq 0x50(%rsp), %r11
movq 0x58(%rsp), %r9
movq %r11, %rdi
vmovdqa 0xb0(%rsp), %xmm0
movq 0xe8(%rsp), %rax
vmovaps 0x80(%rax), %xmm1
vptestmd %xmm0, %xmm0, %k1
vbroadcastss 0x29840a(%rip), %xmm1 {%k1} # 0x1eecb84
vmovaps %xmm1, 0x80(%rax)
kortestb %k1, %k1
movq 0x320(%rsp), %rcx
je 0x1c547af
jmp 0x1c55578
movq %r11, %rdi
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
movq 0x320(%rsp), %rcx
vmovss %xmm10, 0x80(%r15,%r14,4)
movq 0x1e0(%rsp), %rax
btcq %rax, %rcx
tzcntq %rcx, %rax
setae %r11b
jae 0x1c54404
jmp 0x1c5557d
xorl %eax, %eax
vxorps %xmm26, %xmm26, %xmm26
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x240(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps %xmm12, %xmm14
jmp 0x1c54843
xorl %eax, %eax
jmp 0x1c54843
xorl %eax, %eax
leaq 0x4d971c(%rip), %rcx # 0x212df48
jmp 0x1c54843
xorl %r11d, %r11d
leaq 0x4d9710(%rip), %rcx # 0x212df48
movl %r11d, %eax
movl 0x100(%rsp), %r11d
cmpl $0x9, %r8d
jge 0x1c54881
vmovaps %ymm21, 0x240(%rsp)
testb $0x1, %al
jne 0x1c55586
vmovaps 0x6e0(%rsp), %ymm0
vcmpleps 0x80(%r15,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne %sil
jne 0x1c5390d
jmp 0x1c55586
vmovaps %ymm8, 0x1a0(%rsp)
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x220(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x200(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x320(%rsp)
vmovss 0x297e57(%rip), %xmm0 # 0x1eec714
vdivss 0x170(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x300(%rsp)
movl %r11d, 0x100(%rsp)
movl %eax, %r11d
movq 0xc8(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x170(%rsp)
movl %r11d, %eax
movl 0x100(%rsp), %r11d
vpbroadcastd %r11d, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
movl $0x8, %r11d
vmovaps %ymm5, 0x80(%rsp)
vmovaps %ymm2, 0x60(%rsp)
movl %r8d, 0x24(%rsp)
movl %eax, 0x20(%rsp)
vpbroadcastd %r11d, %ymm0
vpor 0x305fe4(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x220(%rsp), %ymm0, %k1
movq %rcx, %rax
leaq (%rbx,%rdx), %rcx
vmovups (%rcx,%r11,4), %ymm3
vmovups 0x484(%rcx,%r11,4), %ymm10
vmovups 0x908(%rcx,%r11,4), %ymm11
vmovups 0xd8c(%rcx,%r11,4), %ymm12
vmovaps 0x680(%rsp), %ymm28
vmulps %ymm12, %ymm28, %ymm5
vmovaps 0x660(%rsp), %ymm29
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x5e0(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x6c0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x6a0(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x600(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x560(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x620(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rax), %rax
vmovups (%rax,%r11,4), %ymm2
vmovups 0x484(%rax,%r11,4), %ymm13
vmovaps 0x640(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r11,4), %ymm14
vmovups 0xd8c(%rax,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c55480
vmovaps %ymm23, %ymm16
vmovaps 0x1a0(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x580(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x5a0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x5c0(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r11,4), %ymm12
vmovups 0x1694(%rcx,%r11,4), %ymm13
vmovups 0x1b18(%rcx,%r11,4), %ymm14
vmovups 0x1f9c(%rcx,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm27, %ymm25
vmovaps %ymm19, %ymm27
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r11,4), %ymm13
vmovups 0x1b18(%rax,%r11,4), %ymm14
vmovups 0x1f9c(%rax,%r11,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r11,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x560(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2cc2a7(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x200(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2cc233(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm26, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm26
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x297a6a(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x297a48(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm26, %ymm14, %ymm13
vfmadd213ps %ymm26, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm26
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm26, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm26, %ymm6 # ymm6 = (ymm26 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm26, %ymm6 # ymm6 = (ymm26 * ymm13) + ymm6
vcmpleps %ymm26, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm26, %ymm3 # ymm3 = (ymm26 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm26, %ymm3 # ymm3 = (ymm26 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm26, %ymm4 # ymm4 = (ymm26 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm26, %ymm4 # ymm4 = (ymm26 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm26, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c554d9
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm26, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm26) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x297857(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x320(%rsp), %ymm2, %k1
vcmpleps 0x80(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
leaq 0x4d9053(%rip), %rcx # 0x212df48
je 0x1c554e4
vcmpneqps %ymm26, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm17
je 0x1c55508
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2977e1(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x500(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
testw %ax, %ax
je 0x1c554ab
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c554a4
vbroadcastss 0x29ba28(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x500(%rsp), %ymm1
vfmadd132ps 0x29c041(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x3e0(%rsp)
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm1, 0x400(%rsp)
vmovaps %ymm21, 0x420(%rsp)
movl %r11d, 0x440(%rsp)
movl %r8d, 0x444(%rsp)
vmovaps %xmm7, 0x450(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x460(%rsp)
vmovaps 0x2e0(%rsp), %xmm0
vmovaps %xmm0, 0x470(%rsp)
vmovdqa 0x2d0(%rsp), %xmm0
vmovdqa %xmm0, 0x480(%rsp)
movb %al, 0x490(%rsp)
movq 0x28(%rsp), %rcx
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0xc8(%rsp), %r8
movq (%rcx,%r8,8), %r8
movl 0x90(%r15,%r14,4), %ecx
movq %r8, 0x100(%rsp)
testl %ecx, 0x34(%r8)
je 0x1c554b1
movq 0x28(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
leaq 0x4d8ecf(%rip), %rcx # 0x212df48
jne 0x1c55091
movq 0x100(%rsp), %r8
cmpq $0x0, 0x48(%r8)
movb $0x1, %r8b
je 0x1c554bb
movq %r10, 0x48(%rsp)
movb %sil, 0x1f(%rsp)
movq %rdi, 0x50(%rsp)
movq %r9, 0x58(%rsp)
vmovaps %ymm20, 0x260(%rsp)
vaddps 0x2cbe89(%rip), %ymm20, %ymm0 # 0x1f20f40
movq %r11, 0x1f8(%rsp)
vcvtsi2ss %r11d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x300(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps %ymm21, 0x240(%rsp)
vmovaps %ymm21, 0x4e0(%rsp)
movzbl %al, %edi
vmovaps 0x460(%rsp), %xmm0
vmovaps %xmm0, 0x1d0(%rsp)
tzcntq %rdi, %r8
vmovaps 0x470(%rsp), %xmm0
vmovaps %xmm0, 0x120(%rsp)
vmovaps 0x480(%rsp), %xmm0
vmovaps %xmm0, 0x550(%rsp)
movb $0x1, %dl
vmovss 0x80(%r15,%r14,4), %xmm8
vmovss 0x4e0(%rsp,%r8,4), %xmm0
vbroadcastss 0x4a0(%rsp,%r8,4), %xmm1
vbroadcastss 0x4c0(%rsp,%r8,4), %xmm2
vmovss %xmm0, 0x80(%r15,%r14,4)
movq 0x28(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x29758f(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2cbd2d(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x2979e9(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm1, %xmm5
vfnmsub213ss %xmm4, %xmm1, %xmm5 # xmm5 = -(xmm1 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmulss %xmm1, %xmm1, %xmm4
vmovss 0x2979c3(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x550(%rsp), %xmm4, %xmm4
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x120(%rsp), %xmm4, %xmm0 # xmm0 = (xmm0 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x1d0(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm0) + xmm4
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x350(%rsp)
vmovaps %xmm4, 0x360(%rsp)
vmovaps %xmm0, 0x370(%rsp)
vmovaps %xmm1, 0x380(%rsp)
vmovaps %xmm2, 0x390(%rsp)
vmovaps 0x1e0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovdqa 0x170(%rsp), %xmm0
vmovdqa %xmm0, 0x3b0(%rsp)
vpcmpeqd %ymm0, %ymm0, %ymm0
leaq 0x3c0(%rsp), %rcx
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vmovaps 0x2c0(%rsp), %xmm0
vmovaps %xmm0, 0xb0(%rsp)
leaq 0xb0(%rsp), %rcx
movq %rcx, 0xd0(%rsp)
movq 0x100(%rsp), %rsi
movq 0x18(%rsi), %rcx
movq %rcx, 0xd8(%rsp)
movq %rax, 0xe0(%rsp)
movq %r15, 0xe8(%rsp)
leaq 0x350(%rsp), %rax
movq %rax, 0xf0(%rsp)
movl $0x4, 0xf8(%rsp)
movq 0x48(%rsi), %rax
testq %rax, %rax
movl %edx, 0x160(%rsp)
movq %rdi, 0x150(%rsp)
movq %r8, 0x140(%rsp)
vmovss %xmm8, 0x130(%rsp)
je 0x1c5534d
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x130(%rsp), %xmm8
movq 0x140(%rsp), %r8
movq 0x150(%rsp), %rdi
movl 0x160(%rsp), %edx
vmovaps 0x30(%rsp), %xmm7
vmovdqa 0xb0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c553f5
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c553be
testb $0x2, (%rcx)
jne 0x1c5538b
movq 0x100(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c553be
leaq 0xd0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x130(%rsp), %xmm8
movq 0x140(%rsp), %r8
movq 0x150(%rsp), %rdi
movl 0x160(%rsp), %edx
vmovaps 0x30(%rsp), %xmm7
vmovdqa 0xb0(%rsp), %xmm0
movq 0xe8(%rsp), %rax
vmovaps 0x80(%rax), %xmm1
vptestmd %xmm0, %xmm0, %k1
vbroadcastss 0x29779d(%rip), %xmm1 {%k1} # 0x1eecb84
vmovaps %xmm1, 0x80(%rax)
kortestb %k1, %k1
jne 0x1c55411
vmovss %xmm8, 0x80(%r15,%r14,4)
btcq %r8, %rdi
tzcntq %rdi, %r8
setae %dl
jae 0x1c55142
andb $0x1, %dl
movq 0x58(%rsp), %r9
movq 0x50(%rsp), %rdi
movb 0x1f(%rsp), %sil
movq 0x48(%rsp), %r10
movl %edx, %r8d
leaq 0x4d66f6(%rip), %rdx # 0x212bb28
leaq 0x4d8b0f(%rip), %rcx # 0x212df48
vxorps %xmm26, %xmm26, %xmm26
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x240(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
movq 0x1f8(%rsp), %r11
jmp 0x1c554bb
leaq 0x4d8ac1(%rip), %rcx # 0x212df48
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
jmp 0x1c554ab
leaq 0x4d8a9d(%rip), %rcx # 0x212df48
movl 0x20(%rsp), %eax
jmp 0x1c554c7
xorl %r8d, %r8d
leaq 0x4d8a8d(%rip), %rcx # 0x212df48
movl 0x20(%rsp), %eax
orb %r8b, %al
movl 0x24(%rsp), %r8d
addq $0x8, %r11
cmpl %r11d, %r8d
jg 0x1c5492a
jmp 0x1c54849
xorl %eax, %eax
leaq 0x4d8a66(%rip), %rcx # 0x212df48
jmp 0x1c554e6
xorl %eax, %eax
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x180(%rsp), %ymm17
jmp 0x1c54f6a
xorl %eax, %eax
jmp 0x1c54f5b
xorl %eax, %eax
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm8
vmovaps 0x200(%rsp), %xmm9
jmp 0x1c54203
xorl %eax, %eax
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x30(%rsp), %xmm7
vmovaps 0x1a0(%rsp), %ymm8
jmp 0x1c54203
xorl %eax, %eax
vmovaps 0x80(%rsp), %ymm5
vmovaps 0x60(%rsp), %ymm2
jmp 0x1c54203
movl 0x20(%rsp), %r11d
andb $0x1, %r11b
jmp 0x1c54831
andb $0x1, %sil
movl %esi, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 8>::intersect_t<embree::avx512::RibbonCurve1IntersectorK<embree::BSplineCurveT, 8, 8>, embree::avx512::Intersect1KEpilogMU<8, 8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayHitK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Intersects ray lane k (of a K-wide ray packet) against the up-to-M curve
// segments packed in one CurveNv leaf primitive. Segments are first culled
// with a cheap bounding test, then intersected precisely one at a time in
// mask-iteration order, with software prefetching of upcoming segments.
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Per-segment ray entry distance, written by the bounding test below.
vfloat<M> tNear;
// Coarse cull: lanes of 'valid' mark segments whose bounds the ray hits.
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Scalar bitmask of surviving segments; consumed lowest-bit-first.
size_t mask = movemask(valid);
while (mask)
{
// bscf = bit-scan-forward + clear lowest set bit: pop segment index i.
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
// geomID is shared by all segments of the leaf; primID is per-segment.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the four control vertices of segment i (Vec3ff: xyz plus an
// extra float component — presumably the curve radius; confirm in Vec3ff).
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Prefetch vertex data of the next one or two pending segments so the
// loads overlap with the precise intersection work done below:
// next segment (i1) into L1, the one after (i2) into L2.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Precise curve intersection; the Epilog commits any hit into ray lane k.
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// A committed hit may have shortened ray.tfar[k]; drop remaining
// segments whose bounding entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x7a0, %rsp # imm = 0x7A0
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0x98(%rsp)
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x80(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2bd053(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2cb821(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2cb796(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x29b8a8(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x296f96(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x60(%r12,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2ca6a2(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x100(%r12,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2ca67d(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x30507f(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x760(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c579c4
leaq (%r8,%rax), %rsi
addq $0x6, %rsi
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %rsi
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0x98(%rsp), %rcx
leaq (%rcx,%rax), %rdi
addq $0x20, %rdi
leaq 0x500(%rsp), %rax
leaq 0xe0(%rax), %rax
movq %rax, 0xd8(%rsp)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
leaq 0x4d6202(%rip), %r13 # 0x212bb28
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
vmovaps %ymm21, 0x180(%rsp)
vmovaps %ymm20, 0x1a0(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x4(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r8), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x28(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%rsi,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c559a9
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%rsi,%rdx)
prefetcht0 0x40(%rsi,%rdx)
testq %rcx, %rcx
je 0x1c559a9
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%rsi,%rcx)
prefetcht1 0x40(%rsi,%rcx)
vmovups 0x10(%rsi,%rax), %xmm13
vmovups 0x20(%rsi,%rax), %xmm26
vmovups 0x30(%rsi,%rax), %xmm23
movq %rbx, 0x240(%rsp)
movl 0x248(%rbx), %edx
vmovss (%r12,%r15,4), %xmm0
vinsertps $0x1c, 0x20(%r12,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r12,%r15,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm26, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
movl %edx, %ecx
vmovups (%r13,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2bcc5c(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r13,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r13,%rbx), %ymm15
vbroadcastss %xmm10, %ymm31
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%r13,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm29
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm29, %ymm4
vfmadd231ps %ymm31, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm31) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x4d8422(%rip), %rdx # 0x212df48
vmovups (%rdx,%rbx), %ymm2
vmovups 0x484(%rdx,%rbx), %ymm17
vmovups 0x908(%rdx,%rbx), %ymm18
vmovups 0xd8c(%rdx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm29, %ymm6
vfmadd231ps %ymm31, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm31) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0xe0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x4a0(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm27
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm27, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm26, %xmm26, %xmm0 # xmm0 = xmm26[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x6a0(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x6c0(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x6e0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x700(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x260(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm27, 0x2c0(%rsp)
vfmadd231ps %ymm27, %ymm27, %ymm21 # ymm21 = (ymm27 * ymm27) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x30(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2cb236(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x130(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm26, %xmm3
vmovaps %xmm26, 0x120(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x110(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %ecx, %xmm23, %xmm12
vmovaps %xmm12, 0x2e0(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2cb225(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x29b2bf(%rip), %xmm3, %xmm26 # 0x1ef0fe4
vbroadcastss 0x2cb1ae(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0x60(%r12,%r15,4), %xmm9
vmovaps %ymm31, 0x740(%rsp)
vmovaps %ymm30, 0x720(%rsp)
vmovaps %ymm20, 0x680(%rsp)
vmovaps %ymm21, 0x660(%rsp)
vmovaps %ymm22, 0x640(%rsp)
vmovaps %ymm3, 0x620(%rsp)
je 0x1c56530
vmovaps %xmm9, 0x220(%rsp)
vmovaps %xmm26, 0x80(%rsp)
vmovaps %ymm3, %ymm26
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm26, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r13,%rbx), %ymm3
vmovups 0x1694(%r13,%rbx), %ymm10
vmovups 0x1b18(%r13,%rbx), %ymm11
vmovups 0x1f9c(%r13,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm29, %ymm8
vmulps %ymm12, %ymm26, %ymm12
vfmadd231ps %ymm31, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm31) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0xe0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x4a0(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovups 0x1f9c(%rdx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x160(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm29, 0x140(%rsp)
vmulps %ymm13, %ymm29, %ymm14
vmulps %ymm13, %ymm26, %ymm13
vmovaps 0x80(%rsp), %xmm26
vfmadd231ps %ymm31, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm31) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2cafda(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm26, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x2c0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x260(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2caf60(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm27, %xmm27, %xmm27
vfmadd213ps %ymm27, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm27
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x296793(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x296771(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm27, %ymm9, %ymm13
vfmadd213ps %ymm27, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm27
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm27, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm13) + ymm3
vcmpleps %ymm27, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x180(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
je 0x1c5746d
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x29656d(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x220(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x160(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm29
je 0x1c574a0
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm8
je 0x1c574ba
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2964ea(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x480(%rsp)
movzbl %al, %r13d
vmovaps %ymm2, %ymm3
testw %r13w, %r13w
je 0x1c56527
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0x98(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r13b
je 0x1c56527
vbroadcastss 0x29a73a(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x480(%rsp), %ymm1
vfmadd132ps 0x29ad53(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x300(%rsp)
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
movl $0x0, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm8, 0x370(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %r13b, 0x3b0(%rsp)
movl 0x120(%r12,%r15,4), %eax
movq 0x240(%rsp), %r9
testl %eax, 0x34(%r9)
je 0x1c56527
movl %ecx, (%rsp)
vaddps 0x2cabf9(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x2963c5(%rip), %xmm1 # 0x1eec714
vdivss 0x2e0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x480(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm3, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x29568d(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
jne 0x1c574ce
movq 0x240(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c574ce
vmovss 0x3c0(%rsp,%r11,4), %xmm0
vmovss 0x3e0(%rsp,%r11,4), %xmm1
vmovss 0x2962f8(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2caa96(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x296752(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29672c(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x110(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x120(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vmovaps 0x60(%rsp), %ymm6
vfmadd132ps 0x130(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm4
vmovss 0x400(%rsp,%r11,4), %xmm3
vmovss %xmm3, 0x100(%r12,%r15,4)
vmovaps 0x40(%rsp), %ymm3
vmovss %xmm2, 0x180(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x1a0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x1c0(%r12,%r15,4)
vmovss %xmm0, 0x1e0(%r12,%r15,4)
vmovss %xmm1, 0x200(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x220(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x240(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%r15,4)
movl (%rsp), %ecx
leaq 0x4d55fa(%rip), %r13 # 0x212bb28
jmp 0x1c5656a
vxorps %xmm27, %xmm27, %xmm27
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0xe0(%rsp), %ymm17
cmpl $0x9, %ecx
jge 0x1c56593
vmovaps 0x760(%rsp), %ymm0
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c55926
jmp 0x1c579c4
vpbroadcastd %ecx, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss %xmm26, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovss 0x29614d(%rip), %xmm0 # 0x1eec714
vdivss 0x2e0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x2e0(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movl $0x8, %r9d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm3, 0x40(%rsp)
movl %ecx, (%rsp)
vpbroadcastd %r9d, %ymm0
vpor 0x3042f8(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x260(%rsp), %ymm0, %k1
leaq (%rbx,%r13), %rcx
vmovups (%rcx,%r9,4), %ymm3
vmovups 0x484(%rcx,%r9,4), %ymm10
vmovups 0x908(%rcx,%r9,4), %ymm11
vmovups 0xd8c(%rcx,%r9,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x6a0(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x740(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x720(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm26) + ymm4
vmovaps 0x6c0(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x4a0(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x6e0(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdx), %rax
vmovups (%rax,%r9,4), %ymm2
vmovups 0x484(%rax,%r9,4), %ymm13
vmovaps 0x700(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r9,4), %ymm14
vmovups 0xd8c(%rax,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm26, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm26) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c56f21
vmovaps %ymm23, %ymm16
vmovaps 0x620(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x640(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x660(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x680(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r9,4), %ymm12
vmovups 0x1694(%rcx,%r9,4), %ymm13
vmovups 0x1b18(%rcx,%r9,4), %ymm14
vmovups 0x1f9c(%rcx,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm26, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm26) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm26, %ymm25
vmovaps %ymm19, %ymm26
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r9,4), %ymm13
vmovups 0x1b18(%rax,%r9,4), %ymm14
vmovups 0x1f9c(%rax,%r9,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r9,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x4a0(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2ca5ce(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x2c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2ca55a(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm27, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm27
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x295d91(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x295d6f(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm27, %ymm14, %ymm13
vfmadd213ps %ymm27, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm27
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm27, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm13) + ymm6
vcmpleps %ymm27, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c56f4e
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x295b7e(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x240(%rsp), %ymm2, %k1
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl (%rsp), %ecx
je 0x1c56f68
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x30(%rsp), %xmm8
vmovaps 0xe0(%rsp), %ymm17
je 0x1c56f8a
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x295b0c(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x460(%rsp)
movzbl %al, %r11d
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
testw %r11w, %r11w
je 0x1c56f3c
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0x98(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %r11b
je 0x1c56f3c
movl %r11d, %eax
vbroadcastss 0x299d48(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x460(%rsp), %ymm1
vfmadd132ps 0x29a361(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x300(%rsp)
vmovaps %ymm1, 0x460(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm21, 0x340(%rsp)
movl %r9d, 0x360(%rsp)
movl %ecx, 0x364(%rsp)
vmovaps %xmm8, 0x370(%rsp)
vmovaps 0x130(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0x120(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovdqa 0x110(%rsp), %xmm0
vmovdqa %xmm0, 0x3a0(%rsp)
movb %al, 0x3b0(%rsp)
movl %r11d, %r13d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x28(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x120(%r12,%r15,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c56f15
vaddps 0x2ca201(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r9d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x220(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm21, 0x400(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x294c98(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movq %rcx, 0x80(%rsp)
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r13d
movq 0x80(%rsp), %rcx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c56f92
cmpq $0x0, 0x40(%rcx)
jne 0x1c56f92
vmovss 0x3c0(%rsp,%r13,4), %xmm0
vmovss 0x3e0(%rsp,%r13,4), %xmm1
vmovss 0x295907(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2ca0a5(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x295d61(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x295d3b(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x110(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x120(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x130(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm4
vmovss 0x400(%rsp,%r13,4), %xmm3
vmovss %xmm3, 0x100(%r12,%r15,4)
vmovss %xmm2, 0x180(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x1a0(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x1c0(%r12,%r15,4)
vmovss %xmm0, 0x1e0(%r12,%r15,4)
vmovss %xmm1, 0x200(%r12,%r15,4)
movl 0x4(%rsp), %eax
movl %eax, 0x220(%r12,%r15,4)
movq 0x28(%rsp), %rax
movl %eax, 0x240(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%r15,4)
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
leaq 0x4d4c0c(%rip), %r13 # 0x212bb28
movl (%rsp), %ecx
jmp 0x1c56f3c
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
movl (%rsp), %ecx
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
addq $0x8, %r9
cmpl %r9d, %ecx
jg 0x1c5661a
jmp 0x1c5656f
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm8
movl (%rsp), %ecx
jmp 0x1c56f7d
xorl %r11d, %r11d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm8
vmovaps 0xe0(%rsp), %ymm17
jmp 0x1c56c3d
xorl %r11d, %r11d
jmp 0x1c56c31
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x4d0(%rsp)
vmovaps %ymm20, 0x1a0(%rsp)
vmovaps %ymm21, 0x180(%rsp)
movq %r9, 0xd0(%rsp)
movl %r11d, %eax
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovaps %ymm28, 0x160(%rsp)
vmovaps %ymm29, 0x140(%rsp)
movl %eax, 0x420(%rsp)
vmovss 0x100(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x210(%rsp)
vmovss 0x400(%rsp,%r13,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r13,4), %ymm1
vbroadcastss 0x3e0(%rsp,%r13,4), %ymm2
vmovss %xmm0, 0x100(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2956bf(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2c9e5d(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x295b19(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmulss %xmm1, %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vfnmsub213ss %xmm4, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm6) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmovss 0x295af3(%rip), %xmm7 # 0x1eecb80
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm6, %xmm4
vmulss %xmm7, %xmm0, %xmm0
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x4d0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1f0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x200(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2bb623(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2c9ded(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm4, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vmovaps %ymm2, 0x580(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovdqa 0x2e0(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
movq %rcx, %r11
movq 0xd8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
leaq 0x1c0(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movq 0x18(%r11), %rcx
movq %rcx, 0xa8(%rsp)
movq %rax, 0xb0(%rsp)
movq %r12, 0xb8(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl $0x8, 0xc8(%rsp)
movq 0x40(%r11), %rax
testq %rax, %rax
je 0x1c57250
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd0(%rsp), %r9
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d6d0c(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c573be
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c57318
testb $0x2, (%rcx)
jne 0x1c57298
movq 0x80(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c57318
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xd0(%rsp), %r9
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d6c44(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c573be
vptestmd %ymm0, %ymm0, %k1
movq 0xb8(%rsp), %rax
movq 0xc0(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c573d1
vmovd 0x210(%rsp), %xmm0
vmovd %xmm0, 0x100(%r12,%r15,4)
movl $0x1, %eax
shlxl %r13d, %eax, %eax
kmovd %eax, %k0
movzbl 0x420(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %eax
ktestb %k1, %k0
je 0x1c57458
kmovd %eax, %k1
vbroadcastss 0x29460d(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
movl %eax, %r11d
kmovd %k0, %eax
andb %r11b, %al
movzbl %al, %eax
movzbl %r11b, %ecx
cmovnel %eax, %ecx
movl %r11d, %eax
tzcntl %ecx, %r13d
testb %al, %al
movq 0x80(%rsp), %rcx
jne 0x1c57007
jmp 0x1c56f15
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm8
vmovaps 0x160(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x220(%rsp), %xmm9
jmp 0x1c5624f
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x30(%rsp), %xmm8
jmp 0x1c5624f
xorl %r13d, %r13d
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
jmp 0x1c5624f
movq 0x28(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movl 0x4(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x420(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r8, 0x20(%rsp)
movq %r10, 0x18(%rsp)
movq %rsi, 0x10(%rsp)
movq %rdi, 0x8(%rsp)
vmovss 0x100(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x2c0(%rsp)
vmovss 0x400(%rsp,%r11,4), %xmm0
vbroadcastss 0x3c0(%rsp,%r11,4), %ymm1
vbroadcastss 0x3e0(%rsp,%r11,4), %ymm2
vmovss %xmm0, 0x100(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x29518e(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2c992c(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x2955e8(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmulss %xmm1, %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vfnmsub213ss %xmm4, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm6) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmovss 0x2955c2(%rip), %xmm7 # 0x1eecb80
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm6, %xmm4
vmulss %xmm7, %xmm0, %xmm0
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x200(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x210(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2bb0f2(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2c98bc(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm4, 0x520(%rsp)
vmovaps %ymm0, 0x540(%rsp)
vmovaps %ymm1, 0x560(%rsp)
vmovaps %ymm2, 0x580(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovdqa 0x440(%rsp), %ymm0
vmovdqa %ymm0, 0x5c0(%rsp)
movq 0xd8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x600(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
leaq 0x1c0(%rsp), %rcx
movq %rcx, 0xa0(%rsp)
movq 0x240(%rsp), %r9
movq 0x18(%r9), %rcx
movq %rcx, 0xa8(%rsp)
movq %rax, 0xb0(%rsp)
movq %r12, 0xb8(%rsp)
leaq 0x500(%rsp), %rax
movq %rax, 0xc0(%rsp)
movl $0x8, 0xc8(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movq %r11, 0x260(%rsp)
je 0x1c577a3
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm26
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d67b9(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm3
je 0x1c57922
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c5787c
testb $0x2, (%rcx)
jne 0x1c577eb
movq 0x240(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c5787c
leaq 0xa0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm26
vmovaps 0x140(%rsp), %ymm29
vmovaps 0x160(%rsp), %ymm28
vmovaps 0xe0(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x30(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d66e0(%rip), %rdx # 0x212df48
movq 0x8(%rsp), %rdi
movq 0x10(%rsp), %rsi
movq 0x18(%rsp), %r10
movq 0x20(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c57922
vptestmd %ymm0, %ymm0, %k1
movq 0xb8(%rsp), %rax
movq 0xc0(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c57935
vmovd 0x2c0(%rsp), %xmm0
vmovd %xmm0, 0x100(%r12,%r15,4)
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
movzbl %r13b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x100(%r12,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r13d
ktestb %k1, %k0
je 0x1c579b3
kmovd %r13d, %k1
vbroadcastss 0x2940ac(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
testb %r13b, %r13b
movl (%rsp), %ecx
jne 0x1c5753f
jmp 0x1c56527
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNvIntersectorK<8, 8>::occluded_t<embree::avx512::RibbonCurve1IntersectorK<embree::BSplineCurveT, 8, 8>, embree::avx512::Occluded1KEpilogMU<8, 8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
static __forceinline bool occluded_t(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x800, %rsp # imm = 0x800
movq %r8, %r9
movq %rdx, %r14
movq %rsi, %r15
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rsi
leaq (%rsi,%rsi,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%r15,%r14,4), %xmm1
vmovss 0x80(%r15,%r14,4), %xmm2
vinsertps $0x10, 0x20(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r15,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x8(%rsp)
vinsertps $0x20, 0xc0(%r15,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rdx,%rdx,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rdx,%rsi,2), %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %r10
leal (%r10,%r10), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
addq %rdx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2bac1d(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2c93e6(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2c9355(%rip), %ymm7 # 0x1f20ec4
vandps %ymm7, %ymm4, %ymm5
vbroadcastss 0x29946c(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x294b5a(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r8
subq %rdx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%r10), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rdx,%rdx), %r10
addq %rdx, %rsi
shlq $0x3, %rcx
subq %rdx, %rcx
movl %edx, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r9,%r8), %ymm6
subq %r10, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rsi), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%r15,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2c826a(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x100(%r15,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2c8245(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %edx, %ymm1
vpcmpgtd 0x302c41(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x7c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %sil
je 0x1c59a3b
leaq (%r9,%rax), %r12
addq $0x6, %r12
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
addq $0x10, %r12
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r10
addq $0x20, %r10
leaq 0x580(%rsp), %rax
leaq 0xe0(%rax), %rax
movq %rax, 0x88(%rsp)
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x560(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
vmovaps %ymm21, 0x220(%rsp)
vmovaps %ymm20, 0x240(%rsp)
tzcntq %r13, %rax
blsrq %r13, %r13
movl 0x6(%r9,%rax,4), %r11d
shll $0x6, %eax
movq %r13, %rcx
movl 0x2(%r9), %r8d
movq 0x8(%rsp), %rdx
movq (%rdx), %rdx
movq 0x1e8(%rdx), %rdx
movq %r8, 0x80(%rsp)
movq (%rdx,%r8,8), %r8
vmovups (%r12,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c57de4
andq %r13, %rcx
tzcntq %r13, %rdx
shll $0x6, %edx
prefetcht0 (%r12,%rdx)
prefetcht0 0x40(%r12,%rdx)
testq %rcx, %rcx
je 0x1c57de4
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%r12,%rcx)
prefetcht1 0x40(%r12,%rcx)
vmovups 0x10(%r12,%rax), %xmm13
vmovups 0x20(%r12,%rax), %xmm27
vmovups 0x30(%r12,%rax), %xmm23
movq %r8, 0x200(%rsp)
movl 0x248(%r8), %r8d
vmovss (%r15,%r14,4), %xmm0
vinsertps $0x1c, 0x20(%r15,%r14,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r15,%r14,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r10), %xmm4
vmovaps 0x10(%r10), %xmm5
vmovaps 0x20(%r10), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm27, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %r8d, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
leaq 0x4d3c51(%rip), %rdx # 0x212bb28
vmovups (%rdx,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2ba818(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%rdx,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%rdx,%rbx), %ymm15
vbroadcastss %xmm10, %ymm29
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%rdx,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm26
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm26, %ymm4
vfmadd231ps %ymm29, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm29) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x4d5fe0(%rip), %rcx # 0x212df48
vmovups (%rcx,%rbx), %ymm2
vmovups 0x484(%rcx,%rbx), %ymm17
vmovups 0x908(%rcx,%rbx), %ymm18
vmovups 0xd8c(%rcx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm26, %ymm6
vfmadd231ps %ymm29, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm29) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0x140(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x520(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm31, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm27, %xmm27, %xmm0 # xmm0 = xmm27[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x700(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x720(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x740(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x760(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0xc0(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm31, 0x120(%rsp)
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x10(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2c8df4(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x2f0(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm27, %xmm3
vmovaps %xmm27, 0x2e0(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x2d0(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %r8d, %xmm23, %xmm12
vmovaps %xmm12, 0x380(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2c8de3(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x298e7f(%rip), %xmm3, %xmm12 # 0x1ef0fe4
vbroadcastss 0x2c8d6e(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm8
kortestb %k1, %k1
vmovss 0x60(%r15,%r14,4), %xmm9
vmovaps %ymm29, 0x7a0(%rsp)
vmovaps %ymm30, 0x780(%rsp)
vmovaps %ymm20, 0x6e0(%rsp)
vmovaps %ymm21, 0x6c0(%rsp)
vmovaps %ymm22, 0x6a0(%rsp)
je 0x1c58c6c
vmovaps %xmm9, 0x1a0(%rsp)
vmulps %ymm19, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm8, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%rdx,%rbx), %ymm3
vmovups 0x1694(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovaps %xmm12, %xmm16
vmovups 0x1f9c(%rdx,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmovaps %ymm8, %ymm15
vmulps %ymm12, %ymm26, %ymm8
vmulps %ymm12, %ymm15, %ymm12
vfmadd231ps %ymm29, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm29) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x140(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x520(%rsp), %ymm23
vfmadd231ps %ymm23, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm23) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rcx,%rbx), %ymm10
vmovups 0x1b18(%rcx,%rbx), %ymm11
vmovups 0x1f9c(%rcx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x1e0(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm26, 0x1c0(%rsp)
vmulps %ymm13, %ymm26, %ymm14
vmovaps %ymm15, 0x260(%rsp)
vmulps %ymm13, %ymm15, %ymm13
vfmadd231ps %ymm29, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm29) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rcx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm23, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm23) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2c8baa(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm16, %ymm11
vmovaps %xmm16, %xmm26
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x120(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0xc0(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2c8b2a(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm24, %xmm24, %xmm24
vfmadd213ps %ymm24, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm24
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x29435d(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x29433b(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm24, %ymm9, %ymm13
vfmadd213ps %ymm24, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm24
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm24, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm13) + ymm3
vcmpleps %ymm24, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x220(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
vmovaps %xmm26, %xmm14
je 0x1c599cc
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x294131(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x1a0(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x100(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x1c0(%rsp), %ymm29
je 0x1c599fe
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm8
je 0x1c59a17
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2940ae(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x500(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c58cb5
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c58cb9
movl %r11d, 0xc0(%rsp)
vbroadcastss 0x298305(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x500(%rsp), %ymm1
vfmadd132ps 0x29891e(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x3a0(%rsp)
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm2, 0x3e0(%rsp)
movl $0x0, 0x400(%rsp)
movl %r8d, 0x404(%rsp)
vmovaps %xmm8, 0x410(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x2e0(%rsp), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vmovdqa 0x2d0(%rsp), %xmm0
vmovdqa %xmm0, 0x440(%rsp)
movb %al, 0x450(%rsp)
movl 0x120(%r15,%r14,4), %ecx
movq 0x200(%rsp), %r11
testl %ecx, 0x34(%r11)
je 0x1c58ccd
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c58797
movb $0x1, %r11b
movq 0x200(%rsp), %rcx
cmpq $0x0, 0x48(%rcx)
je 0x1c58cd0
vaddps 0x2c87a1(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x293f6d(%rip), %xmm1 # 0x1eec714
vdivss 0x380(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovaps %ymm2, 0x4a0(%rsp)
movzbl %al, %ecx
tzcntq %rcx, %r11
movq 0x80(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x320(%rsp)
vmovaps 0x420(%rsp), %xmm0
vmovaps %xmm0, 0x110(%rsp)
movl 0xc0(%rsp), %eax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x300(%rsp)
movq %r11, %rax
vmovaps 0x430(%rsp), %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps 0x440(%rsp), %xmm0
vmovaps %xmm0, 0x180(%rsp)
movb $0x1, %r11b
vmovaps %xmm14, 0xf0(%rsp)
movq %r9, 0x38(%rsp)
movq %rdi, 0x30(%rsp)
movb %sil, 0x3(%rsp)
movq %r10, 0x28(%rsp)
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movq %rcx, 0x360(%rsp)
movl %r11d, 0x120(%rsp)
vmovss 0x100(%r15,%r14,4), %xmm10
vmovss 0x4a0(%rsp,%rax,4), %xmm0
vbroadcastss 0x460(%rsp,%rax,4), %ymm1
movq %rax, 0x4c0(%rsp)
vbroadcastss 0x480(%rsp,%rax,4), %ymm2
vmovss %xmm0, 0x100(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x293e3e(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2c85dc(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x294298(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmulss %xmm1, %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vfnmsub213ss %xmm4, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm6) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmovss 0x294272(%rip), %xmm7 # 0x1eecb80
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm6, %xmm4
vmulss %xmm7, %xmm0, %xmm0
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x180(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x100(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x110(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2b9da2(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2c856c(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x580(%rsp)
vmovaps %ymm4, 0x5a0(%rsp)
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps %ymm1, 0x5e0(%rsp)
vmovaps %ymm2, 0x600(%rsp)
vmovaps 0x300(%rsp), %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmovdqa 0x320(%rsp), %ymm0
vmovdqa %ymm0, 0x640(%rsp)
movq 0x88(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x660(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x680(%rsp)
vmovaps 0x560(%rsp), %ymm0
vmovaps %ymm0, 0x160(%rsp)
leaq 0x160(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0x200(%rsp), %r11
movq 0x18(%r11), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x8, 0xb8(%rsp)
movq 0x48(%r11), %rax
testq %rax, %rax
movq %rdi, %r11
vmovss %xmm10, 0x340(%rsp)
je 0x1c58afe
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x340(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x1c0(%rsp), %ymm29
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm8
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x240(%rsp), %ymm20
vxorps %xmm24, %xmm24, %xmm24
leaq 0x4d303e(%rip), %rdx # 0x212bb28
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
vmovdqa 0x160(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c58c2b
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
je 0x1c58be3
testb $0x2, (%rcx)
jne 0x1c58b4b
movq 0x200(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c58be3
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x340(%rsp), %xmm10
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x1c0(%rsp), %ymm29
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
movl 0x4(%rsp), %r8d
vmovaps 0x10(%rsp), %xmm8
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm6
vxorps %xmm24, %xmm24, %xmm24
leaq 0x4d2f59(%rip), %rdx # 0x212bb28
movq 0x28(%rsp), %r10
movb 0x3(%rsp), %sil
movq 0x30(%rsp), %r11
movq 0x38(%rsp), %r9
movq %r11, %rdi
vmovdqa 0x160(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0xa8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x293f75(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
movq 0x360(%rsp), %rcx
je 0x1c58c42
jmp 0x1c59a2a
movq %r11, %rdi
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
movq 0x360(%rsp), %rcx
vmovss %xmm10, 0x100(%r15,%r14,4)
movq 0x4c0(%rsp), %rax
btcq %rax, %rcx
tzcntq %rcx, %rax
setae %r11b
jae 0x1c5887c
jmp 0x1c59a32
xorl %eax, %eax
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm17
vmovaps %ymm26, %ymm29
vmovaps %xmm12, %xmm14
jmp 0x1c58ceb
xorl %eax, %eax
jmp 0x1c58cc2
xorl %eax, %eax
leaq 0x4d5286(%rip), %rcx # 0x212df48
vmovaps 0x260(%rsp), %ymm8
jmp 0x1c58ceb
xorl %r11d, %r11d
leaq 0x4d5271(%rip), %rcx # 0x212df48
vmovaps 0x260(%rsp), %ymm8
movl %r11d, %eax
movl 0xc0(%rsp), %r11d
cmpl $0x9, %r8d
jge 0x1c58d21
testb $0x1, %al
jne 0x1c59a3b
vmovaps 0x7c0(%rsp), %ymm0
vcmpleps 0x100(%r15,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne %sil
jne 0x1c57d59
jmp 0x1c59a3b
vmovaps %ymm8, 0x260(%rsp)
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x200(%rsp)
vbroadcastss %xmm14, %ymm0
vmovaps %ymm0, 0x1a0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x360(%rsp)
vmovss 0x2939b7(%rip), %xmm0 # 0x1eec714
vdivss 0x380(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x340(%rsp)
movl %r11d, 0xc0(%rsp)
movl %eax, %r11d
movq 0x80(%rsp), %rax
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
movl %r11d, %eax
movl 0xc0(%rsp), %r11d
vpbroadcastd %r11d, %ymm0
vmovdqa %ymm0, 0x4c0(%rsp)
movl $0x8, %r11d
vmovaps %ymm6, 0x60(%rsp)
vmovaps %ymm2, 0x40(%rsp)
movl %r8d, 0x4(%rsp)
movl %eax, 0x120(%rsp)
vpbroadcastd %r11d, %ymm0
vpor 0x301b44(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x200(%rsp), %ymm0, %k1
movq %rcx, %rax
leaq (%rbx,%rdx), %rcx
vmovups (%rcx,%r11,4), %ymm3
vmovups 0x484(%rcx,%r11,4), %ymm10
vmovups 0x908(%rcx,%r11,4), %ymm11
vmovups 0xd8c(%rcx,%r11,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x700(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x7a0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x780(%rsp), %ymm27
vfmadd231ps %ymm27, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm27) + ymm4
vmovaps 0x720(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x520(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x740(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rax), %rax
vmovups (%rax,%r11,4), %ymm2
vmovups 0x484(%rax,%r11,4), %ymm13
vmovaps 0x760(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r11,4), %ymm14
vmovups 0xd8c(%rax,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm27, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm27) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm26
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c5993d
vmovaps %ymm23, %ymm16
vmovaps 0x260(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x6a0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x6c0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x6e0(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r11,4), %ymm12
vmovups 0x1694(%rcx,%r11,4), %ymm13
vmovups 0x1b18(%rcx,%r11,4), %ymm14
vmovups 0x1f9c(%rcx,%r11,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm27, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm27) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm26, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm26) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm27, %ymm25
vmovaps %ymm19, %ymm27
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r11,4), %ymm13
vmovups 0x1b18(%rax,%r11,4), %ymm14
vmovups 0x1f9c(%rax,%r11,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm27, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm27) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r11,4), %ymm14
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps 0x520(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2c7e17(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x1a0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2c7da3(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm24, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm24
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2935da(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2935b8(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm24, %ymm14, %ymm13
vfmadd213ps %ymm24, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm24
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm24, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm24, %ymm6 # ymm6 = (ymm24 * ymm13) + ymm6
vcmpleps %ymm24, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm24, %ymm3 # ymm3 = (ymm24 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm24, %ymm4 # ymm4 = (ymm24 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm24, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c59999
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm24, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm24) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2933c7(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x360(%rsp), %ymm2, %k1
vcmpleps 0x100(%r15,%r14,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
leaq 0x4d4bc3(%rip), %rcx # 0x212df48
je 0x1c599a4
vcmpneqps %ymm24, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x10(%rsp), %xmm8
vmovaps 0x140(%rsp), %ymm17
je 0x1c599c5
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x293351(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4e0(%rsp)
movzbl %al, %eax
vmovaps %ymm2, %ymm21
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
testw %ax, %ax
je 0x1c59965
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%rdi,%r14,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c5995e
vbroadcastss 0x29759b(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4e0(%rsp), %ymm1
vfmadd132ps 0x297bb4(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x3a0(%rsp)
vmovaps %ymm1, 0x4e0(%rsp)
vmovaps %ymm1, 0x3c0(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
movl %r11d, 0x400(%rsp)
movl %r8d, 0x404(%rsp)
vmovaps %xmm8, 0x410(%rsp)
vmovaps 0x2f0(%rsp), %xmm0
vmovaps %xmm0, 0x420(%rsp)
vmovaps 0x2e0(%rsp), %xmm0
vmovaps %xmm0, 0x430(%rsp)
vmovdqa 0x2d0(%rsp), %xmm0
vmovdqa %xmm0, 0x440(%rsp)
movb %al, 0x450(%rsp)
movq 0x8(%rsp), %rcx
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0x80(%rsp), %r8
movq (%rcx,%r8,8), %r8
movl 0x120(%r15,%r14,4), %ecx
movq %r8, 0xc0(%rsp)
testl %ecx, 0x34(%r8)
je 0x1c5996e
movq 0x8(%rsp), %rcx
movq 0x10(%rcx), %rcx
cmpq $0x0, 0x10(%rcx)
leaq 0x4d4a42(%rip), %rcx # 0x212df48
jne 0x1c5951e
movq 0xc0(%rsp), %r8
cmpq $0x0, 0x48(%r8)
movb $0x1, %r8b
je 0x1c59978
vmovaps %ymm29, 0x1c0(%rsp)
vmovaps %ymm28, 0x1e0(%rsp)
movq %r10, 0x28(%rsp)
movb %sil, 0x3(%rsp)
movq %rdi, 0x30(%rsp)
movq %r9, 0x38(%rsp)
vmovaps %ymm20, 0x240(%rsp)
vaddps 0x2c79ec(%rip), %ymm20, %ymm0 # 0x1f20f40
movq %r11, 0x198(%rsp)
vcvtsi2ss %r11d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x340(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x460(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovaps %ymm21, 0x220(%rsp)
vmovaps %ymm21, 0x4a0(%rsp)
movzbl %al, %edi
vmovaps 0x420(%rsp), %xmm0
vmovaps %xmm0, 0x180(%rsp)
tzcntq %rdi, %r8
vmovaps 0x430(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps 0x440(%rsp), %xmm0
vmovaps %xmm0, 0x550(%rsp)
movb $0x1, %dl
vmovss 0x100(%r15,%r14,4), %xmm9
vmovss 0x4a0(%rsp,%r8,4), %xmm0
vbroadcastss 0x460(%rsp,%r8,4), %ymm1
vbroadcastss 0x480(%rsp,%r8,4), %ymm2
vmovss %xmm0, 0x100(%r15,%r14,4)
movq 0x8(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2930f2(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2c7890(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x29354c(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmulss %xmm1, %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vfnmsub213ss %xmm4, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm6) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmovss 0x293526(%rip), %xmm7 # 0x1eecb80
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm6, %xmm4
vmulss %xmm7, %xmm0, %xmm0
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x550(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0xf0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x180(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2b9056(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm4
vbroadcastss 0x2c7820(%rip), %ymm5 # 0x1f20edc
vpermps %ymm0, %ymm5, %ymm0
vmovaps %ymm3, 0x580(%rsp)
vmovaps %ymm4, 0x5a0(%rsp)
vmovaps %ymm0, 0x5c0(%rsp)
vmovaps %ymm1, 0x5e0(%rsp)
vmovaps %ymm2, 0x600(%rsp)
vmovaps 0x4c0(%rsp), %ymm0
vmovaps %ymm0, 0x620(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x640(%rsp)
movq 0x88(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x660(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x680(%rsp)
vmovaps 0x560(%rsp), %ymm0
vmovaps %ymm0, 0x160(%rsp)
leaq 0x160(%rsp), %rcx
movq %rcx, 0x90(%rsp)
movq 0xc0(%rsp), %rsi
movq 0x18(%rsi), %rcx
movq %rcx, 0x98(%rsp)
movq %rax, 0xa0(%rsp)
movq %r15, 0xa8(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0xb0(%rsp)
movl $0x8, 0xb8(%rsp)
movq 0x48(%rsi), %rax
testq %rax, %rax
movl %edx, 0x320(%rsp)
movq %rdi, 0x110(%rsp)
movq %r8, 0x300(%rsp)
vmovss %xmm9, 0x100(%rsp)
je 0x1c59801
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x100(%rsp), %xmm9
movq 0x300(%rsp), %r8
movq 0x110(%rsp), %rdi
movl 0x320(%rsp), %edx
vmovaps 0x10(%rsp), %xmm8
vmovdqa 0x160(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c598a5
movq 0x8(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c5986d
testb $0x2, (%rcx)
jne 0x1c5983a
movq 0xc0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c5986d
leaq 0x90(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x100(%rsp), %xmm9
movq 0x300(%rsp), %r8
movq 0x110(%rsp), %rdi
movl 0x320(%rsp), %edx
vmovaps 0x10(%rsp), %xmm8
vmovdqa 0x160(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0xa8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2932ee(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
jne 0x1c598c1
vmovss %xmm9, 0x100(%r15,%r14,4)
btcq %r8, %rdi
tzcntq %rdi, %r8
setae %dl
jae 0x1c595df
andb $0x1, %dl
movq 0x38(%rsp), %r9
movq 0x30(%rsp), %rdi
movb 0x3(%rsp), %sil
movq 0x28(%rsp), %r10
movl %edx, %r8d
leaq 0x4d2246(%rip), %rdx # 0x212bb28
leaq 0x4d465f(%rip), %rcx # 0x212df48
vxorps %xmm24, %xmm24, %xmm24
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm17
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x1c0(%rsp), %ymm29
movq 0x198(%rsp), %r11
jmp 0x1c59978
leaq 0x4d4604(%rip), %rcx # 0x212df48
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps %ymm23, %ymm18
vmovaps %ymm26, %ymm17
jmp 0x1c59965
leaq 0x4d45e3(%rip), %rcx # 0x212df48
movl 0x120(%rsp), %eax
jmp 0x1c59987
xorl %r8d, %r8d
leaq 0x4d45d0(%rip), %rcx # 0x212df48
movl 0x120(%rsp), %eax
orb %r8b, %al
movl 0x4(%rsp), %r8d
addq $0x8, %r11
cmpl %r11d, %r8d
jg 0x1c58dc7
jmp 0x1c58cf1
xorl %eax, %eax
leaq 0x4d45a6(%rip), %rcx # 0x212df48
jmp 0x1c599a6
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm8
vmovaps 0x140(%rsp), %ymm17
jmp 0x1c593f7
xorl %eax, %eax
jmp 0x1c593eb
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm28
vmovaps 0x1c0(%rsp), %ymm29
vmovaps 0x1a0(%rsp), %xmm9
jmp 0x1c58686
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x10(%rsp), %xmm8
jmp 0x1c58686
xorl %eax, %eax
vmovaps 0x60(%rsp), %ymm6
vmovaps 0x40(%rsp), %ymm2
jmp 0x1c58686
movl 0x120(%rsp), %r11d
andb $0x1, %r11b
jmp 0x1c58cd0
andb $0x1, %sil
movl %esi, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
void embree::avx512::CurveNvIntersectorK<8, 16>::intersect_t<embree::avx512::RibbonCurve1IntersectorK<embree::BSplineCurveT, 16, 8>, embree::avx512::Intersect1KEpilogMU<8, 16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayHitK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNv<8> const&)
|
// Intersects lane k of a K-wide ray packet against a block of up to M curve
// primitives stored in CurveNv layout. For each candidate curve surviving the
// coarse bounds test, loads its 4 control points and runs the full curve
// intersector, updating the ray's hit record via the epilog.
static __forceinline void intersect_t(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Coarse test: intersect ray k against the quantized bounds of all M curves
// at once; 'valid' marks candidate curves, 'tNear' their entry distances.
vfloat<M> tNear;
vbool<M> valid = CurveNiIntersectorK<M,K>::intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Iterate over the set bits of the candidate mask, nearest-first in bit order.
size_t mask = movemask(valid);
while (mask)
{
// bscf extracts the lowest set bit index and clears it from 'mask'.
const size_t i = bscf(mask);
// Statistics macro for traversal counters; presumably compiled out in
// release builds — TODO confirm against STAT3 definition.
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = (CurveGeometry*) context->scene->get(geomID);
// Load the 4 control points (position + radius in w) of curve i.
const Vec3ff a0 = Vec3ff::loadu(&prim.vertices(i,N)[0]);
const Vec3ff a1 = Vec3ff::loadu(&prim.vertices(i,N)[1]);
const Vec3ff a2 = Vec3ff::loadu(&prim.vertices(i,N)[2]);
const Vec3ff a3 = Vec3ff::loadu(&prim.vertices(i,N)[3]);
// Software prefetch of upcoming candidates' vertex data to hide memory
// latency: next candidate (i1) into L1, the one after (i2) into L2.
// 'mask' is already cleared of bit i, so i1 is the next candidate.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
prefetchL1(&prim.vertices(i1,N)[0]);
prefetchL1(&prim.vertices(i1,N)[4]);
if (mask1) {
const size_t i2 = bsf(mask1);
prefetchL2(&prim.vertices(i2,N)[0]);
prefetchL2(&prim.vertices(i2,N)[4]);
}
}
// Exact curve intersection; the epilog commits any accepted hit into 'ray'
// (which can shorten ray.tfar[k]).
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,Epilog(ray,k,context,geomID,primID));
// Drop remaining candidates whose entry distance now lies beyond the
// (possibly shortened) current hit distance.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x9c0, %rsp # imm = 0x9C0
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r12
movq %rdi, 0xb8(%rsp)
movzbl 0x1(%r8), %edx
leaq (%rdx,%rdx,4), %rcx
leaq (%rcx,%rcx,4), %rax
vbroadcastss 0x12(%r8,%rax), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x100(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x40(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rax), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rdx,4), %ymm1
vpmovsxbd 0x6(%r8,%rcx), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rdx,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rdx,%rcx,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rsi,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rdx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rdx,%rdx,8), %rdi
leal (%rdi,%rdi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rdx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2b8b9a(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2c7368(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2c72dd(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x2973ef(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x292add(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rdx,8), %r9
subq %rdx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rdi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rdx,%rdx), %rdi
addq %rdx, %rcx
shlq $0x3, %rsi
subq %rdx, %rsi
vpbroadcastd %edx, %ymm7
shll $0x4, %edx
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rdx
vpmovsxwd 0x6(%r8,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rsi), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc0(%r12,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2c61e9(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x200(%r12,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2c61c1(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x300bc3(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x6e0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c5bee1
leaq (%r8,%rax), %rsi
addq $0x6, %rsi
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
addq $0x10, %rsi
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0xb8(%rsp), %rcx
leaq (%rcx,%rax), %rdi
addq $0x40, %rdi
leaq 0x740(%rsp), %rax
leaq 0x1c0(%rax), %rax
movq %rax, 0xf8(%rsp)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x700(%rsp)
leaq 0x4d1d47(%rip), %r13 # 0x212bb28
vmovaps %ymm6, 0x80(%rsp)
vmovaps %ymm3, 0x60(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
vmovaps %ymm20, 0x1c0(%rsp)
tzcntq %r14, %rax
blsrq %r14, %r14
movl 0x6(%r8,%rax,4), %ecx
movl %ecx, 0x24(%rsp)
shll $0x6, %eax
movq %r14, %rcx
movl 0x2(%r8), %ebx
movq (%r10), %rdx
movq 0x1e8(%rdx), %rdx
movq %rbx, 0x48(%rsp)
movq (%rdx,%rbx,8), %rbx
vmovups (%rsi,%rax), %xmm24
subq $0x1, %rcx
jb 0x1c59e67
andq %r14, %rcx
tzcntq %r14, %rdx
shll $0x6, %edx
prefetcht0 (%rsi,%rdx)
prefetcht0 0x40(%rsi,%rdx)
testq %rcx, %rcx
je 0x1c59e67
tzcntq %rcx, %rcx
shll $0x6, %ecx
prefetcht1 (%rsi,%rcx)
prefetcht1 0x40(%rsi,%rcx)
vmovups 0x10(%rsi,%rax), %xmm13
vmovups 0x20(%rsi,%rax), %xmm26
vmovups 0x30(%rsi,%rax), %xmm23
movq %rbx, 0x240(%rsp)
movl 0x248(%rbx), %edx
vmovss (%r12,%r15,4), %xmm0
vinsertps $0x1c, 0x40(%r12,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r12,%r15,4), %xmm0, %xmm1 # xmm1 = xmm0[0,1],mem[0],zero
vsubps %xmm1, %xmm24, %xmm0
vbroadcastss %xmm0, %xmm2
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
vmulps %xmm0, %xmm6, %xmm8
vfmadd231ps %xmm3, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm3) + xmm8
vfmadd231ps %xmm2, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm2) + xmm8
vshufps $0xff, %xmm24, %xmm24, %xmm0 # xmm0 = xmm24[3,3,3,3]
vsubps %xmm1, %xmm13, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm9
vfmadd231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) + xmm9
vfmadd231ps %xmm3, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm3) + xmm9
vsubps %xmm1, %xmm26, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm10
vfmadd231ps %xmm7, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm7) + xmm10
vfmadd231ps %xmm3, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm3) + xmm10
vsubps %xmm1, %xmm23, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm11
vfmadd231ps %xmm3, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm3) + xmm11
vfmadd231ps %xmm2, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm2) + xmm11
movslq %edx, %rax
movq %rax, %rcx
shlq $0x6, %rcx
leaq (%rcx,%rax,4), %rbx
movl %edx, %ecx
vmovups (%r13,%rbx), %ymm25
vbroadcastss %xmm8, %ymm3
vbroadcastss 0x2b879b(%rip), %ymm1 # 0x1f12704
vpermps %ymm8, %ymm1, %ymm20
vmovups 0x484(%r13,%rbx), %ymm14
vbroadcastss %xmm9, %ymm21
vpermps %ymm9, %ymm1, %ymm12
vmovups 0x908(%r13,%rbx), %ymm15
vbroadcastss %xmm10, %ymm31
vpermps %ymm10, %ymm1, %ymm30
vmovups 0xd8c(%r13,%rbx), %ymm16
vbroadcastss %xmm11, %ymm28
vpermps %ymm11, %ymm1, %ymm29
vmulps %ymm16, %ymm28, %ymm5
vmulps %ymm16, %ymm29, %ymm4
vfmadd231ps %ymm31, %ymm15, %ymm5 # ymm5 = (ymm15 * ymm31) + ymm5
vfmadd231ps %ymm30, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm30) + ymm4
vfmadd231ps %ymm21, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm21) + ymm5
vfmadd231ps %ymm12, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm12) + ymm4
vfmadd231ps %ymm3, %ymm25, %ymm5 # ymm5 = (ymm25 * ymm3) + ymm5
vfmadd231ps %ymm20, %ymm25, %ymm4 # ymm4 = (ymm25 * ymm20) + ymm4
leaq 0x4d3f61(%rip), %rdx # 0x212df48
vmovups (%rdx,%rbx), %ymm2
vmovups 0x484(%rdx,%rbx), %ymm17
vmovups 0x908(%rdx,%rbx), %ymm18
vmovups 0xd8c(%rdx,%rbx), %ymm19
vmulps %ymm19, %ymm28, %ymm7
vmulps %ymm19, %ymm29, %ymm6
vfmadd231ps %ymm31, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm31) + ymm7
vfmadd231ps %ymm30, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm30) + ymm6
vmovaps %ymm21, 0x100(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm12, 0x480(%rsp)
vfmadd231ps %ymm12, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm12) + ymm6
vmovaps %ymm3, 0x2a0(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm3) + ymm7
vmovaps %ymm20, 0x280(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm27
vsubps %ymm4, %ymm6, %ymm12
vmulps %ymm27, %ymm4, %ymm1
vmulps %ymm12, %ymm5, %ymm20
vsubps %ymm20, %ymm1, %ymm20
vshufps $0xff, %xmm13, %xmm13, %xmm1 # xmm1 = xmm13[3,3,3,3]
vbroadcastsd %xmm0, %ymm3
vbroadcastsd %xmm1, %ymm21
vshufps $0xff, %xmm26, %xmm26, %xmm0 # xmm0 = xmm26[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vshufps $0xff, %xmm23, %xmm23, %xmm0 # xmm0 = xmm23[3,3,3,3]
vbroadcastsd %xmm0, %ymm1
vmulps %ymm16, %ymm1, %ymm0
vfmadd231ps %ymm22, %ymm15, %ymm0 # ymm0 = (ymm15 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm21) + ymm0
vfmadd231ps %ymm3, %ymm25, %ymm0 # ymm0 = (ymm25 * ymm3) + ymm0
vmovaps %ymm1, 0x620(%rsp)
vmulps %ymm19, %ymm1, %ymm1
vmovaps %ymm22, 0x640(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x660(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vmovaps %ymm3, 0x680(%rsp)
vfmadd231ps %ymm3, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm3) + ymm1
vmovaps %ymm12, 0x260(%rsp)
vmulps %ymm12, %ymm12, %ymm21
vmovaps %ymm27, 0x2c0(%rsp)
vfmadd231ps %ymm27, %ymm27, %ymm21 # ymm21 = (ymm27 * ymm27) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %xmm24, 0x50(%rsp)
vmovaps %xmm24, %xmm12
vblendps $0x8, %xmm12, %xmm8, %xmm12 # xmm12 = xmm8[0,1,2],xmm12[3]
vbroadcastss 0x2c6d75(%rip), %xmm20 # 0x1f20ec4
vandps %xmm20, %xmm12, %xmm12
vmovaps %xmm13, 0x150(%rsp)
vblendps $0x8, %xmm13, %xmm9, %xmm13 # xmm13 = xmm9[0,1,2],xmm13[3]
vandps %xmm20, %xmm13, %xmm13
vmaxps %xmm13, %xmm12, %xmm12
vmovaps %xmm26, %xmm3
vmovaps %xmm26, 0x140(%rsp)
vblendps $0x8, %xmm3, %xmm10, %xmm13 # xmm13 = xmm10[0,1,2],xmm3[3]
vandps %xmm20, %xmm13, %xmm13
vmovaps %xmm23, %xmm3
vmovaps %xmm23, 0x130(%rsp)
vblendps $0x8, %xmm3, %xmm11, %xmm3 # xmm3 = xmm11[0,1,2],xmm3[3]
vandps %xmm20, %xmm3, %xmm3
vmaxps %xmm3, %xmm13, %xmm3
vmaxps %xmm3, %xmm12, %xmm3
vmovshdup %xmm3, %xmm12 # xmm12 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm12, %xmm12
vshufpd $0x1, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[1,0]
vmaxss %xmm12, %xmm3, %xmm3
vcvtsi2ss %ecx, %xmm23, %xmm12
vmovaps %xmm12, 0x4c0(%rsp)
vbroadcastss %xmm12, %ymm12
vcmpgtps 0x2c6d64(%rip), %ymm12, %k1 {%k1} # 0x1f20f40
vmulss 0x296dfe(%rip), %xmm3, %xmm26 # 0x1ef0fe4
vbroadcastss 0x2c6ced(%rip), %ymm3 # 0x1f20edc
vpermps %ymm8, %ymm3, %ymm20
vpermps %ymm9, %ymm3, %ymm21
vpermps %ymm10, %ymm3, %ymm22
vpermps %ymm11, %ymm3, %ymm3
kortestb %k1, %k1
vmovss 0xc0(%r12,%r15,4), %xmm9
vmovaps %ymm31, 0x6c0(%rsp)
vmovaps %ymm30, 0x6a0(%rsp)
vmovaps %ymm20, 0x600(%rsp)
vmovaps %ymm21, 0x5e0(%rsp)
vmovaps %ymm22, 0x5c0(%rsp)
vmovaps %ymm3, 0x5a0(%rsp)
je 0x1c5a9ff
vmovaps %xmm9, 0x220(%rsp)
vmovaps %xmm26, 0xa0(%rsp)
vmovaps %ymm3, %ymm26
vmulps %ymm19, %ymm3, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm18 # ymm18 = (ymm22 * ymm18) + ymm3
vfmadd213ps %ymm18, %ymm21, %ymm17 # ymm17 = (ymm21 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm20, %ymm2 # ymm2 = (ymm20 * ymm2) + ymm17
vmulps %ymm16, %ymm26, %ymm3
vfmadd213ps %ymm3, %ymm22, %ymm15 # ymm15 = (ymm22 * ymm15) + ymm3
vfmadd213ps %ymm15, %ymm21, %ymm14 # ymm14 = (ymm21 * ymm14) + ymm15
vmovups 0x1210(%r13,%rbx), %ymm3
vmovups 0x1694(%r13,%rbx), %ymm10
vmovups 0x1b18(%r13,%rbx), %ymm11
vmovups 0x1f9c(%r13,%rbx), %ymm12
vfmadd213ps %ymm14, %ymm20, %ymm25 # ymm25 = (ymm20 * ymm25) + ymm14
vmulps %ymm12, %ymm28, %ymm9
vmulps %ymm12, %ymm29, %ymm8
vmulps %ymm12, %ymm26, %ymm12
vfmadd231ps %ymm31, %ymm11, %ymm9 # ymm9 = (ymm11 * ymm31) + ymm9
vfmadd231ps %ymm30, %ymm11, %ymm8 # ymm8 = (ymm11 * ymm30) + ymm8
vfmadd231ps %ymm11, %ymm22, %ymm12 # ymm12 = (ymm22 * ymm11) + ymm12
vmovaps 0x100(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm10, %ymm9 # ymm9 = (ymm10 * ymm19) + ymm9
vmovaps 0x480(%rsp), %ymm15
vfmadd231ps %ymm15, %ymm10, %ymm8 # ymm8 = (ymm10 * ymm15) + ymm8
vfmadd231ps %ymm10, %ymm21, %ymm12 # ymm12 = (ymm21 * ymm10) + ymm12
vmovaps 0x2a0(%rsp), %ymm17
vfmadd231ps %ymm17, %ymm3, %ymm9 # ymm9 = (ymm3 * ymm17) + ymm9
vmovaps 0x280(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm3, %ymm8 # ymm8 = (ymm3 * ymm18) + ymm8
vmovups 0x1210(%rdx,%rbx), %ymm10
vmovups 0x1b18(%rdx,%rbx), %ymm11
vmovups 0x1f9c(%rdx,%rbx), %ymm13
vfmadd231ps %ymm3, %ymm20, %ymm12 # ymm12 = (ymm20 * ymm3) + ymm12
vmovaps %ymm28, 0x180(%rsp)
vmulps %ymm13, %ymm28, %ymm3
vmovaps %ymm29, 0x160(%rsp)
vmulps %ymm13, %ymm29, %ymm14
vmulps %ymm13, %ymm26, %ymm13
vmovaps 0xa0(%rsp), %xmm26
vfmadd231ps %ymm31, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm31) + ymm3
vfmadd231ps %ymm30, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm30) + ymm14
vfmadd231ps %ymm11, %ymm22, %ymm13 # ymm13 = (ymm22 * ymm11) + ymm13
vmovups 0x1694(%rdx,%rbx), %ymm11
vfmadd231ps %ymm19, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm19) + ymm3
vfmadd231ps %ymm15, %ymm11, %ymm14 # ymm14 = (ymm11 * ymm15) + ymm14
vfmadd231ps %ymm11, %ymm21, %ymm13 # ymm13 = (ymm21 * ymm11) + ymm13
vfmadd231ps %ymm17, %ymm10, %ymm3 # ymm3 = (ymm10 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm18) + ymm14
vfmadd231ps %ymm10, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm10) + ymm13
vbroadcastss 0x2c6b16(%rip), %ymm15 # 0x1f20ec4
vandps %ymm15, %ymm9, %ymm10
vandps %ymm15, %ymm8, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vandps %ymm15, %ymm12, %ymm11
vmaxps %ymm11, %ymm10, %ymm10
vbroadcastss %xmm26, %ymm11
vcmpltps %ymm11, %ymm10, %k2
vmovaps 0x2c0(%rsp), %ymm20
vmovaps %ymm20, %ymm9 {%k2}
vmovaps 0x260(%rsp), %ymm16
vmovaps %ymm16, %ymm8 {%k2}
vandps %ymm3, %ymm15, %ymm10
vandps %ymm15, %ymm14, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vandps %ymm15, %ymm13, %ymm12
vmaxps %ymm12, %ymm10, %ymm10
vcmpltps %ymm11, %ymm10, %k2
vmovaps %ymm20, %ymm3 {%k2}
vmovaps %ymm16, %ymm14 {%k2}
vbroadcastss 0x2c6a9c(%rip), %ymm11 # 0x1f20ec0
vxorps %ymm11, %ymm9, %ymm10
vxorps %ymm3, %ymm11, %ymm12
vxorps %xmm27, %xmm27, %xmm27
vfmadd213ps %ymm27, %ymm9, %ymm9 # ymm9 = (ymm9 * ymm9) + ymm27
vfmadd231ps %ymm8, %ymm8, %ymm9 # ymm9 = (ymm8 * ymm8) + ymm9
vrsqrt14ps %ymm9, %ymm11
vbroadcastss 0x2922cf(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm9, %ymm9
vmulps %ymm9, %ymm11, %ymm9
vmulps %ymm11, %ymm11, %ymm13
vmulps %ymm9, %ymm13, %ymm9
vbroadcastss 0x2922ad(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm11, %ymm16, %ymm9 # ymm9 = (ymm16 * ymm11) + ymm9
vmulps %ymm9, %ymm8, %ymm11
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm27, %ymm9, %ymm13
vfmadd213ps %ymm27, %ymm3, %ymm3 # ymm3 = (ymm3 * ymm3) + ymm27
vfmadd231ps %ymm14, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm14) + ymm3
vrsqrt14ps %ymm3, %ymm8
vmulps %ymm3, %ymm15, %ymm3
vmulps %ymm3, %ymm8, %ymm3
vmulps %ymm8, %ymm8, %ymm9
vmulps %ymm3, %ymm9, %ymm3
vfmadd231ps %ymm8, %ymm16, %ymm3 # ymm3 = (ymm16 * ymm8) + ymm3
vmulps %ymm3, %ymm14, %ymm8
vmulps %ymm3, %ymm12, %ymm9
vmulps %ymm27, %ymm3, %ymm3
vmovaps %ymm11, %ymm12
vfmadd213ps %ymm5, %ymm0, %ymm12 # ymm12 = (ymm0 * ymm12) + ymm5
vmovaps %ymm10, %ymm14
vfmadd213ps %ymm4, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm4
vmovaps %ymm13, %ymm15
vfmadd213ps %ymm25, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm25
vmovaps %ymm8, %ymm16
vfmadd213ps %ymm7, %ymm1, %ymm16 # ymm16 = (ymm1 * ymm16) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm3, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm25 # ymm25 = -(ymm0 * ymm13) + ymm25
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm3, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm3) + ymm2
vsubps %ymm11, %ymm16, %ymm3
vsubps %ymm10, %ymm5, %ymm6
vsubps %ymm25, %ymm4, %ymm7
vmulps %ymm25, %ymm6, %ymm13
vfmsub231ps %ymm7, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm7) - ymm13
vmulps %ymm7, %ymm11, %ymm7
vfmsub231ps %ymm3, %ymm25, %ymm7 # ymm7 = (ymm25 * ymm3) - ymm7
vmulps %ymm3, %ymm10, %ymm3
vfmsub231ps %ymm6, %ymm11, %ymm3 # ymm3 = (ymm11 * ymm6) - ymm3
vfmadd231ps %ymm7, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm7) + ymm3
vfmadd231ps %ymm13, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm13) + ymm3
vcmpleps %ymm27, %ymm3, %k2
vmovaps %ymm12, %ymm8 {%k2}
vmovaps %ymm14, %ymm9 {%k2}
vmovaps %ymm15, %ymm2 {%k2}
vblendmps %ymm16, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm25, %ymm15 {%k2}
vmovaps %ymm11, %ymm16 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm25, %ymm4 {%k2}
vsubps %ymm8, %ymm16, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x1a0(%rsp), %ymm21
vmovaps %ymm17, %ymm22
vmovaps %ymm19, %ymm17
je 0x1c5b96e
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2920a9(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x220(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm29
je 0x1c5b9a4
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x50(%rsp), %xmm8
je 0x1c5b9c1
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x292023(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm6
vmovaps %ymm3, %ymm6 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x460(%rsp)
movzbl %al, %r13d
vmovaps %ymm2, %ymm3
testw %r13w, %r13w
je 0x1c5a9f6
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xb8(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r13b
je 0x1c5a9f6
vbroadcastss 0x296273(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x460(%rsp), %ymm1
vfmadd132ps 0x29688c(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm6, 0x2e0(%rsp)
vmovaps %ymm1, 0x460(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm3, 0x320(%rsp)
movl $0x0, 0x340(%rsp)
movl %ecx, 0x344(%rsp)
vmovaps %xmm8, 0x350(%rsp)
vmovaps 0x150(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x380(%rsp)
movb %r13b, 0x390(%rsp)
movl 0x240(%r12,%r15,4), %eax
movq 0x240(%rsp), %r9
testl %eax, 0x34(%r9)
je 0x1c5a9f6
movl %ecx, 0x20(%rsp)
vaddps 0x2c6731(%rip), %ymm6, %ymm0 # 0x1f20f40
vmovss 0x291efd(%rip), %xmm1 # 0x1eec714
vdivss 0x4c0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm3, 0x3e0(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2911c5(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm6, 0x80(%rsp)
vmovaps %ymm3, 0x60(%rsp)
jne 0x1c5b9d8
movq 0x240(%rsp), %rax
cmpq $0x0, 0x40(%rax)
jne 0x1c5b9d8
vmovss 0x3a0(%rsp,%r11,4), %xmm0
vmovss 0x3c0(%rsp,%r11,4), %xmm1
vmovss 0x291e2d(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2c65cb(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x292287(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x292261(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x130(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x140(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vmovaps 0x80(%rsp), %ymm6
vfmadd132ps 0x150(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm4
vmovss 0x3e0(%rsp,%r11,4), %xmm3
vmovss %xmm3, 0x200(%r12,%r15,4)
vmovaps 0x60(%rsp), %ymm3
vmovss %xmm2, 0x300(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x340(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x380(%r12,%r15,4)
vmovss %xmm0, 0x3c0(%r12,%r15,4)
vmovss %xmm1, 0x400(%r12,%r15,4)
movl 0x24(%rsp), %eax
movl %eax, 0x440(%r12,%r15,4)
movq 0x48(%rsp), %rax
movl %eax, 0x480(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%r15,4)
movl 0x20(%rsp), %ecx
leaq 0x4d112b(%rip), %r13 # 0x212bb28
jmp 0x1c5aa3c
vxorps %xmm27, %xmm27, %xmm27
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x100(%rsp), %ymm17
cmpl $0x9, %ecx
jge 0x1c5aa68
vmovaps 0x6e0(%rsp), %ymm0
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r14d
jne 0x1c59de1
jmp 0x1c5bee1
vpbroadcastd %ecx, %ymm0
vmovdqa %ymm0, 0x260(%rsp)
vbroadcastss %xmm26, %ymm0
vmovaps %ymm0, 0x2c0(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x240(%rsp)
vmovss 0x291c78(%rip), %xmm0 # 0x1eec714
vdivss 0x4c0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x220(%rsp)
movq 0x48(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x4c0(%rsp)
movl 0x24(%rsp), %eax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x540(%rsp)
movl $0x8, %r9d
vmovaps %ymm6, 0x80(%rsp)
vmovaps %ymm3, 0x60(%rsp)
movl %ecx, 0x20(%rsp)
vpbroadcastd %r9d, %ymm0
vpor 0x2ffe21(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpltd 0x260(%rsp), %ymm0, %k1
leaq (%rbx,%r13), %rcx
vmovups (%rcx,%r9,4), %ymm3
vmovups 0x484(%rcx,%r9,4), %ymm10
vmovups 0x908(%rcx,%r9,4), %ymm11
vmovups 0xd8c(%rcx,%r9,4), %ymm12
vmulps %ymm12, %ymm28, %ymm5
vmulps %ymm12, %ymm29, %ymm4
vmovaps 0x620(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vmovaps 0x6c0(%rsp), %ymm19
vfmadd231ps %ymm19, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm19) + ymm5
vmovaps 0x6a0(%rsp), %ymm26
vfmadd231ps %ymm26, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm26) + ymm4
vmovaps 0x640(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm16) + ymm0
vfmadd231ps %ymm17, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm17) + ymm5
vmovaps 0x480(%rsp), %ymm25
vfmadd231ps %ymm25, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm25) + ymm4
vmovaps 0x660(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm18) + ymm4
leaq (%rbx,%rdx), %rax
vmovups (%rax,%r9,4), %ymm2
vmovups 0x484(%rax,%r9,4), %ymm13
vmovaps 0x680(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm8) + ymm0
vmovups 0x908(%rax,%r9,4), %ymm14
vmovups 0xd8c(%rax,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm7
vmulps %ymm15, %ymm29, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm19, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm19) + ymm7
vfmadd231ps %ymm26, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm26) + ymm6
vfmadd231ps %ymm16, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm16) + ymm1
vfmadd231ps %ymm17, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm17) + ymm7
vfmadd231ps %ymm25, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm8) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmovaps %ymm17, %ymm24
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c5b403
vmovaps %ymm23, %ymm16
vmovaps 0x5a0(%rsp), %ymm23
vmulps %ymm15, %ymm23, %ymm15
vmovaps 0x5c0(%rsp), %ymm31
vfmadd213ps %ymm15, %ymm31, %ymm14 # ymm14 = (ymm31 * ymm14) + ymm15
vmovaps 0x5e0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x600(%rsp), %ymm18
vfmadd213ps %ymm13, %ymm18, %ymm2 # ymm2 = (ymm18 * ymm2) + ymm13
vmulps %ymm12, %ymm23, %ymm12
vfmadd213ps %ymm12, %ymm31, %ymm11 # ymm11 = (ymm31 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm18, %ymm3 # ymm3 = (ymm18 * ymm3) + ymm10
vmovups 0x1210(%rcx,%r9,4), %ymm12
vmovups 0x1694(%rcx,%r9,4), %ymm13
vmovups 0x1b18(%rcx,%r9,4), %ymm14
vmovups 0x1f9c(%rcx,%r9,4), %ymm15
vmulps %ymm15, %ymm28, %ymm11
vmulps %ymm15, %ymm29, %ymm10
vmulps %ymm15, %ymm23, %ymm15
vfmadd231ps %ymm19, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm19) + ymm11
vfmadd231ps %ymm26, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm26) + ymm10
vfmadd231ps %ymm14, %ymm31, %ymm15 # ymm15 = (ymm31 * ymm14) + ymm15
vfmadd231ps %ymm24, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm25) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm22, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm22) + ymm11
vfmadd231ps %ymm16, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm16) + ymm10
vmovaps %ymm26, %ymm25
vmovaps %ymm19, %ymm26
vmovaps %ymm16, %ymm19
vfmadd231ps %ymm12, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm12) + ymm15
vmovups 0x1210(%rax,%r9,4), %ymm13
vmovups 0x1b18(%rax,%r9,4), %ymm14
vmovups 0x1f9c(%rax,%r9,4), %ymm16
vmulps %ymm16, %ymm28, %ymm17
vmulps %ymm16, %ymm29, %ymm12
vmulps %ymm16, %ymm23, %ymm16
vfmadd231ps %ymm26, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm26) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm31, %ymm16 # ymm16 = (ymm31 * ymm14) + ymm16
vmovups 0x1694(%rax,%r9,4), %ymm14
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps 0x480(%rsp), %ymm14, %ymm12 # ymm12 = (ymm14 * mem) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm22, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm22) + ymm17
vfmadd231ps %ymm19, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm19) + ymm12
vfmadd231ps %ymm13, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm13) + ymm16
vbroadcastss 0x2c60f7(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x2c0(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x2c6083(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm27, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm27
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2918ba(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x291898(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm27, %ymm14, %ymm13
vfmadd213ps %ymm27, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm27
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm27, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm27, %ymm6 # ymm6 = (ymm27 * ymm13) + ymm6
vcmpleps %ymm27, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm27, %ymm3 # ymm3 = (ymm27 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm27, %ymm4 # ymm4 = (ymm27 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm27, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm19, %ymm18
je 0x1c5b434
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm27, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm27) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2916a7(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x240(%rsp), %ymm2, %k1
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
movl 0x20(%rsp), %ecx
je 0x1c5b452
vcmpneqps %ymm27, %ymm6, %k1
ktestb %k1, %k0
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x100(%rsp), %ymm17
je 0x1c5b477
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x291631(%rip), %ymm5 # 0x1eec714
vsubps %ymm3, %ymm5, %ymm20
vmovaps %ymm3, %ymm20 {%k2}
vsubps %ymm4, %ymm5, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x440(%rsp)
movzbl %al, %r11d
vmovaps %ymm2, %ymm21
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
testw %r11w, %r11w
je 0x1c5b422
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm20, %ymm1 # ymm1 = (ymm20 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0xb8(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm21, %k0
kmovd %k0, %eax
andb %al, %r11b
je 0x1c5b422
movl %r11d, %eax
vbroadcastss 0x29586a(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x440(%rsp), %ymm1
vfmadd132ps 0x295e83(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm20, 0x2e0(%rsp)
vmovaps %ymm1, 0x440(%rsp)
vmovaps %ymm1, 0x300(%rsp)
vmovaps %ymm21, 0x320(%rsp)
movl %r9d, 0x340(%rsp)
movl %ecx, 0x344(%rsp)
vmovaps %xmm8, 0x350(%rsp)
vmovaps 0x150(%rsp), %xmm0
vmovaps %xmm0, 0x360(%rsp)
vmovaps 0x140(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovdqa 0x130(%rsp), %xmm0
vmovdqa %xmm0, 0x380(%rsp)
movb %al, 0x390(%rsp)
movl %r11d, %r13d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x48(%rsp), %rcx
movq (%rax,%rcx,8), %rcx
movl 0x240(%r12,%r15,4), %eax
testl %eax, 0x34(%rcx)
je 0x1c5b3f6
vaddps 0x2c5d23(%rip), %ymm20, %ymm0 # 0x1f20f40
vcvtsi2ss %r9d, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x220(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3a0(%rsp)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps %ymm21, 0x3e0(%rsp)
kmovd %r13d, %k1
vbroadcastss 0x2907ba(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movq %rcx, 0xa0(%rsp)
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r13d
movq 0xa0(%rsp), %rcx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c5b47f
cmpq $0x0, 0x40(%rcx)
jne 0x1c5b47f
vmovss 0x3a0(%rsp,%r13,4), %xmm0
vmovss 0x3c0(%rsp,%r13,4), %xmm1
vmovss 0x291429(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vxorps 0x2c5bc7(%rip){1to4}, %xmm2, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm4
vmulss 0x291883(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmovaps %xmm0, %xmm5
vfnmsub213ss %xmm4, %xmm0, %xmm5 # xmm5 = -(xmm0 * xmm5) - xmm4
vfmadd213ss %xmm4, %xmm2, %xmm2 # xmm2 = (xmm2 * xmm2) + xmm4
vmulss %xmm0, %xmm0, %xmm4
vmovss 0x29185d(%rip), %xmm6 # 0x1eecb80
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm5, %xmm5
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm4, %xmm4
vbroadcastss %xmm4, %xmm4
vmulps 0x130(%rsp), %xmm4, %xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x140(%rsp), %xmm4, %xmm2 # xmm2 = (xmm2 * mem) + xmm4
vbroadcastss %xmm5, %xmm4
vfmadd132ps 0x150(%rsp), %xmm2, %xmm4 # xmm4 = (xmm4 * mem) + xmm2
vbroadcastss %xmm3, %xmm2
vfmadd213ps %xmm4, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm4
vmovss 0x3e0(%rsp,%r13,4), %xmm3
vmovss %xmm3, 0x200(%r12,%r15,4)
vmovss %xmm2, 0x300(%r12,%r15,4)
vextractps $0x1, %xmm2, 0x340(%r12,%r15,4)
vextractps $0x2, %xmm2, 0x380(%r12,%r15,4)
vmovss %xmm0, 0x3c0(%r12,%r15,4)
vmovss %xmm1, 0x400(%r12,%r15,4)
movl 0x24(%rsp), %eax
movl %eax, 0x440(%r12,%r15,4)
movq 0x48(%rsp), %rax
movl %eax, 0x480(%r12,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%r15,4)
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
leaq 0x4d072b(%rip), %r13 # 0x212bb28
movl 0x20(%rsp), %ecx
jmp 0x1c5b422
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
movl 0x20(%rsp), %ecx
vmovaps %ymm23, %ymm18
vmovaps %ymm24, %ymm17
addq $0x8, %r9
cmpl %r9d, %ecx
jg 0x1c5aaf1
jmp 0x1c5aa41
xorl %r11d, %r11d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm8
movl 0x20(%rsp), %ecx
jmp 0x1c5b46a
xorl %r11d, %r11d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x100(%rsp), %ymm17
jmp 0x1c5b11b
xorl %r11d, %r11d
jmp 0x1c5b10c
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x4b0(%rsp)
vmovaps %ymm20, 0x1c0(%rsp)
vmovaps %ymm21, 0x1a0(%rsp)
movq %r9, 0xf0(%rsp)
movl %r11d, %eax
movq %r8, 0x40(%rsp)
movq %r10, 0x38(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
vmovaps %ymm28, 0x180(%rsp)
vmovaps %ymm29, 0x160(%rsp)
movl %eax, 0x500(%rsp)
vmovss 0x200(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x210(%rsp)
vmovss 0x3e0(%rsp,%r13,4), %xmm0
vbroadcastss 0x3a0(%rsp,%r13,4), %zmm1
vbroadcastss 0x3c0(%rsp,%r13,4), %zmm2
vmovss %xmm0, 0x200(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x2911d0(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2c596e(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x29162a(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmulss %xmm1, %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vfnmsub213ss %xmm4, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm6) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmovss 0x291604(%rip), %xmm7 # 0x1eecb80
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm6, %xmm4
vmulss %xmm7, %xmm0, %xmm0
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x4b0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x1f0(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x200(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2b7132(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm4
vbroadcastss 0x2c58fa(%rip), %zmm5 # 0x1f20edc
vpermps %zmm0, %zmm5, %zmm0
vmovaps %zmm3, 0x740(%rsp)
vmovaps %zmm4, 0x780(%rsp)
vmovaps %zmm0, 0x7c0(%rsp)
vmovaps %zmm1, 0x800(%rsp)
vmovaps %zmm2, 0x840(%rsp)
vmovaps 0x540(%rsp), %zmm0
vmovaps %zmm0, 0x880(%rsp)
vmovdqa64 0x4c0(%rsp), %zmm0
vmovdqa64 %zmm0, 0x8c0(%rsp)
movq %rcx, %r11
movq 0xf8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x900(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x940(%rsp)
vmovaps 0x700(%rsp), %zmm0
vmovaps %zmm0, 0x400(%rsp)
leaq 0x400(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x18(%r11), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %r12, 0xd8(%rsp)
leaq 0x740(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x10, 0xe8(%rsp)
movq 0x40(%r11), %rax
testq %rax, %rax
je 0x1c5b743
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %r9
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d2819(%rip), %rdx # 0x212df48
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
je 0x1c5b8bc
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c5b815
testb $0x2, (%rcx)
jne 0x1c5b792
movq 0xa0(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c5b815
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xf0(%rsp), %r9
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d2747(%rip), %rdx # 0x212df48
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c5b8bc
movq 0xd8(%rsp), %rax
movq 0xe0(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c5b8cf
vmovd 0x210(%rsp), %xmm0
vmovd %xmm0, 0x200(%r12,%r15,4)
movl $0x1, %eax
shlxl %r13d, %eax, %eax
kmovd %eax, %k0
movzbl 0x500(%rsp), %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm21, %k1
kandb %k1, %k0, %k2
kmovd %k2, %eax
ktestb %k1, %k0
je 0x1c5b959
kmovd %eax, %k1
vbroadcastss 0x29010c(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm21, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
movl %eax, %r11d
kmovd %k0, %eax
andb %r11b, %al
movzbl %al, %eax
movzbl %r11b, %ecx
cmovnel %eax, %ecx
movl %r11d, %eax
tzcntl %ecx, %r13d
testb %al, %al
movq 0xa0(%rsp), %rcx
jne 0x1c5b4f4
jmp 0x1c5b3f6
xorl %r13d, %r13d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x220(%rsp), %xmm9
jmp 0x1c5a716
xorl %r13d, %r13d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x50(%rsp), %xmm8
jmp 0x1c5a716
xorl %r13d, %r13d
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
jmp 0x1c5a716
movq 0x48(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x540(%rsp)
movl 0x24(%rsp), %eax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x500(%rsp)
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x370(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r8, 0x40(%rsp)
movq %r10, 0x38(%rsp)
movq %rsi, 0x30(%rsp)
movq %rdi, 0x28(%rsp)
vmovss 0x200(%r12,%r15,4), %xmm0
vmovss %xmm0, 0x2c0(%rsp)
vmovss 0x3e0(%rsp,%r11,4), %xmm0
vbroadcastss 0x3a0(%rsp,%r11,4), %zmm1
vbroadcastss 0x3c0(%rsp,%r11,4), %zmm2
vmovss %xmm0, 0x200(%r12,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x290c84(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vxorps 0x2c5422(%rip){1to4}, %xmm0, %xmm3 # 0x1f20ec0
vmulss %xmm3, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm4
vmulss 0x2910de(%rip), %xmm4, %xmm4 # 0x1eecb8c
vmulss %xmm1, %xmm1, %xmm5
vmovaps %xmm1, %xmm6
vfnmsub213ss %xmm4, %xmm1, %xmm6 # xmm6 = -(xmm1 * xmm6) - xmm4
vfmadd213ss %xmm4, %xmm0, %xmm0 # xmm0 = (xmm0 * xmm0) + xmm4
vmovss 0x2910b8(%rip), %xmm7 # 0x1eecb80
vmulss %xmm7, %xmm3, %xmm3
vmulss %xmm7, %xmm6, %xmm4
vmulss %xmm7, %xmm0, %xmm0
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x200(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x210(%rsp), %xmm0, %xmm4 # xmm4 = (xmm4 * mem) + xmm0
vbroadcastss %xmm3, %xmm0
vfmadd213ps %xmm4, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm0) + xmm4
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2b6be6(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm4
vbroadcastss 0x2c53ae(%rip), %zmm5 # 0x1f20edc
vpermps %zmm0, %zmm5, %zmm0
vmovaps %zmm3, 0x740(%rsp)
vmovaps %zmm4, 0x780(%rsp)
vmovaps %zmm0, 0x7c0(%rsp)
vmovaps %zmm1, 0x800(%rsp)
vmovaps %zmm2, 0x840(%rsp)
vmovaps 0x500(%rsp), %zmm0
vmovaps %zmm0, 0x880(%rsp)
vmovdqa64 0x540(%rsp), %zmm0
vmovdqa64 %zmm0, 0x8c0(%rsp)
movq 0xf8(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x900(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x940(%rsp)
vmovaps 0x700(%rsp), %zmm0
vmovaps %zmm0, 0x400(%rsp)
leaq 0x400(%rsp), %rcx
movq %rcx, 0xc0(%rsp)
movq 0x240(%rsp), %r9
movq 0x18(%r9), %rcx
movq %rcx, 0xc8(%rsp)
movq %rax, 0xd0(%rsp)
movq %r12, 0xd8(%rsp)
leaq 0x740(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x10, 0xe8(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movq %r11, 0x260(%rsp)
je 0x1c5bcb1
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm26
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm20
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d22ab(%rip), %rdx # 0x212df48
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
vmovaps 0x80(%rsp), %ymm6
vmovaps 0x60(%rsp), %ymm3
je 0x1c5be3b
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c5bd94
testb $0x2, (%rcx)
jne 0x1c5bd00
movq 0x240(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c5bd94
leaq 0xc0(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x260(%rsp), %r11
vmovaps 0x220(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm26
vmovaps 0x160(%rsp), %ymm29
vmovaps 0x180(%rsp), %ymm28
vmovaps 0x100(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x50(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %ymm21
vmovaps 0x60(%rsp), %ymm3
vmovaps 0x1c0(%rsp), %ymm20
vmovaps 0x80(%rsp), %ymm6
vxorps %xmm27, %xmm27, %xmm27
leaq 0x4d21c8(%rip), %rdx # 0x212df48
movq 0x28(%rsp), %rdi
movq 0x30(%rsp), %rsi
movq 0x38(%rsp), %r10
movq 0x40(%rsp), %r8
vmovdqa64 0x400(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c5be3b
movq 0xd8(%rsp), %rax
movq 0xe0(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c5be4e
vmovd 0x2c0(%rsp), %xmm0
vmovd %xmm0, 0x200(%r12,%r15,4)
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
movzbl %r13b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x200(%r12,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r13d
ktestb %k1, %k0
je 0x1c5becf
kmovd %r13d, %k1
vbroadcastss 0x28fb90(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r13b, %al
movzbl %al, %eax
movzbl %r13b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r11d
testb %r13b, %r13b
movl 0x20(%rsp), %ecx
jne 0x1c5ba47
jmp 0x1c5a9f6
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNv_intersector.h
|
bool embree::avx512::CurveNiIntersector1<8>::occluded_n<embree::avx512::OrientedCurve1Intersector1<embree::BSplineCurveT, 7, 8>, embree::avx512::Occluded1Epilog1<true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
static __forceinline bool occluded_n(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
unsigned int vertexID = geom->curve(primID);
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
if (Intersector().intersect(pre,ray,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x348, %rsp # imm = 0x348
movq %rdx, %r9
movq %rsi, %r15
movq %rdi, %r12
movzbl 0x1(%rcx), %eax
leaq (%rax,%rax,4), %r8
leaq (%r8,%r8,4), %rdx
vbroadcastss 0x12(%rcx,%rdx), %xmm0
vmovaps (%rsi), %xmm1
vsubps 0x6(%rcx,%rdx), %xmm1, %xmm1
vmulps 0x10(%rsi), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%rcx,%rax,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%rcx,%r8), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rax,%rax,2), %rdx
vpmovsxbd 0x6(%rcx,%rdx,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rax,%r8,2), %rsi
vpmovsxbd 0x6(%rcx,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%rcx,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rax, %rsi
vpmovsxbd 0x6(%rcx,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%rcx,%rdi), %ymm0
addq %rax, %rdi
vpmovsxbd 0x6(%rcx,%rdi), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %r8d
vpmovsxbd 0x6(%rcx,%r8), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x2b2764(%rip), %ymm26 # 0x1f12704
vpermps %ymm2, %ymm26, %ymm15
vbroadcastss 0x2c0f2c(%rip), %ymm27 # 0x1f20edc
vpermps %ymm2, %ymm27, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm26, %ymm15
vpermps %ymm3, %ymm27, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x2c0ea0(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x290fb7(%rip), %ymm8 # 0x1ef0fe8
vcmpltps %ymm8, %ymm6, %k1
vmovaps %ymm8, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm8, %ymm6, %k1
vmovaps %ymm8, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm8, %ymm5, %k1
vmovaps %ymm8, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x28c6a5(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%rcx,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%rcx,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %r8
shlq $0x3, %rdx
subq %rax, %rdx
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%rcx,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%rcx,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%rcx,%r8), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%rcx,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%r15){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2bfdbc(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%r15){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2bfd98(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2fa794(%rip), %ymm1, %k0 # 0x1f5a920
vmovups %ymm6, 0x2c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0xb(%rsp)
je 0x1c61d59
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
vbroadcastss 0x28c9bf(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2c0cf9(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2c0ceb(%rip), %xmm19 # 0x1f20ec0
movq %rcx, 0xa8(%rsp)
movq %r9, 0x20(%rsp)
tzcntq %r13, %rax
movl 0x2(%rcx), %edx
movl 0x6(%rcx,%rax,4), %edi
movq (%r9), %rax
movq 0x1e8(%rax), %rax
movq %rdx, 0x48(%rsp)
movq (%rax,%rdx,8), %rsi
movq 0x58(%rsi), %rax
movq 0x68(%rsi), %rbp
movq %rbp, %rdx
movq %rdi, 0xa0(%rsp)
imulq %rdi, %rdx
movl (%rax,%rdx), %r9d
movq 0xa0(%rsi), %rdx
movq %rdx, %rdi
imulq %r9, %rdi
leaq 0x1(%r9), %r11
leaq 0x2(%r9), %r10
leaq 0x3(%r9), %r8
movq 0xd8(%rsi), %rbx
imulq %rbx, %r9
movq 0xc8(%rsi), %r14
vmovups (%r14,%r9), %xmm4
movq %rdx, %r9
imulq %r11, %r9
imulq %rbx, %r11
vmovups (%r14,%r11), %xmm5
movq %rdx, %r11
imulq %r10, %r11
imulq %rbx, %r10
vmovups (%r14,%r10), %xmm6
imulq %r8, %rbx
vmovups (%r14,%rbx), %xmm7
movq %rdx, %r10
imulq %r8, %r10
movq 0x90(%rsi), %rsi
vmovaps (%rsi,%rdi), %xmm8
vmovaps (%rsi,%r9), %xmm9
vmovaps (%rsi,%r11), %xmm10
blsrq %r13, %r13
vmovaps (%rsi,%r10), %xmm3
movq %r13, %rdi
subq $0x1, %rdi
jb 0x1c602f3
andq %r13, %rdi
tzcntq %r13, %r8
movl 0x6(%rcx,%r8,4), %r8d
imulq %rbp, %r8
movl (%rax,%r8), %r8d
imulq %rdx, %r8
prefetcht0 (%rsi,%r8)
prefetcht0 0x40(%rsi,%r8)
testq %rdi, %rdi
je 0x1c602f3
tzcntq %rdi, %rdi
movl 0x6(%rcx,%rdi,4), %edi
imulq %rdi, %rbp
movl (%rax,%rbp), %eax
imulq %rax, %rdx
prefetcht1 (%rsi,%rdx)
prefetcht1 0x40(%rsi,%rdx)
vxorps %xmm20, %xmm20, %xmm20
vmulps %xmm20, %xmm3, %xmm0
vbroadcastss 0x290cf8(%rip), %xmm14 # 0x1ef1000
vmovaps %xmm14, %xmm1
vfmadd213ps %xmm0, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm0
vbroadcastss 0x2c0bca(%rip), %xmm13 # 0x1f20ee4
vfmadd231ps %xmm13, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm13) + xmm1
vfmadd231ps %xmm14, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm14) + xmm1
vfmadd231ps %xmm17, %xmm10, %xmm0 # xmm0 = (xmm10 * xmm17) + xmm0
vfnmadd231ps %xmm20, %xmm9, %xmm0 # xmm0 = -(xmm9 * xmm20) + xmm0
vfnmadd231ps %xmm17, %xmm8, %xmm0 # xmm0 = -(xmm8 * xmm17) + xmm0
vmulps %xmm20, %xmm7, %xmm11
vmovaps %xmm14, %xmm12
vfmadd213ps %xmm11, %xmm6, %xmm12 # xmm12 = (xmm6 * xmm12) + xmm11
vfmadd231ps %xmm13, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm13) + xmm12
vfmadd231ps %xmm14, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm14) + xmm12
vfmadd231ps %xmm17, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm17) + xmm11
vfnmadd231ps %xmm20, %xmm5, %xmm11 # xmm11 = -(xmm5 * xmm20) + xmm11
vfnmadd231ps %xmm17, %xmm4, %xmm11 # xmm11 = -(xmm4 * xmm17) + xmm11
vmulps %xmm3, %xmm14, %xmm2
vfmadd231ps %xmm13, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm13) + xmm2
vfmadd231ps %xmm14, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm14) + xmm2
vfmadd231ps %xmm20, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm20) + xmm2
vmulps %xmm17, %xmm3, %xmm3
vfmadd231ps %xmm10, %xmm20, %xmm3 # xmm3 = (xmm20 * xmm10) + xmm3
vfnmadd231ps %xmm9, %xmm17, %xmm3 # xmm3 = -(xmm17 * xmm9) + xmm3
vfnmadd231ps %xmm8, %xmm20, %xmm3 # xmm3 = -(xmm20 * xmm8) + xmm3
vmulps %xmm7, %xmm14, %xmm8
vfmadd231ps %xmm13, %xmm6, %xmm8 # xmm8 = (xmm6 * xmm13) + xmm8
vfmadd231ps %xmm14, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm14) + xmm8
vfmadd231ps %xmm20, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm20) + xmm8
vmulps %xmm17, %xmm7, %xmm7
vfmadd231ps %xmm6, %xmm20, %xmm7 # xmm7 = (xmm20 * xmm6) + xmm7
vfnmadd231ps %xmm5, %xmm17, %xmm7 # xmm7 = -(xmm17 * xmm5) + xmm7
vfnmadd231ps %xmm4, %xmm20, %xmm7 # xmm7 = -(xmm20 * xmm4) + xmm7
vshufps $0xc9, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[1,2,0,3]
vmulps %xmm5, %xmm0, %xmm5
vfmsub231ps %xmm12, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm12) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm6 # xmm6 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm5 # xmm5 = xmm11[1,2,0,3]
vmulps %xmm5, %xmm0, %xmm5
vfmsub231ps %xmm11, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm11) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm4 # xmm4 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm3, %xmm4
vfmsub231ps %xmm8, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm8) - xmm4
vshufps $0xc9, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm8 # xmm8 = xmm7[1,2,0,3]
vmulps %xmm3, %xmm8, %xmm8
vfmsub231ps %xmm7, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm7) - xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm5 # xmm5 = xmm8[1,2,0,3]
vdpps $0x7f, %xmm6, %xmm6, %xmm7
vmovss %xmm7, %xmm20, %xmm8 # xmm8 = xmm7[0],xmm20[1,2,3]
vrsqrt14ss %xmm8, %xmm20, %xmm10
vmovss 0x28c2ea(%rip), %xmm14 # 0x1eec718
vmulss %xmm14, %xmm10, %xmm11
vmovss 0x28c2e1(%rip), %xmm15 # 0x1eec71c
vmulss %xmm7, %xmm15, %xmm12
vmulss %xmm10, %xmm12, %xmm12
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm12, %xmm10
vaddss %xmm10, %xmm11, %xmm10
vdpps $0x7f, %xmm9, %xmm6, %xmm11
vbroadcastss %xmm10, %xmm10
vmulps %xmm6, %xmm10, %xmm12
vbroadcastss %xmm7, %xmm13
vmulps %xmm13, %xmm9, %xmm9
vbroadcastss %xmm11, %xmm11
vmulps %xmm6, %xmm11, %xmm6
vsubps %xmm6, %xmm9, %xmm6
vrcp14ss %xmm8, %xmm20, %xmm8
vmovss 0x290b6f(%rip), %xmm16 # 0x1ef0ff8
vfnmadd213ss %xmm16, %xmm8, %xmm7 # xmm7 = -(xmm8 * xmm7) + xmm16
vmulss %xmm7, %xmm8, %xmm7
vbroadcastss %xmm7, %xmm7
vmulps %xmm7, %xmm6, %xmm6
vmulps %xmm6, %xmm10, %xmm6
vdpps $0x7f, %xmm4, %xmm4, %xmm7
vmovss %xmm7, %xmm20, %xmm8 # xmm8 = xmm7[0],xmm20[1,2,3]
vrsqrt14ss %xmm8, %xmm20, %xmm9
vmulss %xmm14, %xmm9, %xmm10
vmulss %xmm7, %xmm15, %xmm11
vmulss %xmm9, %xmm11, %xmm11
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm9, %xmm11, %xmm9
vaddss %xmm9, %xmm10, %xmm9
vbroadcastss %xmm9, %xmm9
vdpps $0x7f, %xmm5, %xmm4, %xmm10
vmulps %xmm4, %xmm9, %xmm11
vbroadcastss %xmm7, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vbroadcastss %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vrcp14ss %xmm8, %xmm20, %xmm5
vfnmadd213ss %xmm16, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm16
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vmulps %xmm4, %xmm9, %xmm4
vshufps $0xff, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[3,3,3,3]
vmulps %xmm5, %xmm12, %xmm7
vsubps %xmm7, %xmm1, %xmm13
vshufps $0xff, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[3,3,3,3]
vmulps %xmm12, %xmm8, %xmm8
vmulps %xmm6, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vsubps %xmm5, %xmm0, %xmm6
vaddps %xmm7, %xmm1, %xmm14
vaddps %xmm5, %xmm0, %xmm0
vshufps $0xff, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[3,3,3,3]
vmulps %xmm1, %xmm11, %xmm5
vsubps %xmm5, %xmm2, %xmm15
vshufps $0xff, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[3,3,3,3]
vmulps %xmm7, %xmm11, %xmm7
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm7, %xmm1
vsubps %xmm1, %xmm3, %xmm4
vaddps %xmm5, %xmm2, %xmm16
vaddps %xmm1, %xmm3, %xmm1
vbroadcastss 0x29194b(%rip), %xmm3 # 0x1ef1ebc
vmulps %xmm3, %xmm6, %xmm2
vaddps %xmm2, %xmm13, %xmm17
vmulps %xmm3, %xmm4, %xmm2
vsubps %xmm2, %xmm15, %xmm20
vmulps %xmm3, %xmm0, %xmm0
vaddps %xmm0, %xmm14, %xmm21
vmulps %xmm3, %xmm1, %xmm0
vsubps %xmm0, %xmm16, %xmm22
vmovaps (%r15), %xmm4
vsubps %xmm4, %xmm13, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%r12), %xmm3
vmovaps 0x20(%r12), %xmm5
vmovaps 0x30(%r12), %xmm6
vmulps %xmm0, %xmm6, %xmm0
vfmadd231ps %xmm2, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm2) + xmm0
vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0
vsubps %xmm4, %xmm17, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm1
vfmadd231ps %xmm7, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm7) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vsubps %xmm4, %xmm20, %xmm2
vbroadcastss %xmm2, %xmm7
vshufps $0x55, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm2
vfmadd231ps %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ps %xmm7, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm7) + xmm2
vsubps %xmm4, %xmm15, %xmm7
vbroadcastss %xmm7, %xmm8
vshufps $0x55, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[1,1,1,1]
vshufps $0xaa, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2]
vmulps %xmm7, %xmm6, %xmm7
vfmadd231ps %xmm9, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm9) + xmm7
vfmadd231ps %xmm8, %xmm3, %xmm7 # xmm7 = (xmm3 * xmm8) + xmm7
vsubps %xmm4, %xmm14, %xmm8
vbroadcastss %xmm8, %xmm9
vshufps $0x55, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,1,1,1]
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm6, %xmm8, %xmm8
vfmadd231ps %xmm10, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm10) + xmm8
vfmadd231ps %xmm9, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm9) + xmm8
vsubps %xmm4, %xmm21, %xmm9
vbroadcastss %xmm9, %xmm10
vshufps $0x55, %xmm9, %xmm9, %xmm11 # xmm11 = xmm9[1,1,1,1]
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm6, %xmm9, %xmm9
vfmadd231ps %xmm11, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm11) + xmm9
vfmadd231ps %xmm10, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm10) + xmm9
vsubps %xmm4, %xmm22, %xmm10
vbroadcastss %xmm10, %xmm11
vshufps $0x55, %xmm10, %xmm10, %xmm12 # xmm12 = xmm10[1,1,1,1]
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm6, %xmm10, %xmm10
vfmadd231ps %xmm12, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm12) + xmm10
vfmadd231ps %xmm11, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm11) + xmm10
vsubps %xmm4, %xmm16, %xmm4
vbroadcastss %xmm4, %xmm11
vshufps $0x55, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmulps %xmm4, %xmm6, %xmm4
vfmadd231ps %xmm12, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm12) + xmm4
vfmadd231ps %xmm11, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm11) + xmm4
vmovlhps %xmm8, %xmm0, %xmm29 # xmm29 = xmm0[0],xmm8[0]
vmovlhps %xmm9, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm9[0]
vmovlhps %xmm10, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm10[0]
vmovlhps %xmm4, %xmm7, %xmm24 # xmm24 = xmm7[0],xmm4[0]
vminps %xmm12, %xmm29, %xmm3
vmaxps %xmm12, %xmm29, %xmm5
vminps %xmm24, %xmm23, %xmm6
vminps %xmm6, %xmm3, %xmm3
vmaxps %xmm24, %xmm23, %xmm6
vmaxps %xmm6, %xmm5, %xmm5
vshufpd $0x3, %xmm3, %xmm3, %xmm6 # xmm6 = xmm3[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[1,1]
vminps %xmm6, %xmm3, %xmm3
vmaxps %xmm11, %xmm5, %xmm5
vandps %xmm18, %xmm3, %xmm3
vandps %xmm18, %xmm5, %xmm5
vmaxps %xmm5, %xmm3, %xmm3
vmovshdup %xmm3, %xmm5 # xmm5 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm5, %xmm3
vmulss 0x291783(%rip), %xmm3, %xmm3 # 0x1ef1eb8
vmovddup %xmm0, %xmm6 # xmm6 = xmm0[0,0]
vmovddup %xmm1, %xmm11 # xmm11 = xmm1[0,0]
vmovddup %xmm2, %xmm18 # xmm18 = xmm2[0,0]
vmovddup %xmm7, %xmm7 # xmm7 = xmm7[0,0]
vmovddup %xmm8, %xmm5 # xmm5 = xmm8[0,0]
vmovddup %xmm9, %xmm8 # xmm8 = xmm9[0,0]
vmovddup %xmm10, %xmm9 # xmm9 = xmm10[0,0]
vmovddup %xmm4, %xmm10 # xmm10 = xmm4[0,0]
vmovaps %xmm3, 0x170(%rsp)
vbroadcastss %xmm3, %ymm31
vxorps %xmm19, %xmm31, %xmm0
vbroadcastss %xmm0, %ymm28
vsubps %xmm29, %xmm12, %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps %xmm12, 0x90(%rsp)
vsubps %xmm12, %xmm23, %xmm0
vmovaps %xmm0, 0x50(%rsp)
vmovaps %xmm23, 0x80(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x290(%rsp)
vmovaps %xmm13, 0x160(%rsp)
vmovaps %xmm14, 0x150(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x280(%rsp)
vmovaps %xmm17, 0x120(%rsp)
vmovaps %xmm21, 0x100(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x270(%rsp)
vmovaps %xmm20, 0x110(%rsp)
vmovaps %xmm22, 0xf0(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x260(%rsp)
vmovaps %xmm15, 0x140(%rsp)
vmovaps %xmm16, 0x130(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x250(%rsp)
xorl %edi, %edi
vmovsd 0x28beb3(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
xorl %ebp, %ebp
movq 0x20(%rsp), %r9
vmovaps %xmm29, 0x30(%rsp)
vmovaps %xmm6, 0x200(%rsp)
vmovaps %xmm11, 0x1f0(%rsp)
vmovaps %xmm18, 0x1e0(%rsp)
vmovaps %xmm7, 0x1d0(%rsp)
vmovaps %xmm5, 0x1c0(%rsp)
vmovaps %xmm8, 0x1b0(%rsp)
vmovaps %xmm9, 0x1a0(%rsp)
vmovaps %xmm10, 0x190(%rsp)
vmovups %ymm31, 0x300(%rsp)
vmovups %ymm28, 0x2e0(%rsp)
vmovaps %xmm15, %xmm30
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x28be58(%rip), %ymm13 # 0x1eec714
vsubps %xmm1, %xmm13, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm8, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm6, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm6) + xmm3
vfmadd231ps %xmm11, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm11) + xmm4
vfmadd231ps %xmm18, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm18) + xmm5
vfmadd231ps %xmm2, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2c05db(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vpermps %ymm3, %ymm26, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm26, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm26, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm26, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x2c0592(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2c05a9(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm13, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2905e1(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2ff2f1(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm31, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c60d17
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm31, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c60d37
movl %ebp, %eax
movl %ecx, 0x210(%rsp,%rax,4)
vmovlps %xmm0, 0x2a0(%rsp,%rax,8)
vmovlps %xmm30, 0x320(%rsp,%rax,8)
incl %ebp
vbroadcastss 0x28be3f(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2c0179(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2c016b(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x29028d(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2c0177(%rip), %xmm21 # 0x1f20ee0
vmovss 0x28b9a1(%rip), %xmm22 # 0x1eec714
vmovss 0x290283(%rip), %xmm23 # 0x1ef1000
vmovss 0x290cc5(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x28b983(%rip), %xmm25 # 0x1eec714
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x60(%rsp), %xmm28
vmovaps 0x50(%rsp), %xmm31
testl %ebp, %ebp
je 0x1c61d28
leal -0x1(%rbp), %eax
vmovss 0x2a0(%rsp,%rax,8), %xmm0
vmovss 0x2a4(%rsp,%rax,8), %xmm1
movl 0x210(%rsp,%rax,4), %ecx
vmovsd 0x320(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x210(%rsp,%rax,4)
cmovel %eax, %ebp
vpxord %xmm30, %xmm30, %xmm30
vcvtsi2ss %rdx, %xmm30, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vpxord %xmm30, %xmm30, %xmm30
vcvtsi2ss %rdx, %xmm30, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm14
vfmadd231ss %xmm4, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm4) + xmm14
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm13
vfmadd231ss %xmm2, %xmm0, %xmm13 # xmm13 = (xmm0 * xmm2) + xmm13
vsubss %xmm14, %xmm13, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c61cc6
vmovaps %xmm15, %xmm30
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %ebp
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps 0x1c0(%rsp), %xmm1, %xmm3
vmulps 0x1b0(%rsp), %xmm1, %xmm4
vmulps 0x1a0(%rsp), %xmm1, %xmm5
vmulps 0x190(%rsp), %xmm1, %xmm1
vfmadd231ps 0x200(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x1f0(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps 0x1e0(%rsp), %xmm2, %xmm5 # xmm5 = (xmm2 * mem) + xmm5
vfmadd231ps 0x1d0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm14, 0x70(%rsp)
vbroadcastss %xmm14, %xmm6
vmovaps %xmm13, 0x10(%rsp)
vbroadcastss %xmm13, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x290f8d(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x28f9e2(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c6100b
vmovss 0x290ebb(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c6106c
vmovss 0x290ead(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x28f9b6(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c6106c
vmovss 0x290e7b(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x28f984(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c61c9a
vcmpltss %xmm16, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x28f949(%rip), %xmm16 # 0x1ef09cc
vxorps %xmm11, %xmm11, %xmm11
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm11, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x28a964(%rip), %xmm7 # 0x1eeba20
vmovss %xmm11, %xmm7, %xmm7 {%k1}
vmovss 0x28baba(%rip), %xmm8 # 0x1eecb84
vmovss %xmm11, %xmm8, %xmm8 {%k1}
vcmpltss %xmm11, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c610ec
jnp 0x1c61131
vucomiss %xmm13, %xmm14
jne 0x1c6113f
jp 0x1c6113f
vxorps %xmm16, %xmm16, %xmm16
vucomiss %xmm16, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x28a909(%rip), %xmm13 # 0x1eeba20
vmovss %xmm16, %xmm13, %xmm13 {%k1}
vmovss 0x28ba5f(%rip), %xmm14 # 0x1eecb84
vmovss 0x28b5e5(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c61166
vxorps %xmm16, %xmm16, %xmm16
vmovaps %xmm30, %xmm15
jmp 0x1c61176
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vxorps %xmm16, %xmm16, %xmm16
vfmadd213ss %xmm14, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps %xmm30, %xmm15
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm16, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x28f83f(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x70(%rsp), %xmm14
jne 0x1c6119c
jnp 0x1c61206
vucomiss %xmm9, %xmm10
jne 0x1c611db
jp 0x1c611db
vucomiss %xmm16, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x28a85f(%rip), %xmm9 # 0x1eeba20
vmovss %xmm16, %xmm9, %xmm9 {%k1}
vmovss 0x28b9b5(%rip), %xmm10 # 0x1eecb84
vmovss 0x28b53b(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c611fc
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %bl
vucomiss %xmm8, %xmm7
ja 0x1c61ca8
vaddss 0x2fc216(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x28b942(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm15, %xmm4 # xmm4 = xmm15[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vbroadcastss 0x28fd05(%rip), %xmm15 # 0x1ef0fec
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm7, %xmm15, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm15, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vinsertps $0x10, %xmm1, %xmm14, %xmm6 # xmm6 = xmm14[0],xmm1[0],xmm14[2,3]
vmovaps 0x10(%rsp), %xmm0
vinsertps $0x10, %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm4[0],xmm0[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps %xmm17, %xmm0, %xmm9
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm29, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm29
vmovaps %xmm31, %xmm12
vfmadd213ps 0x90(%rsp), %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + mem
vmovaps 0x290(%rsp), %xmm13
vfmadd213ps 0x80(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vmovaps %xmm14, %xmm30
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm3, %xmm15, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2bfa87(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2fe8a5(%rip), %xmm29 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm29, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2f9505(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2bf9f6(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2bf98f(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c61c80
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm30, %xmm7
vmovaps %xmm30, %xmm14
jbe 0x1c61679
testb %sil, %sil
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x28f9c2(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x30(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
je 0x1c616b3
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c616b3
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x28f965(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x30(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c61c96
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm1, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm29, %xmm6 # xmm6 = (xmm29 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm13
ja 0x1c61768
decq %rax
jne 0x1c616c4
jmp 0x1c61ca8
vucomiss %xmm16, %xmm0
jb 0x1c61ca8
vucomiss %xmm0, %xmm22
vmovaps 0x10(%rsp), %xmm13
jb 0x1c61cae
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm16, %xmm1
jb 0x1c61cae
vucomiss %xmm1, %xmm22
jb 0x1c61cae
vmovss 0x18(%r12), %xmm2
vinsertps $0x1c, 0x28(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x38(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vmovaps (%r15), %xmm3
vmovaps 0x160(%rsp), %xmm4
vsubps %xmm3, %xmm4, %xmm4
vdpps $0x7f, %xmm2, %xmm4, %xmm4
vmovaps 0x120(%rsp), %xmm5
vsubps %xmm3, %xmm5, %xmm5
vdpps $0x7f, %xmm2, %xmm5, %xmm5
vmovaps 0x110(%rsp), %xmm6
vsubps %xmm3, %xmm6, %xmm6
vdpps $0x7f, %xmm2, %xmm6, %xmm6
vmovaps 0x140(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm7
vdpps $0x7f, %xmm2, %xmm7, %xmm7
vmovaps 0x150(%rsp), %xmm8
vsubps %xmm3, %xmm8, %xmm8
vdpps $0x7f, %xmm2, %xmm8, %xmm8
vmovaps 0x100(%rsp), %xmm9
vsubps %xmm3, %xmm9, %xmm9
vdpps $0x7f, %xmm2, %xmm9, %xmm9
vmovaps 0xf0(%rsp), %xmm10
vsubps %xmm3, %xmm10, %xmm10
vdpps $0x7f, %xmm2, %xmm10, %xmm10
vmovaps 0x130(%rsp), %xmm12
vsubps %xmm3, %xmm12, %xmm3
vdpps $0x7f, %xmm2, %xmm3, %xmm2
vsubss %xmm1, %xmm22, %xmm3
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm1, %xmm10, %xmm10
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm4, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm6) + xmm10
vfmadd231ss %xmm7, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm7) + xmm1
vsubss %xmm0, %xmm22, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm10, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm10) + xmm1
vfmadd231ss %xmm9, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm8) + xmm1
vucomiss 0xc(%r15), %xmm1
jb 0x1c61cae
vmovss 0x20(%r15), %xmm12
vucomiss %xmm1, %xmm12
jb 0x1c61cae
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm25, %xmm8
vmulps 0x150(%rsp), %xmm7, %xmm9
vmulps 0x100(%rsp), %xmm7, %xmm10
vmulps 0xf0(%rsp), %xmm7, %xmm11
vmulps 0x130(%rsp), %xmm7, %xmm7
vfmadd231ps 0x160(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x120(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x110(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x140(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps 0x28f681(%rip){1to4}, %xmm7, %xmm6 # 0x1ef0fec
movq (%r9), %rax
movq 0x1e8(%rax), %rax
movq 0x48(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movl 0x24(%r15), %eax
testl %eax, 0x34(%r14)
je 0x1c61c79
movq 0x10(%r9), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c619a4
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c61c7b
vbroadcastss %xmm5, %xmm5
vmulps 0x250(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x260(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x270(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x280(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%r9), %rax
vshufps $0xe9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,2,3]
vmovlps %xmm3, 0x220(%rsp)
vmovss %xmm2, 0x228(%rsp)
vmovlps %xmm0, 0x22c(%rsp)
movq 0xa0(%rsp), %rcx
movl %ecx, 0x234(%rsp)
movq 0x48(%rsp), %rcx
movl %ecx, 0x238(%rsp)
movl (%rax), %ecx
movl %ecx, 0x23c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x240(%rsp)
vmovss %xmm1, 0x20(%r15)
movl $0xffffffff, 0x2c(%rsp) # imm = 0xFFFFFFFF
leaq 0x2c(%rsp), %rax
movq %rax, 0xb0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r9), %rax
movq %rax, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
leaq 0x220(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm15, 0xe0(%rsp)
vmovss %xmm12, 0xc(%rsp)
je 0x1c61b88
movl %edi, 0x28(%rsp)
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0xc(%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xe0(%rsp), %xmm15
movl 0x28(%rsp), %edi
vmovaps 0x50(%rsp), %xmm31
vmovaps 0x60(%rsp), %xmm28
vmovaps 0x30(%rsp), %xmm29
vxorps %xmm16, %xmm16, %xmm16
vbroadcastss 0x28ac06(%rip), %xmm25 # 0x1eec714
vmovss 0x28ff34(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x28f4de(%rip), %xmm23 # 0x1ef1000
vmovss 0x28abe8(%rip), %xmm22 # 0x1eec714
vmovss 0x2bf3aa(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x28f4ac(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2bf376(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2bf370(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x28b022(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2bf374(%rip), %ymm27 # 0x1f20edc
vbroadcastss 0x2b0b92(%rip), %ymm26 # 0x1f12704
movq 0x20(%rsp), %r9
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c61cb8
movq 0x10(%r9), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c61c75
testb $0x2, (%rcx)
jne 0x1c61ba9
testb $0x40, 0x3e(%r14)
je 0x1c61c68
movl %edi, %r14d
leaq 0xb0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0xc(%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xe0(%rsp), %xmm15
movl %r14d, %edi
vmovaps 0x50(%rsp), %xmm31
vmovaps 0x60(%rsp), %xmm28
vmovaps 0x30(%rsp), %xmm29
vxorps %xmm16, %xmm16, %xmm16
vbroadcastss 0x28ab15(%rip), %xmm25 # 0x1eec714
vmovss 0x28fe43(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x28f3ed(%rip), %xmm23 # 0x1ef1000
vmovss 0x28aaf7(%rip), %xmm22 # 0x1eec714
vmovss 0x2bf2b9(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x28f3bb(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2bf285(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2bf27f(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x28af31(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2bf283(%rip), %ymm27 # 0x1f20edc
vbroadcastss 0x2b0aa1(%rip), %ymm26 # 0x1f12704
movq 0x20(%rsp), %r9
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c61cb8
movb $0x1, %al
jmp 0x1c61cba
xorl %eax, %eax
orb %al, %dil
jmp 0x1c61cae
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x30(%rsp), %xmm29
vmovaps %xmm30, %xmm14
jmp 0x1c61ca8
xorl %ebx, %ebx
jmp 0x1c61ca8
movb $0x1, %bl
vmovaps %xmm30, %xmm15
vmovaps 0x70(%rsp), %xmm14
vmovaps 0x10(%rsp), %xmm13
testb %bl, %bl
jne 0x1c60da7
jmp 0x1c61cc6
xorl %eax, %eax
testb %al, %al
jne 0x1c61c7b
vmovss %xmm12, 0x20(%r15)
jmp 0x1c61c7b
vinsertps $0x10, %xmm13, %xmm14, %xmm0 # xmm0 = xmm14[0],xmm13[0],xmm14[2,3]
vmovaps 0x200(%rsp), %xmm6
vmovaps 0x1f0(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm18
vmovaps 0x1d0(%rsp), %xmm7
vmovaps 0x1c0(%rsp), %xmm5
vmovaps 0x1b0(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x190(%rsp), %xmm10
vmovups 0x300(%rsp), %ymm31
vmovups 0x2e0(%rsp), %ymm28
jmp 0x1c608a7
testb $0x1, %dil
movq 0xa8(%rsp), %rcx
jne 0x1c61d59
vmovups 0x2c0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r13d
setne 0xb(%rsp)
jne 0x1c601e2
movb 0xb(%rsp), %al
andb $0x1, %al
addq $0x348, %rsp # imm = 0x348
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 8>::occluded_n<embree::avx512::OrientedCurve1IntersectorK<embree::BSplineCurveT, 8>, embree::avx512::Occluded1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Shadow-ray (occlusion) query for ray lane k against the M curve segments
// packed into one CurveNi primitive. Returns true as soon as any candidate
// curve reports occlusion; returns false if no candidate is hit.
static __forceinline bool occluded_n(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Conservative broad-phase test of ray lane k against the per-curve bounds;
// 'valid' flags candidate slots, 'tNear' holds their entry distances.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Process candidates one bit at a time, nearest-bit-first via bit scans.
while (mask)
{
// bscf: scan lowest set bit and clear it from the mask (index of the
// current candidate) — consistent with the mask1/bsf usage below.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
unsigned int vertexID = geom->curve(primID);
// Gather the four control points (a0..a3) and normals (n0..n3) of the
// current curve segment.
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Software prefetch of the next one or two candidates' vertex data into
// L1/L2 to hide memory latency while the current curve is intersected.
// 'mask' already has bit i cleared, so it is non-zero iff more
// candidates remain; i1/i2 are the next two candidate slots.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Precise curve intersection; for an occlusion query any confirmed hit
// terminates the whole traversal immediately.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID)))
return true;
// Re-prune remaining candidates whose bounds entry distance exceeds the
// ray's current tfar for lane k (tfar may have been tightened above).
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x580, %rsp # imm = 0x580
movq %r8, %r10
movq %rdx, %r9
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %rdx
leaq (%rdx,%rdx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%r9,4), %xmm1
vmovss 0x80(%r11,%r9,4), %xmm2
vinsertps $0x10, 0x20(%r11,%r9,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r11,%r9,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r11,%r9,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x88(%rsp)
vinsertps $0x20, 0xc0(%r11,%r9,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r10,%r8), %ymm1
addq %rax, %r8
vpmovsxbd 0x6(%r10,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %edx
vpmovsxbd 0x6(%r10,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2aa43b(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2b8c04(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2b8b73(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x288c8a(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x284378(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %r8
subq %rax, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r10,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rdx
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r10,%r8), %ymm6
subq %rsi, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r10,%rdx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r10,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%r11,%r9,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2b7a88(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x100(%r11,%r9,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2b7a63(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2f245f(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x3c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0x1f(%rsp)
je 0x1c6a2f1
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
leaq (%r9,%r9,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r13
addq $0x20, %r13
movl $0x1, %eax
shlxl %r9d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x284657(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2b8991(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2b8983(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq 0x88(%rsp), %rdi
movq %r10, 0xb8(%rsp)
movq %r9, 0xb0(%rsp)
movq %r11, 0xa8(%rsp)
tzcntq %r12, %rax
movl 0x2(%r10), %edx
movl 0x6(%r10,%rax,4), %eax
movq (%rdi), %rcx
movq 0x1e8(%rcx), %rcx
movq %rdx, 0xc0(%rsp)
movq (%rcx,%rdx,8), %rdi
movq 0x58(%rdi), %rcx
movq 0x68(%rdi), %rdx
movq %rdx, %rsi
imulq %rax, %rsi
movl (%rcx,%rsi), %r10d
movq 0xa0(%rdi), %rsi
movq %rsi, %r8
imulq %r10, %r8
leaq 0x1(%r10), %rbx
leaq 0x2(%r10), %r11
leaq 0x3(%r10), %r9
movq 0xd8(%rdi), %r14
imulq %r14, %r10
movq 0xc8(%rdi), %r15
vmovups (%r15,%r10), %xmm5
movq %rsi, %r10
imulq %rbx, %r10
imulq %r14, %rbx
vmovups (%r15,%rbx), %xmm6
movq %rsi, %rbx
imulq %r11, %rbx
imulq %r14, %r11
vmovups (%r15,%r11), %xmm7
imulq %r9, %r14
vmovups (%r15,%r14), %xmm8
movq %rsi, %r11
imulq %r9, %r11
movq 0x90(%rdi), %rdi
vmovaps (%rdi,%r8), %xmm9
vmovaps (%rdi,%r10), %xmm10
movq 0xb8(%rsp), %r10
vmovaps (%rdi,%rbx), %xmm11
blsrq %r12, %r12
vmovaps (%rdi,%r11), %xmm4
movq %r12, %r8
subq $0x1, %r8
jb 0x1c6867a
andq %r12, %r8
tzcntq %r12, %r9
movl 0x6(%r10,%r9,4), %r9d
imulq %rdx, %r9
movl (%rcx,%r9), %r9d
imulq %rsi, %r9
prefetcht0 (%rdi,%r9)
prefetcht0 0x40(%rdi,%r9)
testq %r8, %r8
je 0x1c6867a
tzcntq %r8, %r8
movl 0x6(%r10,%r8,4), %r8d
imulq %r8, %rdx
movl (%rcx,%rdx), %ecx
imulq %rcx, %rsi
prefetcht1 (%rdi,%rsi)
prefetcht1 0x40(%rdi,%rsi)
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r11
vmovss (%r11,%r9,4), %xmm0
vinsertps $0x1c, 0x20(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmulps %xmm31, %xmm4, %xmm1
vbroadcastss 0x288951(%rip), %xmm14 # 0x1ef1000
vmovaps %xmm14, %xmm2
vfmadd213ps %xmm1, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm2) + xmm1
vbroadcastss 0x2b8823(%rip), %xmm15 # 0x1f20ee4
vfmadd231ps %xmm15, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm15) + xmm2
vfmadd231ps %xmm14, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm14) + xmm2
vfmadd231ps %xmm17, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm17) + xmm1
vfnmadd231ps %xmm31, %xmm10, %xmm1 # xmm1 = -(xmm10 * xmm31) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vmovaps %xmm14, %xmm13
vfmadd213ps %xmm12, %xmm7, %xmm13 # xmm13 = (xmm7 * xmm13) + xmm12
vfmadd231ps %xmm15, %xmm6, %xmm13 # xmm13 = (xmm6 * xmm15) + xmm13
vfmadd231ps %xmm14, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm14) + xmm13
vfmadd231ps %xmm17, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm17) + xmm12
vfnmadd231ps %xmm31, %xmm6, %xmm12 # xmm12 = -(xmm6 * xmm31) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vmulps %xmm4, %xmm14, %xmm3
vfmadd231ps %xmm15, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm15) + xmm3
vfmadd231ps %xmm14, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm14) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfmadd231ps %xmm11, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm11) + xmm4
vfnmadd231ps %xmm10, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vmulps %xmm14, %xmm8, %xmm9
vfmadd231ps %xmm15, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm15) + xmm9
vfmadd231ps %xmm14, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm14) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfmadd231ps %xmm7, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm7) + xmm8
vfnmadd231ps %xmm6, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x283f41(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x283f36(%rip), %xmm16 # 0x1eec71c
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vaddss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x2887c2(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vdpps $0x7f, %xmm5, %xmm5, %xmm9
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vmovss %xmm9, %xmm31, %xmm8 # xmm8 = xmm9[0],xmm31[1,2,3]
vrsqrt14ss %xmm8, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm9, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vaddss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm11
vdpps $0x7f, %xmm6, %xmm5, %xmm13
vbroadcastss %xmm9, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm13, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm8, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm9 # xmm9 = -(xmm6 * xmm9) + xmm17
vmulss %xmm6, %xmm9, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm13
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm11, %xmm6
vsubps %xmm6, %xmm3, %xmm15
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm11, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm16
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x289599(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm13, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm15, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm16, %xmm22
vsubps %xmm0, %xmm13, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x330(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps (%r13), %xmm4
vmovaps 0x10(%r13), %xmm5
vmovaps 0x20(%r13), %xmm6
vmulps %xmm1, %xmm6, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x320(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm2
vfmadd231ps %xmm7, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm7) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm7
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x310(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm6, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm7, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm7) + xmm3
vsubps %xmm0, %xmm15, %xmm9
vbroadcastss %xmm9, %xmm7
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x300(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm6, %xmm9, %xmm9
vfmadd231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) + xmm9
vfmadd231ps %xmm7, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm7) + xmm9
vsubps %xmm0, %xmm14, %xmm10
vbroadcastss %xmm10, %xmm7
vshufps $0x55, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2f0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm6, %xmm10, %xmm10
vfmadd231ps %xmm8, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm8) + xmm10
vfmadd231ps %xmm7, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm7) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm7
vshufps $0x55, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2e0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm11
vfmadd231ps %xmm8, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm8) + xmm11
vfmadd231ps %xmm7, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm7) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm7
vshufps $0x55, %xmm12, %xmm12, %xmm8 # xmm8 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2d0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm6, %xmm12, %xmm12
vfmadd231ps %xmm8, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm8) + xmm12
vfmadd231ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm7) + xmm12
vsubps %xmm0, %xmm16, %xmm8
vbroadcastss %xmm8, %xmm0
vshufps $0x55, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1,1,1]
vmovaps %xmm8, 0x2c0(%rsp)
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm6, %xmm8, %xmm6
vfmadd231ps %xmm7, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm7) + xmm6
vfmadd231ps %xmm0, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm0) + xmm6
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm24 # xmm24 = xmm3[0],xmm12[0]
vmovlhps %xmm6, %xmm9, %xmm25 # xmm25 = xmm9[0],xmm6[0]
vminps %xmm23, %xmm8, %xmm0
vmaxps %xmm23, %xmm8, %xmm4
vminps %xmm25, %xmm24, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm25, %xmm24, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vshufpd $0x3, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,1]
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm7, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x28938e(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm7 # xmm7 = xmm1[0,0]
vmovddup %xmm2, %xmm18 # xmm18 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm9, %xmm2 # xmm2 = xmm9[0,0]
vmovddup %xmm10, %xmm3 # xmm3 = xmm10[0,0]
vmovddup %xmm11, %xmm9 # xmm9 = xmm11[0,0]
vmovddup %xmm12, %xmm10 # xmm10 = xmm12[0,0]
vmovddup %xmm6, %xmm11 # xmm11 = xmm6[0,0]
vmovaps %xmm0, 0x160(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
vpbroadcastd %eax, %ymm0
vmovdqa %ymm0, 0x3a0(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
vmovss 0x60(%r11,%r9,4), %xmm0
vmovss %xmm0, 0xcc(%rsp)
vmovaps %xmm8, 0xe0(%rsp)
vsubps %xmm8, %xmm23, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm23, 0x130(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm24, 0x120(%rsp)
vmovaps %xmm25, 0x170(%rsp)
vsubps %xmm24, %xmm25, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm13, 0x2b0(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovaps %xmm3, %xmm13
vmovaps %xmm2, %xmm12
vmovaps %xmm17, 0x270(%rsp)
vmovaps %xmm21, 0x250(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm20, 0x260(%rsp)
vmovaps %xmm22, 0x240(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm15, 0x290(%rsp)
vmovaps %xmm16, 0x280(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm1, %xmm16
movq 0xc0(%rsp), %r8
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
vmovsd 0x283a68(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0x88(%rsp), %rdi
vmovaps %xmm7, 0x190(%rsp)
vmovaps %xmm18, 0x180(%rsp)
vmovaps %xmm1, 0x90(%rsp)
vmovaps %xmm2, 0x60(%rsp)
vmovaps %xmm3, 0x50(%rsp)
vmovaps %xmm9, 0x40(%rsp)
vmovaps %xmm10, 0x30(%rsp)
vmovaps %xmm11, 0x20(%rsp)
vmovaps %ymm29, 0x400(%rsp)
vmovaps %ymm28, 0x3e0(%rsp)
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x283a22(%rip), %ymm26 # 0x1eec714
vsubps %xmm1, %xmm26, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps %xmm7, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm7) + xmm3
vfmadd231ps %xmm18, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm18) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2b81a2(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2a99ba(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2b815b(%rip), %ymm13 # 0x1f20edc
vpermps %ymm3, %ymm13, %ymm19
vbroadcastss 0x2b8148(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm13, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm13, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm13, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2b8161(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm26, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x288198(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2f6ea8(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c69160
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c69183
movl %ebx, %eax
movl %ecx, 0x1a0(%rsp,%rax,4)
vmovlps %xmm0, 0x340(%rsp,%rax,8)
vmovlps %xmm27, 0x420(%rsp,%rax,8)
incl %ebx
vbroadcastss 0x2839f3(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2b7d2d(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2b7d1f(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x287e41(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2b7d2b(%rip), %xmm21 # 0x1f20ee0
vmovss 0x283555(%rip), %xmm22 # 0x1eec714
vmovss 0x287e37(%rip), %xmm23 # 0x1ef1000
vmovss 0x288879(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x283537(%rip), %xmm25 # 0x1eec714
vmovaps 0x90(%rsp), %xmm16
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x100(%rsp), %xmm28
vmovaps 0xf0(%rsp), %xmm29
testl %ebx, %ebx
je 0x1c6a2c7
leal -0x1(%rbx), %eax
vmovss 0x340(%rsp,%rax,8), %xmm0
vmovss 0x344(%rsp,%rax,8), %xmm1
movl 0x1a0(%rsp,%rax,4), %ecx
vmovsd 0x420(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1a0(%rsp,%rax,4)
cmovel %eax, %ebx
vxorps %xmm2, %xmm2, %xmm2
vcvtsi2ss %rdx, %xmm2, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm27
vfmadd231ss %xmm4, %xmm0, %xmm27 # xmm27 = (xmm0 * xmm4) + xmm27
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm27, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c6a29a
vmovaps %xmm27, %xmm6
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %ebx
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps 0x190(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x180(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm6, 0xd0(%rsp)
vbroadcastss %xmm6, %xmm6
vmovaps %xmm14, 0x70(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x288b2f(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x287584(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c69469
vmovss 0x288a5d(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c694ca
vmovss 0x288a4f(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x287558(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c694ca
vmovss 0x288a1d(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x287526(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c6a247
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x2874eb(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x28250b(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x283661(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c69545
jnp 0x1c69584
vucomiss %xmm13, %xmm14
jne 0x1c69594
jp 0x1c69594
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2824b6(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x28360c(%rip), %xmm14 # 0x1eecb84
vmovss 0x283192(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c695b5
vmovaps 0xe0(%rsp), %xmm16
vmovaps %xmm27, %xmm15
jmp 0x1c695cd
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xe0(%rsp), %xmm16
vmovaps %xmm27, %xmm15
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vmovaps 0xd0(%rsp), %xmm27
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x2873e0(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x70(%rsp), %xmm14
jne 0x1c695fb
jnp 0x1c69665
vucomiss %xmm9, %xmm10
jne 0x1c6963a
jp 0x1c6963a
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x282400(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x283556(%rip), %xmm10 # 0x1eecb84
vmovss 0x2830dc(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c6965b
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r15b
vucomiss %xmm8, %xmm7
ja 0x1c6a1fc
vaddss 0x2f3db6(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2834e2(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm15, %xmm4 # xmm4 = xmm15[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vbroadcastss 0x2878a5(%rip), %xmm15 # 0x1ef0fec
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm7, %xmm15, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm15, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vinsertps $0x10, %xmm1, %xmm27, %xmm6 # xmm6 = xmm27[0],xmm1[0],xmm27[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps %xmm17, %xmm0, %xmm9
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm26, %xmm3
vfmadd213ps %xmm16, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm16
vmovaps %xmm28, %xmm12
vfmadd213ps 0x130(%rsp), %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + mem
vmovaps %xmm29, %xmm13
vfmadd213ps 0x120(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm3, %xmm15, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2b7635(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2f6453(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2f10b3(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2b75a4(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2b753d(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c6a1ec
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm27, %xmm7
vmovaps 0x90(%rsp), %xmm16
jbe 0x1c69ace
testb %sil, %sil
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x28756e(%rip), %xmm11 # 0x1ef0fec
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
je 0x1c69b09
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c69b09
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x287510(%rip), %xmm11 # 0x1ef0fec
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c6a215
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c69bbd
decq %rax
jne 0x1c69b1a
jmp 0x1c6a218
vucomiss %xmm30, %xmm0
jb 0x1c6a218
vucomiss %xmm0, %xmm22
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
jb 0x1c69ced
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c69ced
vucomiss %xmm1, %xmm22
jb 0x1c69ced
vmovss 0x8(%r13), %xmm2
vinsertps $0x1c, 0x18(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x330(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm7
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm3, %xmm10, %xmm7 # xmm7 = (xmm10 * xmm3) + xmm7
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vsubss %xmm0, %xmm22, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm9, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm8) + xmm1
vfmadd231ss %xmm7, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm7) + xmm1
vucomiss 0xcc(%rsp), %xmm1
jb 0x1c69ced
vmovss 0x100(%r11,%r9,4), %xmm7
vucomiss %xmm1, %xmm7
jae 0x1c69d04
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
jmp 0x1c6a23c
vmovss %xmm7, 0xc8(%rsp)
movq %r12, 0x148(%rsp)
movl %r14d, %r12d
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm25, %xmm8
vmulps 0x2a0(%rsp), %xmm7, %xmm9
vmulps 0x250(%rsp), %xmm7, %xmm10
vmulps 0x240(%rsp), %xmm7, %xmm11
vmulps 0x280(%rsp), %xmm7, %xmm7
vfmadd231ps 0x2b0(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x270(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x260(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x290(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps 0x28723e(%rip){1to4}, %xmm7, %xmm6 # 0x1ef0fec
movq (%rdi), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %r14
movl 0x120(%r11,%r9,4), %eax
testl %eax, 0x34(%r14)
je 0x1c6a1c8
movq 0x10(%rdi), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
jne 0x1c69df8
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c6a1dc
vbroadcastss %xmm5, %xmm5
vmulps 0x200(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x210(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x220(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x230(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%rdi), %rax
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x2a88ac(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm0
vpermps %ymm2, %ymm4, %ymm4
vbroadcastss 0x2b7071(%rip), %ymm5 # 0x1f20edc
vpermps %ymm2, %ymm5, %ymm5
vbroadcastss %xmm2, %ymm2
vmovaps %ymm4, 0x440(%rsp)
vmovaps %ymm5, 0x460(%rsp)
vmovaps %ymm2, 0x480(%rsp)
vmovaps %ymm3, 0x4a0(%rsp)
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x500(%rsp)
leaq 0x520(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x520(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x540(%rsp)
vmovss %xmm1, 0x100(%r11,%r9,4)
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
leaq 0x1e0(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x1b8(%rsp)
movq 0x8(%rdi), %rax
movq %rax, 0x1c0(%rsp)
movq %r11, 0x1c8(%rsp)
leaq 0x440(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x8, 0x1d8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm15, 0x150(%rsp)
je 0x1c6a065
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xd0(%rsp), %xmm27
vmovaps 0x150(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm16
movq 0xc0(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x282725(%rip), %xmm25 # 0x1eec714
vmovss 0x287a53(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x286ffd(%rip), %xmm23 # 0x1ef1000
vmovss 0x282707(%rip), %xmm22 # 0x1eec714
vmovss 0x2b6ec9(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x286fcb(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2b6e95(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2b6e8f(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x282b41(%rip), %xmm17 # 0x1eecb80
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %rdi
movq 0xa8(%rsp), %r11
movq 0xb0(%rsp), %r9
movq 0xb8(%rsp), %r10
vmovdqa 0x1e0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c6a278
movq 0x10(%rdi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c6a18a
testb $0x2, (%rcx)
jne 0x1c6a09a
testb $0x40, 0x3e(%r14)
je 0x1c6a18a
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xd0(%rsp), %xmm27
vmovaps 0x150(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm16
movq 0xc0(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x282600(%rip), %xmm25 # 0x1eec714
vmovss 0x28792e(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x286ed8(%rip), %xmm23 # 0x1ef1000
vmovss 0x2825e2(%rip), %xmm22 # 0x1eec714
vmovss 0x2b6da4(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x286ea6(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2b6d70(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2b6d6a(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x282a1c(%rip), %xmm17 # 0x1eecb80
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %rdi
movq 0xa8(%rsp), %r11
movq 0xb0(%rsp), %r9
movq 0xb8(%rsp), %r10
vmovdqa 0x1e0(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x1c8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x2829d1(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %al
jmp 0x1c6a27a
xorl %eax, %eax
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
movl %r12d, %r14d
orb %al, %r14b
movq 0x148(%rsp), %r12
jmp 0x1c6a23c
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x90(%rsp), %xmm16
jmp 0x1c6a218
vmovaps 0x90(%rsp), %xmm16
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
jmp 0x1c69ced
xorl %r15d, %r15d
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x70(%rsp), %xmm14
testb %r15b, %r15b
jne 0x1c6921b
jmp 0x1c6a29a
movb $0x1, %r15b
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps %xmm27, %xmm15
vmovaps 0xd0(%rsp), %xmm27
jmp 0x1c6a236
xorl %eax, %eax
testb %al, %al
jne 0x1c6a1dc
vmovss 0xc8(%rsp), %xmm0
vmovss %xmm0, 0x100(%r11,%r9,4)
jmp 0x1c6a1dc
vinsertps $0x10, %xmm14, %xmm27, %xmm0 # xmm0 = xmm27[0],xmm14[0],xmm27[2,3]
vmovaps 0x190(%rsp), %xmm7
vmovaps 0x180(%rsp), %xmm18
vmovaps 0x400(%rsp), %ymm29
vmovaps 0x3e0(%rsp), %ymm28
jmp 0x1c68cdc
testb $0x1, %r14b
jne 0x1c6a2f1
vmovaps 0x3c0(%rsp), %ymm0
vcmpleps 0x100(%r11,%r9,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r12d
setne 0x1f(%rsp)
jne 0x1c68563
movb 0x1f(%rsp), %al
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 16>::intersect_n<embree::avx512::OrientedCurve1IntersectorK<embree::BSplineCurveT, 16>, embree::avx512::Intersect1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayHitK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersect ray lane k of a K-wide ray packet against every curve stored in one
// 8-wide CurveNi leaf primitive. Candidate curves are first culled in SIMD by
// intersect() (which fills tNear and a validity mask); surviving candidates are
// then processed one at a time by the scalar/per-curve Intersector, while the
// vertex data of the next one or two candidates is prefetched to hide latency.
static __forceinline void intersect_n(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Broad-phase test of ray k against all curves in this leaf; valid marks lanes
// whose bounds the ray hits, tNear their entry distances.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Bitmask of candidate lanes still to be processed.
size_t mask = movemask(valid);
while (mask)
{
// bscf: presumably "bit-scan and clear forward" — takes (and clears) the
// lowest set lane as the next candidate index. TODO confirm helper semantics.
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
// geomID is shared by the whole leaf (loop-invariant); primID is per-lane.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
unsigned int vertexID = geom->curve(primID);
// Gather the four control points (a0..a3) and four normals (n0..n3) of this curve.
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Peek at the next candidate(s) without consuming them from the real mask,
// so their vertex data can be prefetched while the current curve is intersected.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
// At least one more candidate follows: warm L1 with its vertices.
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
// A second follow-up candidate exists: warm L2 with its vertices.
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Narrow-phase intersection of the current curve; Epilog records any hit
// (updating ray.tfar[k] on success).
Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID));
// Drop remaining candidates whose entry distance now lies beyond the
// (possibly shortened) ray range.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x740, %rsp # imm = 0x740
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r11
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rdx
vbroadcastss 0x12(%r8,%rdx), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x100(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x40(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rdx), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r8,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2a82ec(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2b6aba(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2b6a2f(%rip), %ymm5 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm5, %ymm2, %ymm6
vbroadcastss 0x286b41(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x28222f(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r9
subq %rcx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc0(%r11,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2b593b(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x200(%r11,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2b5913(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2f0315(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x380(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c6c4fe
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r9d
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %rbx
addq $0x40, %rbx
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x400(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x282513(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2b684d(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2b683f(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq %r8, 0xa8(%rsp)
movq %r10, 0xa0(%rsp)
movq %r11, 0x98(%rsp)
tzcntq %r9, %rax
movl 0x2(%r8), %ecx
movl 0x6(%r8,%rax,4), %edi
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq %rcx, 0xb0(%rsp)
movq (%rax,%rcx,8), %rsi
movq 0x58(%rsi), %rax
movq 0x68(%rsi), %rcx
movq %rcx, %rdx
movq %rdi, 0xe8(%rsp)
imulq %rdi, %rdx
movq %r9, %r13
movl (%rax,%rdx), %r9d
movq 0xa0(%rsi), %rdx
movq %rdx, %rdi
imulq %r9, %rdi
leaq 0x1(%r9), %r11
leaq 0x2(%r9), %r10
leaq 0x3(%r9), %r8
movq 0xd8(%rsi), %r14
imulq %r14, %r9
movq 0xc8(%rsi), %r12
vmovups (%r12,%r9), %xmm5
movq %rdx, %r9
imulq %r11, %r9
imulq %r14, %r11
vmovups (%r12,%r11), %xmm6
movq %rdx, %r11
imulq %r10, %r11
imulq %r14, %r10
vmovups (%r12,%r10), %xmm7
imulq %r8, %r14
vmovups (%r12,%r14), %xmm8
movq %rdx, %r10
imulq %r8, %r10
movq 0x90(%rsi), %rsi
vmovaps (%rsi,%rdi), %xmm9
vmovaps (%rsi,%r9), %xmm10
movq 0xa8(%rsp), %r8
vmovaps (%rsi,%r11), %xmm11
blsrq %r13, %r9
vmovaps (%rsi,%r10), %xmm4
movq %r9, %rdi
subq $0x1, %rdi
jb 0x1c6a7c7
andq %r9, %rdi
movq %r9, %r10
tzcntq %r9, %r9
movl 0x6(%r8,%r9,4), %r9d
imulq %rcx, %r9
movl (%rax,%r9), %r9d
imulq %rdx, %r9
prefetcht0 (%rsi,%r9)
prefetcht0 0x40(%rsi,%r9)
movq %r10, %r9
testq %rdi, %rdi
je 0x1c6a7c7
tzcntq %rdi, %rdi
movl 0x6(%r8,%rdi,4), %edi
imulq %rdi, %rcx
movl (%rax,%rcx), %eax
imulq %rax, %rdx
prefetcht1 (%rsi,%rdx)
prefetcht1 0x40(%rsi,%rdx)
movq 0x98(%rsp), %r11
vmovss (%r11,%r15,4), %xmm0
vinsertps $0x1c, 0x40(%r11,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r11,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovss 0xc0(%r11,%r15,4), %xmm1
vmovss %xmm1, 0xbc(%rsp)
vmulps %xmm31, %xmm4, %xmm1
vbroadcastss 0x2867f6(%rip), %xmm14 # 0x1ef1000
vmovaps %xmm14, %xmm2
vfmadd213ps %xmm1, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm2) + xmm1
vbroadcastss 0x2b66c8(%rip), %xmm15 # 0x1f20ee4
vfmadd231ps %xmm15, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm15) + xmm2
vfmadd231ps %xmm14, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm14) + xmm2
vfmadd231ps %xmm17, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm17) + xmm1
vfnmadd231ps %xmm31, %xmm10, %xmm1 # xmm1 = -(xmm10 * xmm31) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vmovaps %xmm14, %xmm13
vfmadd213ps %xmm12, %xmm7, %xmm13 # xmm13 = (xmm7 * xmm13) + xmm12
vfmadd231ps %xmm15, %xmm6, %xmm13 # xmm13 = (xmm6 * xmm15) + xmm13
vfmadd231ps %xmm14, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm14) + xmm13
vfmadd231ps %xmm17, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm17) + xmm12
vfnmadd231ps %xmm31, %xmm6, %xmm12 # xmm12 = -(xmm6 * xmm31) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vmulps %xmm4, %xmm14, %xmm3
vfmadd231ps %xmm15, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm15) + xmm3
vfmadd231ps %xmm14, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm14) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfmadd231ps %xmm11, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm11) + xmm4
vfnmadd231ps %xmm10, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vmulps %xmm14, %xmm8, %xmm9
vfmadd231ps %xmm15, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm15) + xmm9
vfmadd231ps %xmm14, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm14) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfmadd231ps %xmm7, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm7) + xmm8
vfnmadd231ps %xmm6, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x281de6(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x281ddb(%rip), %xmm16 # 0x1eec71c
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vaddss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x286667(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vdpps $0x7f, %xmm5, %xmm5, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vaddss %xmm10, %xmm11, %xmm10
vdpps $0x7f, %xmm6, %xmm5, %xmm11
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm11, %xmm11
vmulps %xmm5, %xmm11, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm9, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm8 # xmm8 = -(xmm6 * xmm8) + xmm17
vmulss %xmm6, %xmm8, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm14
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm15
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm13, %xmm6
vsubps %xmm6, %xmm3, %xmm16
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm13, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm13
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x28743e(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm14, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm16, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm15, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm13, %xmm22
vsubps %xmm0, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps (%rbx), %xmm4
vmovaps 0x10(%rbx), %xmm5
vmovaps %xmm1, 0x310(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps 0x20(%rbx), %xmm7
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x300(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm6, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm6) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm6
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x2f0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm7, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm6) + xmm3
vsubps %xmm0, %xmm16, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vmovaps %xmm6, 0x2e0(%rsp)
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm8) + xmm6
vsubps %xmm0, %xmm15, %xmm10
vbroadcastss %xmm10, %xmm8
vshufps $0x55, %xmm10, %xmm10, %xmm9 # xmm9 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2d0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm9, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm9) + xmm10
vfmadd231ps %xmm8, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm8) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm8
vshufps $0x55, %xmm11, %xmm11, %xmm9 # xmm9 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2c0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm11, %xmm11
vfmadd231ps %xmm9, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm9) + xmm11
vfmadd231ps %xmm8, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm8) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm8
vshufps $0x55, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2b0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm7, %xmm12, %xmm12
vfmadd231ps %xmm9, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm9) + xmm12
vfmadd231ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm8) + xmm12
vsubps %xmm0, %xmm13, %xmm9
vbroadcastss %xmm9, %xmm0
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2a0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm7
vfmadd231ps %xmm8, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm8) + xmm7
vfmadd231ps %xmm0, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm0) + xmm7
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm23 # xmm23 = xmm3[0],xmm12[0]
vmovlhps %xmm7, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm7[0]
vminps %xmm9, %xmm8, %xmm0
vmaxps %xmm9, %xmm8, %xmm4
vminps %xmm24, %xmm23, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm24, %xmm23, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vminps %xmm5, %xmm0, %xmm0
vshufpd $0x3, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1]
vmaxps %xmm5, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x28723c(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm18 # xmm18 = xmm1[0,0]
vmovddup %xmm2, %xmm25 # xmm25 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm10, %xmm10 # xmm10 = xmm10[0,0]
vmovddup %xmm11, %xmm11 # xmm11 = xmm11[0,0]
vmovddup %xmm12, %xmm12 # xmm12 = xmm12[0,0]
vmovddup %xmm7, %xmm2 # xmm2 = xmm7[0,0]
vmovaps %xmm0, 0x190(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
xorl %r14d, %r14d
vmovaps %xmm8, 0xd0(%rsp)
vsubps %xmm8, %xmm9, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm9, 0x130(%rsp)
vsubps %xmm9, %xmm23, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm23, 0x120(%rsp)
vmovaps %xmm24, 0x1a0(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm14, 0x290(%rsp)
vmovaps %xmm15, 0x280(%rsp)
vsubps %xmm14, %xmm15, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm17, 0x250(%rsp)
vmovaps %xmm21, 0x230(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm20, 0x240(%rsp)
vmovaps %xmm22, 0x220(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm16, 0x270(%rsp)
vmovaps %xmm13, 0x260(%rsp)
vsubps %xmm16, %xmm13, %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm2, %xmm13
vmovaps %xmm1, %xmm16
movq 0xb0(%rsp), %rdi
vpbroadcastd %edi, %zmm0
vmovdqa64 %zmm0, 0x480(%rsp)
movq 0xe8(%rsp), %rax
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x440(%rsp)
vmovsd 0x281925(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0xa0(%rsp), %r10
vmovaps %xmm18, 0x1c0(%rsp)
vmovaps %xmm25, 0x1b0(%rsp)
vmovaps %xmm1, 0x80(%rsp)
vmovaps %xmm6, 0x50(%rsp)
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm11, 0x30(%rsp)
vmovaps %xmm12, 0x20(%rsp)
vmovaps %xmm2, 0x60(%rsp)
vmovaps %ymm29, 0x3c0(%rsp)
vmovaps %ymm28, 0x3a0(%rsp)
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x2818e0(%rip), %ymm26 # 0x1eec714
vsubps %xmm1, %xmm26, %xmm2
vmulps %xmm1, %xmm10, %xmm3
vmulps %xmm1, %xmm11, %xmm4
vmulps %xmm1, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm1
vfmadd231ps %xmm18, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm18) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2b605f(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2a7877(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2b6018(%rip), %ymm13 # 0x1f20edc
vpermps %ymm3, %ymm13, %ymm19
vbroadcastss 0x2b6005(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm13, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm13, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm13, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2b601e(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm26, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x286055(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2f4d65(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c6b2a3
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c6b2c5
movl %r14d, %eax
movl %ecx, 0x1d0(%rsp,%rax,4)
vmovlps %xmm0, 0x320(%rsp,%rax,8)
vmovlps %xmm27, 0x3e0(%rsp,%rax,8)
incl %r14d
vbroadcastss 0x2818b1(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2b5beb(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2b5bdd(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x285cff(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2b5be9(%rip), %xmm21 # 0x1f20ee0
vmovss 0x281413(%rip), %xmm22 # 0x1eec714
vmovss 0x285cf5(%rip), %xmm23 # 0x1ef1000
vmovss 0x286737(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x2813f5(%rip), %xmm25 # 0x1eec714
vmovaps 0x80(%rsp), %xmm16
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x100(%rsp), %xmm28
vmovaps 0xf0(%rsp), %xmm29
testl %r14d, %r14d
je 0x1c6c4dc
leal -0x1(%r14), %eax
vmovss 0x320(%rsp,%rax,8), %xmm0
vmovss 0x324(%rsp,%rax,8), %xmm1
movl 0x1d0(%rsp,%rax,4), %ecx
vmovsd 0x3e0(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1d0(%rsp,%rax,4)
cmovel %eax, %r14d
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm27
vfmadd231ss %xmm4, %xmm0, %xmm27 # xmm27 = (xmm0 * xmm4) + xmm27
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm27, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c6c4b0
vmovaps %xmm27, %xmm7
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %r14d
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm10, %xmm3
vmulps %xmm1, %xmm11, %xmm4
vmulps %xmm1, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm1
vfmadd231ps 0x1c0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x1b0(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm7, 0xc0(%rsp)
vbroadcastss %xmm7, %xmm6
vmovaps %xmm14, 0x70(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2869e9(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x28543e(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c6b5af
vmovss 0x286917(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c6b610
vmovss 0x286909(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x285412(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c6b610
vmovss 0x2868d7(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2853e0(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c6c073
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x2853a5(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x2803c5(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x28151b(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c6b68b
jnp 0x1c6b6ca
vucomiss %xmm13, %xmm14
jne 0x1c6b6da
jp 0x1c6b6da
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x280370(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x2814c6(%rip), %xmm14 # 0x1eecb84
vmovss 0x28104c(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c6b6fb
vmovaps 0xd0(%rsp), %xmm16
vmovaps %xmm27, %xmm15
jmp 0x1c6b713
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xd0(%rsp), %xmm16
vmovaps %xmm27, %xmm15
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vmovaps 0xc0(%rsp), %xmm27
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x28529a(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x70(%rsp), %xmm14
jne 0x1c6b741
jnp 0x1c6b7ab
vucomiss %xmm9, %xmm10
jne 0x1c6b780
jp 0x1c6b780
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2802ba(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x281410(%rip), %xmm10 # 0x1eecb84
vmovss 0x280f96(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c6b7a1
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r13b
vucomiss %xmm8, %xmm7
ja 0x1c6c016
vaddss 0x2f1c70(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x28139c(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm15, %xmm4 # xmm4 = xmm15[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vbroadcastss 0x28575f(%rip), %xmm15 # 0x1ef0fec
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm7, %xmm15, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm15, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vinsertps $0x10, %xmm1, %xmm27, %xmm6 # xmm6 = xmm27[0],xmm1[0],xmm27[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps %xmm17, %xmm0, %xmm9
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm26, %xmm3
vfmadd213ps %xmm16, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm16
vmovaps %xmm28, %xmm12
vfmadd213ps 0x130(%rsp), %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + mem
vmovaps %xmm29, %xmm13
vfmadd213ps 0x120(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm3, %xmm15, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2b54ef(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2f430d(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2eef6d(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2b545e(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2b53f7(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c6c006
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm27, %xmm7
vmovaps 0x80(%rsp), %xmm16
jbe 0x1c6bc14
testb %sil, %sil
vmovss 0x28542e(%rip), %xmm12 # 0x1ef0fec
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm11
vmovaps 0x1a0(%rsp), %xmm13
vmovaps 0x190(%rsp), %xmm14
je 0x1c6bc4f
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c6bc4f
vmovss 0x2853d0(%rip), %xmm12 # 0x1ef0fec
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0xd0(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm11
vmovaps 0x1a0(%rsp), %xmm13
vmovaps 0x190(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c6c03e
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm12, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c6bd03
decq %rax
jne 0x1c6bc60
jmp 0x1c6c041
vucomiss %xmm30, %xmm0
jb 0x1c6c041
vucomiss %xmm0, %xmm22
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
jb 0x1c6bfec
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c6bfec
vucomiss %xmm1, %xmm22
jb 0x1c6bfec
vmovss 0x8(%rbx), %xmm2
vinsertps $0x1c, 0x18(%rbx), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rbx), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2a0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm2
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm6) + xmm2
vsubss %xmm0, %xmm22, %xmm7
vmulss %xmm7, %xmm7, %xmm4
vmulss %xmm4, %xmm7, %xmm3
vmulss %xmm0, %xmm12, %xmm5
vmulss %xmm4, %xmm5, %xmm4
vmulps %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm5
vmulss %xmm5, %xmm7, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulss %xmm2, %xmm6, %xmm2
vfmadd231ss %xmm9, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm9) + xmm2
vfmadd231ss %xmm8, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm8) + xmm2
vfmadd231ss %xmm11, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm11) + xmm2
vucomiss 0xbc(%rsp), %xmm2
jb 0x1c6bfec
vmovss 0x200(%r11,%r15,4), %xmm8
vucomiss %xmm2, %xmm8
jb 0x1c6bfec
vmovss %xmm8, 0xb8(%rsp)
vshufps $0x55, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[1,1,1,1]
vsubps %xmm8, %xmm25, %xmm9
vmulps 0x280(%rsp), %xmm8, %xmm10
vmulps 0x230(%rsp), %xmm8, %xmm11
vmulps 0x220(%rsp), %xmm8, %xmm12
vmulps 0x260(%rsp), %xmm8, %xmm8
vfmadd231ps 0x290(%rsp), %xmm9, %xmm10 # xmm10 = (xmm9 * mem) + xmm10
vfmadd231ps 0x250(%rsp), %xmm9, %xmm11 # xmm11 = (xmm9 * mem) + xmm11
vfmadd231ps 0x240(%rsp), %xmm9, %xmm12 # xmm12 = (xmm9 * mem) + xmm12
vfmadd231ps 0x270(%rsp), %xmm9, %xmm8 # xmm8 = (xmm9 * mem) + xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm12, %xmm10
vsubps %xmm12, %xmm8, %xmm8
vbroadcastss %xmm0, %xmm11
vmulps %xmm10, %xmm11, %xmm12
vbroadcastss %xmm7, %xmm7
vfmadd231ps %xmm9, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm9) + xmm12
vmulps %xmm8, %xmm11, %xmm8
vfmadd231ps %xmm10, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm10) + xmm8
vmulps %xmm8, %xmm11, %xmm8
vfmadd231ps %xmm12, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm12) + xmm8
vmulps 0x285117(%rip){1to4}, %xmm8, %xmm7 # 0x1ef0fec
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %r12
movl 0x240(%r11,%r15,4), %eax
testl %eax, 0x34(%r12)
je 0x1c6bfec
vbroadcastss %xmm6, %xmm6
vmulps 0x1e0(%rsp), %xmm6, %xmm6
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x1f0(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x200(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[1,2,0,3]
vmulps %xmm5, %xmm3, %xmm3
vfmsub231ps %xmm4, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm4) - xmm3
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm12
jne 0x1c6c0a4
cmpq $0x0, 0x40(%r12)
jne 0x1c6c0a4
vmovss %xmm2, 0x200(%r11,%r15,4)
vextractps $0x1, %xmm3, 0x300(%r11,%r15,4)
vextractps $0x2, %xmm3, 0x340(%r11,%r15,4)
vmovss %xmm3, 0x380(%r11,%r15,4)
vmovss %xmm0, 0x3c0(%r11,%r15,4)
vmovss %xmm1, 0x400(%r11,%r15,4)
movq 0xe8(%rsp), %rax
movl %eax, 0x440(%r11,%r15,4)
movl %edi, 0x480(%r11,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r11,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r11,%r15,4)
jmp 0x1c6c065
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm12
jmp 0x1c6c065
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x80(%rsp), %xmm16
jmp 0x1c6c041
vmovaps 0x80(%rsp), %xmm16
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
jmp 0x1c6c065
xorl %r13d, %r13d
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
testb %r13b, %r13b
jne 0x1c6b35d
jmp 0x1c6c4b0
movb $0x1, %r13b
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps %xmm27, %xmm15
vmovaps 0xc0(%rsp), %xmm27
jmp 0x1c6c05f
movq 0x8(%r10), %rax
vbroadcastss %xmm0, %zmm1
vbroadcastss 0x2a664c(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm0
vpermps %zmm3, %zmm4, %zmm4
vbroadcastss 0x2b4e0e(%rip), %zmm5 # 0x1f20edc
vpermps %zmm3, %zmm5, %zmm5
vbroadcastss %xmm3, %zmm3
vmovaps %zmm4, 0x4c0(%rsp)
vmovaps %zmm5, 0x500(%rsp)
vmovaps %zmm3, 0x540(%rsp)
vmovaps %zmm1, 0x580(%rsp)
vmovaps %zmm0, 0x5c0(%rsp)
vmovaps 0x440(%rsp), %zmm0
vmovaps %zmm0, 0x600(%rsp)
vmovdqa64 0x480(%rsp), %zmm0
vmovdqa64 %zmm0, 0x640(%rsp)
leaq 0x680(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x680(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x6c0(%rsp)
vmovss %xmm2, 0x200(%r11,%r15,4)
vmovaps 0x400(%rsp), %zmm0
vmovaps %zmm0, 0x340(%rsp)
leaq 0x340(%rsp), %rax
movq %rax, 0x150(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x158(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x160(%rsp)
movq %r11, 0x168(%rsp)
leaq 0x4c0(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x10, 0x178(%rsp)
movq 0x40(%r12), %rax
testq %rax, %rax
vmovaps %xmm15, 0x180(%rsp)
je 0x1c6c2c9
leaq 0x150(%rsp), %rdi
movq %r9, 0x148(%rsp)
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm27
vmovaps 0x180(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x20(%rsp), %xmm12
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x80(%rsp), %xmm16
movq 0xb0(%rsp), %rdi
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2857ef(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x284d99(%rip), %xmm23 # 0x1ef1000
vmovss 0x2804a3(%rip), %xmm22 # 0x1eec714
vmovss 0x2b4c65(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x284d67(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2b4c31(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2b4c2b(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2808dd(%rip), %xmm17 # 0x1eecb80
vxorps %xmm31, %xmm31, %xmm31
movq 0x148(%rsp), %r9
movq 0x98(%rsp), %r11
movq 0xa0(%rsp), %r10
movq 0xa8(%rsp), %r8
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c6c48e
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c6c3e7
testb $0x2, (%rcx)
jne 0x1c6c303
testb $0x40, 0x3e(%r12)
je 0x1c6c3e7
leaq 0x150(%rsp), %rdi
movq %r9, %r12
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm27
vmovaps 0x180(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x20(%rsp), %xmm12
vmovaps 0x30(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm6
vmovaps 0x80(%rsp), %xmm16
movq 0xb0(%rsp), %rdi
vxorps %xmm30, %xmm30, %xmm30
vmovss 0x2856cc(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x284c76(%rip), %xmm23 # 0x1ef1000
vmovss 0x280380(%rip), %xmm22 # 0x1eec714
vmovss 0x2b4b42(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x284c44(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2b4b0e(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2b4b08(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2807ba(%rip), %xmm17 # 0x1eecb80
vxorps %xmm31, %xmm31, %xmm31
movq %r12, %r9
movq 0x98(%rsp), %r11
movq 0xa0(%rsp), %r10
movq 0xa8(%rsp), %r8
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c6c48e
movq 0x168(%rsp), %rax
movq 0x170(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c6c4a1
vmovd 0xb8(%rsp), %xmm0
vmovd %xmm0, 0x200(%r11,%r15,4)
vbroadcastss 0x280269(%rip), %xmm25 # 0x1eec714
jmp 0x1c6c065
vinsertps $0x10, %xmm14, %xmm27, %xmm0 # xmm0 = xmm27[0],xmm14[0],xmm27[2,3]
vmovaps 0x1c0(%rsp), %xmm18
vmovaps 0x1b0(%rsp), %xmm25
vmovaps 0x3c0(%rsp), %ymm29
vmovaps 0x3a0(%rsp), %ymm28
jmp 0x1c6ae1e
vmovaps 0x380(%rsp), %ymm0
vcmpleps 0x200(%r11,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r9d
jne 0x1c6a69f
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 16>::occluded_n<embree::avx512::OrientedCurve1IntersectorK<embree::BSplineCurveT, 16>, embree::avx512::Occluded1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
/// Shadow-ray (occlusion) test of ray lane `k` against a packed block of
/// curve primitives (CurveNi<8>). Returns true as soon as ANY curve in the
/// block occludes the ray; returns false when every candidate has been
/// rejected. Candidates are produced by a vectorized AABB pre-test
/// (`intersect`) and then refined one at a time by the scalar `Intersector`.
///
/// @param pre      precomputed per-ray data shared across primitives
/// @param ray      the K-wide ray packet; only lane k is tested here
/// @param k        active lane index within the packet
/// @param context  query context giving access to the scene
/// @param prim     the packed curve block being tested
static __forceinline bool occluded_n(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Vectorized bounds pre-test over all curves in the block: `valid` marks
// curves whose AABB is hit, `tNear` holds their per-curve entry distances.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Bitmask of surviving candidate slots; the loop consumes it bit by bit.
size_t mask = movemask(valid);
while (mask)
{
// bscf: extract the index of the lowest set bit and clear it from mask
// (destructive scan — presumably "bit scan + clear first"; see bscf def).
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Gather the four control points (a0..a3) and normals (n0..n3) of the
// curve segment starting at vertexID.
unsigned int vertexID = geom->curve(primID);
Vec3ff a0,a1,a2,a3; Vec3fa n0,n1,n2,n3; geom->gather(a0,a1,a2,a3,n0,n1,n2,n3,vertexID);
// Software prefetch: while the (expensive) scalar intersection below runs
// on curve i, pull the NEXT candidate's vertices into L1 and the one
// after that into L2 to hide gather latency on the following iterations.
size_t mask1 = mask;
const size_t i1 = bscf(mask1);
if (mask) {
const unsigned int primID1 = prim.primID(N)[i1];
geom->prefetchL1_vertices(geom->curve(primID1));
if (mask1) {
const size_t i2 = bsf(mask1);
const unsigned int primID2 = prim.primID(N)[i2];
geom->prefetchL2_vertices(geom->curve(primID2));
}
}
// Occlusion query: any confirmed hit terminates immediately — no need to
// find the closest one.
if (Intersector().intersect(pre,ray,k,context,geom,primID,a0,a1,a2,a3,n0,n1,n2,n3,Epilog(ray,k,context,geomID,primID)))
return true;
// Drop remaining candidates whose AABB entry distance now lies beyond
// ray.tfar[k] (NOTE(review): tfar appears re-read each iteration because
// the epilog may shorten the ray — confirm against Epilog semantics).
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0x740, %rsp # imm = 0x740
movq %r8, %r10
movq %rdx, %r9
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %rdx
leaq (%rdx,%rdx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%r9,4), %xmm1
vmovss 0x100(%r11,%r9,4), %xmm2
vinsertps $0x10, 0x40(%r11,%r9,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%r11,%r9,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%r11,%r9,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x88(%rsp)
vinsertps $0x20, 0x180(%r11,%r9,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r10,%r8), %ymm1
addq %rax, %r8
vpmovsxbd 0x6(%r10,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %edx
vpmovsxbd 0x6(%r10,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2a60de(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2b48a7(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2b4816(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x28492d(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x28001b(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %r8
subq %rax, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r10,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rdx
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r10,%r8), %ymm6
subq %rsi, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r10,%rdx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r10,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc0(%r11,%r9,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2b372b(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x200(%r11,%r9,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2b3703(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2ee0ff(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x380(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0x1f(%rsp)
je 0x1c6e657
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
leaq (%r9,%r9,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r13
addq $0x40, %r13
movl $0x1, %eax
shlxl %r9d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x400(%rsp)
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x2802f8(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2b4632(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2b4624(%rip), %xmm19 # 0x1f20ec0
vxorps %xmm30, %xmm30, %xmm30
movq 0x88(%rsp), %rdi
movq %r10, 0xb8(%rsp)
movq %r9, 0xb0(%rsp)
movq %r11, 0xa8(%rsp)
tzcntq %r12, %rax
movl 0x2(%r10), %edx
movl 0x6(%r10,%rax,4), %eax
movq (%rdi), %rcx
movq 0x1e8(%rcx), %rcx
movq %rdx, 0xc0(%rsp)
movq (%rcx,%rdx,8), %rdi
movq 0x58(%rdi), %rcx
movq 0x68(%rdi), %rdx
movq %rdx, %rsi
imulq %rax, %rsi
movl (%rcx,%rsi), %r10d
movq 0xa0(%rdi), %rsi
movq %rsi, %r8
imulq %r10, %r8
leaq 0x1(%r10), %rbx
leaq 0x2(%r10), %r11
leaq 0x3(%r10), %r9
movq 0xd8(%rdi), %r14
imulq %r14, %r10
movq 0xc8(%rdi), %r15
vmovups (%r15,%r10), %xmm5
movq %rsi, %r10
imulq %rbx, %r10
imulq %r14, %rbx
vmovups (%r15,%rbx), %xmm6
movq %rsi, %rbx
imulq %r11, %rbx
imulq %r14, %r11
vmovups (%r15,%r11), %xmm7
imulq %r9, %r14
vmovups (%r15,%r14), %xmm8
movq %rsi, %r11
imulq %r9, %r11
movq 0x90(%rdi), %rdi
vmovaps (%rdi,%r8), %xmm9
vmovaps (%rdi,%r10), %xmm10
movq 0xb8(%rsp), %r10
vmovaps (%rdi,%rbx), %xmm11
blsrq %r12, %r12
vmovaps (%rdi,%r11), %xmm4
movq %r12, %r8
subq $0x1, %r8
jb 0x1c6c9d9
andq %r12, %r8
tzcntq %r12, %r9
movl 0x6(%r10,%r9,4), %r9d
imulq %rdx, %r9
movl (%rcx,%r9), %r9d
imulq %rsi, %r9
prefetcht0 (%rdi,%r9)
prefetcht0 0x40(%rdi,%r9)
testq %r8, %r8
je 0x1c6c9d9
tzcntq %r8, %r8
movl 0x6(%r10,%r8,4), %r8d
imulq %r8, %rdx
movl (%rcx,%rdx), %ecx
imulq %rcx, %rsi
prefetcht1 (%rdi,%rsi)
prefetcht1 0x40(%rdi,%rsi)
movq 0xb0(%rsp), %r9
movq 0xa8(%rsp), %r11
vmovss (%r11,%r9,4), %xmm0
vinsertps $0x1c, 0x40(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r11,%r9,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmulps %xmm31, %xmm4, %xmm1
vbroadcastss 0x2845ef(%rip), %xmm14 # 0x1ef1000
vmovaps %xmm14, %xmm2
vfmadd213ps %xmm1, %xmm11, %xmm2 # xmm2 = (xmm11 * xmm2) + xmm1
vbroadcastss 0x2b44c1(%rip), %xmm15 # 0x1f20ee4
vfmadd231ps %xmm15, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm15) + xmm2
vfmadd231ps %xmm14, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm14) + xmm2
vfmadd231ps %xmm17, %xmm11, %xmm1 # xmm1 = (xmm11 * xmm17) + xmm1
vfnmadd231ps %xmm31, %xmm10, %xmm1 # xmm1 = -(xmm10 * xmm31) + xmm1
vfnmadd231ps %xmm17, %xmm9, %xmm1 # xmm1 = -(xmm9 * xmm17) + xmm1
vmulps %xmm31, %xmm8, %xmm12
vmovaps %xmm14, %xmm13
vfmadd213ps %xmm12, %xmm7, %xmm13 # xmm13 = (xmm7 * xmm13) + xmm12
vfmadd231ps %xmm15, %xmm6, %xmm13 # xmm13 = (xmm6 * xmm15) + xmm13
vfmadd231ps %xmm14, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm14) + xmm13
vfmadd231ps %xmm17, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm17) + xmm12
vfnmadd231ps %xmm31, %xmm6, %xmm12 # xmm12 = -(xmm6 * xmm31) + xmm12
vfnmadd231ps %xmm17, %xmm5, %xmm12 # xmm12 = -(xmm5 * xmm17) + xmm12
vmulps %xmm4, %xmm14, %xmm3
vfmadd231ps %xmm15, %xmm11, %xmm3 # xmm3 = (xmm11 * xmm15) + xmm3
vfmadd231ps %xmm14, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm14) + xmm3
vfmadd231ps %xmm31, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm31) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfmadd231ps %xmm11, %xmm31, %xmm4 # xmm4 = (xmm31 * xmm11) + xmm4
vfnmadd231ps %xmm10, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm10) + xmm4
vfnmadd231ps %xmm9, %xmm31, %xmm4 # xmm4 = -(xmm31 * xmm9) + xmm4
vmulps %xmm14, %xmm8, %xmm9
vfmadd231ps %xmm15, %xmm7, %xmm9 # xmm9 = (xmm7 * xmm15) + xmm9
vfmadd231ps %xmm14, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm14) + xmm9
vfmadd231ps %xmm31, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm31) + xmm9
vmulps %xmm17, %xmm8, %xmm8
vfmadd231ps %xmm7, %xmm31, %xmm8 # xmm8 = (xmm31 * xmm7) + xmm8
vfnmadd231ps %xmm6, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm6) + xmm8
vfnmadd231ps %xmm5, %xmm31, %xmm8 # xmm8 = -(xmm31 * xmm5) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm6 # xmm6 = xmm13[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm13, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm13) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm7 # xmm7 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm1, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vmulps %xmm5, %xmm4, %xmm5
vfmsub231ps %xmm9, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm9) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm9 # xmm9 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm9, %xmm9
vfmsub231ps %xmm8, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm8) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm8, %xmm31, %xmm9 # xmm9 = xmm8[0],xmm31[1,2,3]
vrsqrt14ss %xmm9, %xmm31, %xmm11
vmovss 0x27fbdf(%rip), %xmm15 # 0x1eec718
vmulss %xmm15, %xmm11, %xmm12
vmovss 0x27fbd4(%rip), %xmm16 # 0x1eec71c
vmulss %xmm16, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vaddss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm12
vdpps $0x7f, %xmm10, %xmm7, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm14, %xmm10, %xmm10
vbroadcastss %xmm13, %xmm13
vmulps %xmm7, %xmm13, %xmm7
vsubps %xmm7, %xmm10, %xmm7
vrcp14ss %xmm9, %xmm31, %xmm9
vmovss 0x284460(%rip), %xmm17 # 0x1ef0ff8
vfnmadd213ss %xmm17, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm17
vmulss %xmm8, %xmm9, %xmm8
vdpps $0x7f, %xmm5, %xmm5, %xmm9
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vmovss %xmm9, %xmm31, %xmm8 # xmm8 = xmm9[0],xmm31[1,2,3]
vrsqrt14ss %xmm8, %xmm31, %xmm10
vmulss %xmm15, %xmm10, %xmm11
vmulss %xmm16, %xmm9, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vaddss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vmulps %xmm5, %xmm10, %xmm11
vdpps $0x7f, %xmm6, %xmm5, %xmm13
vbroadcastss %xmm9, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm13, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm8, %xmm31, %xmm6
vfnmadd213ss %xmm17, %xmm6, %xmm9 # xmm9 = -(xmm6 * xmm9) + xmm17
vmulss %xmm6, %xmm9, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm12, %xmm8
vsubps %xmm8, %xmm2, %xmm13
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm12, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm14
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm11, %xmm6
vsubps %xmm6, %xmm3, %xmm15
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm11, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm16
vaddps %xmm2, %xmm4, %xmm2
vbroadcastss 0x285237(%rip), %xmm4 # 0x1ef1ebc
vmulps %xmm4, %xmm7, %xmm3
vaddps %xmm3, %xmm13, %xmm17
vmulps %xmm4, %xmm5, %xmm3
vsubps %xmm3, %xmm15, %xmm20
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm14, %xmm21
vmulps %xmm4, %xmm2, %xmm1
vsubps %xmm1, %xmm16, %xmm22
vsubps %xmm0, %xmm13, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x310(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps (%r13), %xmm4
vmovaps 0x10(%r13), %xmm5
vmovaps 0x20(%r13), %xmm6
vmulps %xmm1, %xmm6, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm7 # xmm7 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x300(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm6, %xmm2
vfmadd231ps %xmm7, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm7) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm20, %xmm3
vbroadcastss %xmm3, %xmm7
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x2f0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm6, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm7, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm7) + xmm3
vsubps %xmm0, %xmm15, %xmm9
vbroadcastss %xmm9, %xmm7
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2e0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm6, %xmm9, %xmm9
vfmadd231ps %xmm8, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm8) + xmm9
vfmadd231ps %xmm7, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm7) + xmm9
vsubps %xmm0, %xmm14, %xmm10
vbroadcastss %xmm10, %xmm7
vshufps $0x55, %xmm10, %xmm10, %xmm8 # xmm8 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2d0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm6, %xmm10, %xmm10
vfmadd231ps %xmm8, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm8) + xmm10
vfmadd231ps %xmm7, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm7) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm7
vshufps $0x55, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2c0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm6, %xmm11, %xmm11
vfmadd231ps %xmm8, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm8) + xmm11
vfmadd231ps %xmm7, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm7) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm7
vshufps $0x55, %xmm12, %xmm12, %xmm8 # xmm8 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2b0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm6, %xmm12, %xmm12
vfmadd231ps %xmm8, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm8) + xmm12
vfmadd231ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm7) + xmm12
vsubps %xmm0, %xmm16, %xmm8
vbroadcastss %xmm8, %xmm0
vshufps $0x55, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1,1,1]
vmovaps %xmm8, 0x2a0(%rsp)
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm6, %xmm8, %xmm6
vfmadd231ps %xmm7, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm7) + xmm6
vfmadd231ps %xmm0, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm0) + xmm6
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm24 # xmm24 = xmm3[0],xmm12[0]
vmovlhps %xmm6, %xmm9, %xmm25 # xmm25 = xmm9[0],xmm6[0]
vminps %xmm23, %xmm8, %xmm0
vmaxps %xmm23, %xmm8, %xmm4
vminps %xmm25, %xmm24, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm25, %xmm24, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vshufpd $0x3, %xmm4, %xmm4, %xmm7 # xmm7 = xmm4[1,1]
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm7, %xmm4, %xmm4
vandps %xmm18, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
vmulss 0x28502c(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm7 # xmm7 = xmm1[0,0]
vmovddup %xmm2, %xmm18 # xmm18 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm9, %xmm2 # xmm2 = xmm9[0,0]
vmovddup %xmm10, %xmm3 # xmm3 = xmm10[0,0]
vmovddup %xmm11, %xmm9 # xmm9 = xmm11[0,0]
vmovddup %xmm12, %xmm10 # xmm10 = xmm12[0,0]
vmovddup %xmm6, %xmm11 # xmm11 = xmm6[0,0]
vmovaps %xmm0, 0x160(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm19, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
vpbroadcastd %eax, %zmm0
vmovdqa64 %zmm0, 0x480(%rsp)
xorl %r14d, %r14d
xorl %ebx, %ebx
vmovss 0xc0(%r11,%r9,4), %xmm0
vmovss %xmm0, 0xcc(%rsp)
vmovaps %xmm8, 0xe0(%rsp)
vsubps %xmm8, %xmm23, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm23, 0x130(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm24, 0x120(%rsp)
vmovaps %xmm25, 0x170(%rsp)
vsubps %xmm24, %xmm25, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm13, 0x290(%rsp)
vmovaps %xmm14, 0x280(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm3, %xmm13
vmovaps %xmm2, %xmm12
vmovaps %xmm17, 0x250(%rsp)
vmovaps %xmm21, 0x230(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm20, 0x240(%rsp)
vmovaps %xmm22, 0x220(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm15, 0x270(%rsp)
vmovaps %xmm16, 0x260(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm1, %xmm16
movq 0xc0(%rsp), %r8
vpbroadcastd %r8d, %zmm0
vmovdqa64 %zmm0, 0x440(%rsp)
vmovsd 0x27f705(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq 0x88(%rsp), %rdi
vmovaps %xmm7, 0x190(%rsp)
vmovaps %xmm18, 0x180(%rsp)
vmovaps %xmm1, 0x90(%rsp)
vmovaps %xmm2, 0x60(%rsp)
vmovaps %xmm3, 0x50(%rsp)
vmovaps %xmm9, 0x40(%rsp)
vmovaps %xmm10, 0x30(%rsp)
vmovaps %xmm11, 0x20(%rsp)
vmovaps %ymm29, 0x3c0(%rsp)
vmovaps %ymm28, 0x3a0(%rsp)
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x27f6bf(%rip), %ymm26 # 0x1eec714
vsubps %xmm1, %xmm26, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps %xmm7, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm7) + xmm3
vfmadd231ps %xmm18, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm18) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2b3e3f(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x2a5657(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vbroadcastss 0x2b3df8(%rip), %ymm13 # 0x1f20edc
vpermps %ymm3, %ymm13, %ymm19
vbroadcastss 0x2b3de5(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm13, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm13, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm13, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2b3dfe(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm26, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x283e35(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2f2b45(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c6d4c3
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c6d4e3
movl %ebx, %eax
movl %ecx, 0x1a0(%rsp,%rax,4)
vmovlps %xmm0, 0x320(%rsp,%rax,8)
vmovlps %xmm27, 0x3e0(%rsp,%rax,8)
incl %ebx
vbroadcastss 0x27f693(%rip), %xmm17 # 0x1eecb80
vbroadcastss 0x2b39cd(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x2b39bf(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x283ae1(%rip), %ymm20 # 0x1ef0fec
vmovss 0x2b39cb(%rip), %xmm21 # 0x1f20ee0
vmovss 0x27f1f5(%rip), %xmm22 # 0x1eec714
vmovss 0x283ad7(%rip), %xmm23 # 0x1ef1000
vmovss 0x284519(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x27f1d7(%rip), %xmm25 # 0x1eec714
vmovaps 0x90(%rsp), %xmm16
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x100(%rsp), %xmm28
vmovaps 0xf0(%rsp), %xmm29
testl %ebx, %ebx
je 0x1c6e62a
leal -0x1(%rbx), %eax
vmovss 0x320(%rsp,%rax,8), %xmm0
vmovss 0x324(%rsp,%rax,8), %xmm1
movl 0x1a0(%rsp,%rax,4), %ecx
vmovsd 0x3e0(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1a0(%rsp,%rax,4)
cmovel %eax, %ebx
vxorps %xmm2, %xmm2, %xmm2
vcvtsi2ss %rdx, %xmm2, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vcvtsi2ss %rdx, %xmm31, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm27
vfmadd231ss %xmm4, %xmm0, %xmm27 # xmm27 = (xmm0 * xmm4) + xmm27
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm27, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c6e5fd
vmovaps %xmm27, %xmm6
vmovaps %xmm15, %xmm27
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %ebx
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps 0x190(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x180(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm6, 0xd0(%rsp)
vbroadcastss %xmm6, %xmm6
vmovaps %xmm14, 0x70(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm20, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2847d2(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x283227(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c6d7c6
vmovss 0x284700(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c6d827
vmovss 0x2846f2(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2831fb(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c6d827
vmovss 0x2846c0(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2831c9(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c6e5aa
vcmpltss %xmm30, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x28318e(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm30, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x27e1ae(%rip), %xmm7 # 0x1eeba20
vmovss %xmm30, %xmm7, %xmm7 {%k1}
vmovss 0x27f304(%rip), %xmm8 # 0x1eecb84
vmovss %xmm30, %xmm8, %xmm8 {%k1}
vcmpltss %xmm30, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c6d8a2
jnp 0x1c6d8e1
vucomiss %xmm13, %xmm14
jne 0x1c6d8f1
jp 0x1c6d8f1
vucomiss %xmm30, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x27e159(%rip), %xmm13 # 0x1eeba20
vmovss %xmm30, %xmm13, %xmm13 {%k1}
vmovss 0x27f2af(%rip), %xmm14 # 0x1eecb84
vmovss 0x27ee35(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c6d912
vmovaps 0xe0(%rsp), %xmm16
vmovaps %xmm27, %xmm15
jmp 0x1c6d92a
vxorps %xmm19, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm30, %xmm13 # xmm13 = (xmm30 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xe0(%rsp), %xmm16
vmovaps %xmm27, %xmm15
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vmovaps 0xd0(%rsp), %xmm27
vcmpltss %xmm30, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x283083(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x70(%rsp), %xmm14
jne 0x1c6d958
jnp 0x1c6d9c2
vucomiss %xmm9, %xmm10
jne 0x1c6d997
jp 0x1c6d997
vucomiss %xmm30, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x27e0a3(%rip), %xmm9 # 0x1eeba20
vmovss %xmm30, %xmm9, %xmm9 {%k1}
vmovss 0x27f1f9(%rip), %xmm10 # 0x1eecb84
vmovss 0x27ed7f(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c6d9b8
vxorps %xmm19, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm30, %xmm9 # xmm9 = (xmm30 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r15b
vucomiss %xmm8, %xmm7
ja 0x1c6e55f
vaddss 0x2efa59(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x27f185(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm30, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm15, %xmm4 # xmm4 = xmm15[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vbroadcastss 0x283548(%rip), %xmm15 # 0x1ef0fec
vmulps %xmm2, %xmm15, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm7, %xmm15, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm15, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vinsertps $0x10, %xmm1, %xmm27, %xmm6 # xmm6 = xmm27[0],xmm1[0],xmm27[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps %xmm17, %xmm0, %xmm9
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm26, %xmm3
vfmadd213ps %xmm16, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm16
vmovaps %xmm28, %xmm12
vfmadd213ps 0x130(%rsp), %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + mem
vmovaps %xmm29, %xmm13
vfmadd213ps 0x120(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm3, %xmm15, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2b32d8(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2f20f6(%rip), %xmm30 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm30, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2ecd56(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2b3247(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2b31e0(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c6e54f
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm27, %xmm7
vmovaps 0x90(%rsp), %xmm16
jbe 0x1c6de2b
testb %sil, %sil
vmovss 0x283217(%rip), %xmm11 # 0x1ef0fec
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
je 0x1c6de66
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c6de66
vmovss 0x2831b9(%rip), %xmm11 # 0x1ef0fec
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0x130(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c6e578
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm18, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c6df1a
decq %rax
jne 0x1c6de77
jmp 0x1c6e57b
vucomiss %xmm30, %xmm0
jb 0x1c6e57b
vucomiss %xmm0, %xmm22
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm14
jb 0x1c6e04a
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm30, %xmm1
jb 0x1c6e04a
vucomiss %xmm1, %xmm22
jb 0x1c6e04a
vmovss 0x8(%r13), %xmm2
vinsertps $0x1c, 0x18(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2a0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm7
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm3, %xmm10, %xmm7 # xmm7 = (xmm10 * xmm3) + xmm7
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vsubss %xmm0, %xmm22, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm9, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm8) + xmm1
vfmadd231ss %xmm7, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm7) + xmm1
vucomiss 0xcc(%rsp), %xmm1
jb 0x1c6e04a
vmovss 0x200(%r11,%r9,4), %xmm7
vucomiss %xmm1, %xmm7
jae 0x1c6e061
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
jmp 0x1c6e59f
vmovss %xmm7, 0xc8(%rsp)
movq %r12, 0x148(%rsp)
movl %r14d, %r12d
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm25, %xmm8
vmulps 0x280(%rsp), %xmm7, %xmm9
vmulps 0x230(%rsp), %xmm7, %xmm10
vmulps 0x220(%rsp), %xmm7, %xmm11
vmulps 0x260(%rsp), %xmm7, %xmm7
vfmadd231ps 0x290(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x250(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x240(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x270(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps 0x282ee1(%rip){1to4}, %xmm7, %xmm6 # 0x1ef0fec
movq (%rdi), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %r14
movl 0x240(%r11,%r9,4), %eax
testl %eax, 0x34(%r14)
je 0x1c6e52b
movq 0x10(%rdi), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
jne 0x1c6e155
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c6e53f
vbroadcastss %xmm5, %xmm5
vmulps 0x1e0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x1f0(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x200(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x210(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%rdi), %rax
vbroadcastss %xmm0, %zmm3
vbroadcastss 0x2a454d(%rip), %zmm4 # 0x1f12704
vpermps %zmm0, %zmm4, %zmm0
vpermps %zmm2, %zmm4, %zmm4
vbroadcastss 0x2b2d0f(%rip), %zmm5 # 0x1f20edc
vpermps %zmm2, %zmm5, %zmm5
vbroadcastss %xmm2, %zmm2
vmovaps %zmm4, 0x4c0(%rsp)
vmovaps %zmm5, 0x500(%rsp)
vmovaps %zmm2, 0x540(%rsp)
vmovaps %zmm3, 0x580(%rsp)
vmovaps %zmm0, 0x5c0(%rsp)
vmovaps 0x480(%rsp), %zmm0
vmovaps %zmm0, 0x600(%rsp)
vmovdqa64 0x440(%rsp), %zmm0
vmovdqa64 %zmm0, 0x640(%rsp)
leaq 0x680(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0x680(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0x6c0(%rsp)
vmovss %xmm1, 0x200(%r11,%r9,4)
vmovaps 0x400(%rsp), %zmm0
vmovaps %zmm0, 0x340(%rsp)
leaq 0x340(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x1b8(%rsp)
movq 0x8(%rdi), %rax
movq %rax, 0x1c0(%rsp)
movq %r11, 0x1c8(%rsp)
leaq 0x4c0(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x10, 0x1d8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm15, 0x150(%rsp)
je 0x1c6e3c8
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xd0(%rsp), %xmm27
vmovaps 0x150(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm16
movq 0xc0(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x27e3c2(%rip), %xmm25 # 0x1eec714
vmovss 0x2836f0(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x282c9a(%rip), %xmm23 # 0x1ef1000
vmovss 0x27e3a4(%rip), %xmm22 # 0x1eec714
vmovss 0x2b2b66(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x282c68(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2b2b32(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2b2b2c(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x27e7de(%rip), %xmm17 # 0x1eecb80
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %rdi
movq 0xa8(%rsp), %r11
movq 0xb0(%rsp), %r9
movq 0xb8(%rsp), %r10
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c6e5db
movq 0x10(%rdi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c6e4f1
testb $0x2, (%rcx)
jne 0x1c6e401
testb $0x40, 0x3e(%r14)
je 0x1c6e4f1
leaq 0x1b0(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x70(%rsp), %xmm14
vmovaps 0xd0(%rsp), %xmm27
vmovaps 0x150(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm26
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x90(%rsp), %xmm16
movq 0xc0(%rsp), %r8
vxorps %xmm30, %xmm30, %xmm30
vbroadcastss 0x27e299(%rip), %xmm25 # 0x1eec714
vmovss 0x2835c7(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x282b71(%rip), %xmm23 # 0x1ef1000
vmovss 0x27e27b(%rip), %xmm22 # 0x1eec714
vmovss 0x2b2a3d(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x282b3f(%rip), %ymm20 # 0x1ef0fec
vbroadcastss 0x2b2a09(%rip), %xmm19 # 0x1f20ec0
vbroadcastss 0x2b2a03(%rip), %xmm18 # 0x1f20ec4
vbroadcastss 0x27e6b5(%rip), %xmm17 # 0x1eecb80
vxorps %xmm31, %xmm31, %xmm31
movq 0x88(%rsp), %rdi
movq 0xa8(%rsp), %r11
movq 0xb0(%rsp), %r9
movq 0xb8(%rsp), %r10
vmovdqa64 0x340(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x1c8(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x27e66c(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %al
jmp 0x1c6e5dd
xorl %eax, %eax
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
movl %r12d, %r14d
orb %al, %r14b
movq 0x148(%rsp), %r12
jmp 0x1c6e59f
vxorps %xmm30, %xmm30, %xmm30
vmovaps 0x90(%rsp), %xmm16
jmp 0x1c6e57b
vmovaps 0x90(%rsp), %xmm16
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
jmp 0x1c6e04a
xorl %r15d, %r15d
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x70(%rsp), %xmm14
testb %r15b, %r15b
jne 0x1c6d57b
jmp 0x1c6e5fd
movb $0x1, %r15b
vmovaps 0x60(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm9
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps %xmm27, %xmm15
vmovaps 0xd0(%rsp), %xmm27
jmp 0x1c6e599
xorl %eax, %eax
testb %al, %al
jne 0x1c6e53f
vmovss 0xc8(%rsp), %xmm0
vmovss %xmm0, 0x200(%r11,%r9,4)
jmp 0x1c6e53f
vinsertps $0x10, %xmm14, %xmm27, %xmm0 # xmm0 = xmm27[0],xmm14[0],xmm27[2,3]
vmovaps 0x190(%rsp), %xmm7
vmovaps 0x180(%rsp), %xmm18
vmovaps 0x3c0(%rsp), %ymm29
vmovaps 0x3a0(%rsp), %ymm28
jmp 0x1c6d03f
testb $0x1, %r14b
jne 0x1c6e657
vmovaps 0x380(%rsp), %ymm0
vcmpleps 0x200(%r11,%r9,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %eax, %r12d
setne 0x1f(%rsp)
jne 0x1c6c8c2
movb 0x1f(%rsp), %al
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 4>::occluded_h<embree::avx512::SweepCurve1IntersectorK<embree::HermiteCurveT, 4>, embree::avx512::Occluded1KEpilog1<4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Shadow-ray (occlusion) query for one ray k of a K-wide ray packet against a
// CurveNi<8> primitive block: returns true as soon as ANY curve in the block is
// hit, false if none are.
//
// pre     - per-ray precomputed data forwarded to the curve intersector
// ray     - ray packet; only lane k is tested (ray.tfar[k] is the current far clip)
// k       - active lane index within the packet
// context - query context; provides access to the scene (context->scene)
// prim    - packed block of up to 8 curve primitive references
static __forceinline bool occluded_h(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Broad-phase test of ray k against the packed curve bounds; fills tNear with
// per-slot entry distances and returns a mask of potentially-hit slots.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Iterate over the set bits of the candidate mask, one curve per iteration.
while (mask)
{
// bscf: takes the lowest set bit's index and clears that bit from mask
// (loop terminates because mask loses one bit per iteration).
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the Hermite control data (end points p0/p1 and tangents t0/t1)
// for this curve from the geometry.
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
// Narrow-phase intersection; the Epilog performs the hit acceptance
// (e.g. filter callbacks). For an occlusion query, any accepted hit ends
// the traversal immediately.
if (Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,Epilog(ray,k,context,geomID,primID)))
return true;
// Re-filter the remaining candidates against ray.tfar[k]; NOTE(review):
// tfar is re-read each iteration — presumably the epilog/filters may have
// shortened it, culling slots whose entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
// No curve in this block occludes the ray.
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x980, %rsp # imm = 0x980
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %r9
leaq (%r9,%r9,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%rdx,4), %xmm1
vmovss 0x40(%r11,%rdx,4), %xmm2
vinsertps $0x10, 0x10(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, %r14
vinsertps $0x20, 0x60(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%r9,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
addq %rax, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x29c5bd(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2aad86(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2aacf4(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x27ae09(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2764f3(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %r9
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%r8,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%r9), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x30(%r11,%rdx,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2a9c05(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x80(%r11,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2a9be0(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2e45dc(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x7a0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %al
je 0x1c78ce3
kandb %k0, %k1, %k0
kmovd %k0, %ecx
movzbl %cl, %r15d
movl $0x1, %ecx
shlxl %edx, %ecx, %ecx
kmovd %ecx, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x550(%rsp)
movq %rdx, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %r8, 0x2f8(%rsp)
movq %r14, 0x28(%rsp)
tzcntq %r15, %rcx
movl 0x2(%r8), %r12d
movl 0x6(%r8,%rcx,4), %ebx
movq (%r14), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%r12,8), %r10
movq %rbx, %rcx
imulq 0x68(%r10), %rcx
movq 0x58(%r10), %rsi
movq 0x90(%r10), %rdi
movl (%rsi,%rcx), %ecx
movq 0xa0(%r10), %rsi
movq %rsi, %r9
imulq %rcx, %r9
vmovaps (%rdi,%r9), %xmm0
leaq 0x1(%rcx), %r9
imulq %r9, %rsi
vmovaps (%rdi,%rsi), %xmm1
movq 0x100(%r10), %rsi
movq 0x110(%r10), %rdi
imulq %rdi, %rcx
vmovss (%r11,%rdx,4), %xmm2
vinsertps $0x1c, 0x10(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
imulq %r9, %rdi
vbroadcastss 0x40(%r11,%rdx,4), %ymm12
vbroadcastss 0x50(%r11,%rdx,4), %ymm21
vunpcklps %xmm21, %xmm12, %xmm3 # xmm3 = xmm12[0],xmm21[0],xmm12[1],xmm21[1]
vbroadcastss 0x60(%r11,%rdx,4), %ymm15
vinsertps $0x28, %xmm15, %xmm3, %xmm9 # xmm9 = xmm3[0,1],xmm15[0],zero
vmovaps (%rsi,%rcx), %xmm3
vbroadcastss 0x2e701c(%rip), %xmm5 # 0x1f5d46c
vfnmadd132ps %xmm5, %xmm0, %xmm3 # xmm3 = -(xmm3 * xmm5) + xmm0
vmovaps (%rsi,%rdi), %xmm4
vfmadd132ps %xmm5, %xmm1, %xmm4 # xmm4 = (xmm4 * xmm5) + xmm1
vaddps %xmm3, %xmm0, %xmm5
vaddps %xmm4, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm5
vmulps 0x2a6e87(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm2, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0x30(%r11,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vpbroadcastd %ebx, %xmm6
vmovdqa %xmm6, 0x570(%rsp)
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x27ab41(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x390(%rsp)
vmovaps %ymm6, 0x660(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm6) + xmm2
vblendps $0x8, %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm8[3]
vsubps %xmm2, %xmm0, %xmm6
vsubps %xmm2, %xmm4, %xmm4
vsubps %xmm2, %xmm3, %xmm3
vsubps %xmm2, %xmm1, %xmm7
vbroadcastss %xmm6, %ymm8
vbroadcastss 0x29c205(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm9
vbroadcastss 0x2aa9cf(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm26
vbroadcastss 0x2aa9bc(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x4e0(%rsp)
vpermps %ymm6, %ymm2, %ymm25
vbroadcastss %xmm3, %ymm6
vpermps %ymm3, %ymm0, %ymm14
vpermps %ymm3, %ymm1, %ymm16
vmovaps %ymm3, 0x4a0(%rsp)
vpermps %ymm3, %ymm2, %ymm17
vbroadcastss %xmm4, %ymm18
vpermps %ymm4, %ymm0, %ymm19
vpermps %ymm4, %ymm1, %ymm20
vmovaps %ymm4, 0x4c0(%rsp)
vpermps %ymm4, %ymm2, %ymm22
vmovaps %ymm14, %ymm4
vbroadcastss %xmm7, %ymm23
vpermps %ymm7, %ymm0, %ymm24
vpermps %ymm7, %ymm1, %ymm30
vmovaps %ymm7, 0x480(%rsp)
vpermps %ymm7, %ymm2, %ymm27
vmovaps %ymm6, %ymm2
vmovaps %ymm15, 0x620(%rsp)
vmulss %xmm15, %xmm15, %xmm0
vfmadd231ps %ymm21, %ymm21, %ymm0 # ymm0 = (ymm21 * ymm21) + ymm0
vmovaps %ymm12, 0x640(%rsp)
vfmadd231ps %ymm12, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm12) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x7e0(%rsp)
vandps %ymm28, %ymm0, %ymm0
vmovaps %ymm0, 0x720(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x400(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x7c0(%rsp)
xorl %ebx, %ebx
xorl %r13d, %r13d
movl $0x1, %ecx
movq %rcx, 0x50(%rsp)
movq %r12, 0x308(%rsp)
vpbroadcastd %r12d, %xmm0
vmovdqa %xmm0, 0x560(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x11c(%rsp)
vmovaps %xmm11, 0x380(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x118(%rsp)
vmovsd 0x2760a9(%rip), %xmm5 # 0x1eec6f0
vbroadcastss 0x2760c4(%rip), %ymm3 # 0x1eec714
vmovaps %ymm21, 0x1e0(%rsp)
vmovaps %ymm8, 0x3a0(%rsp)
vmovaps %ymm9, 0x200(%rsp)
vmovaps %ymm26, 0x1c0(%rsp)
vmovaps %ymm25, 0x2c0(%rsp)
vmovaps %ymm6, 0x460(%rsp)
vmovaps %ymm14, 0x340(%rsp)
vmovaps %ymm16, 0x2a0(%rsp)
vmovaps %ymm17, 0x1a0(%rsp)
vmovaps %ymm18, 0x180(%rsp)
vmovaps %ymm19, 0x280(%rsp)
vmovaps %ymm20, 0x260(%rsp)
vmovaps %ymm22, 0x160(%rsp)
vmovaps %ymm23, 0x140(%rsp)
vmovaps %ymm24, 0x240(%rsp)
vmovaps %ymm30, 0x120(%rsp)
vmovaps %ymm27, 0x220(%rsp)
vmovshdup %xmm5, %xmm0 # xmm0 = xmm5[1,1,3,3]
vsubss %xmm5, %xmm0, %xmm1
vmulss 0x2aa7e4(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm5, 0x3e0(%rsp)
vbroadcastss %xmm5, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x60(%rsp)
vmovaps %ymm0, 0x360(%rsp)
vfmadd231ps 0x2aa809(%rip), %ymm0, %ymm5 # ymm5 = (ymm0 * mem) + ymm5
vsubps %ymm5, %ymm3, %ymm7
vmulps %ymm5, %ymm2, %ymm1
vmovaps %ymm2, %ymm14
vmulps %ymm5, %ymm4, %ymm2
vmovaps %ymm3, %ymm0
vmulps %ymm5, %ymm16, %ymm3
vmovaps %ymm4, %ymm15
vmulps %ymm5, %ymm17, %ymm4
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm9) + ymm2
vfmadd231ps %ymm26, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm26) + ymm3
vfmadd231ps %ymm25, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm25) + ymm4
vmulps %ymm5, %ymm18, %ymm8
vmulps %ymm5, %ymm19, %ymm9
vmulps %ymm5, %ymm20, %ymm10
vmulps %ymm5, %ymm22, %ymm11
vfmadd231ps %ymm14, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm14) + ymm8
vfmadd231ps %ymm15, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm15) + ymm9
vfmadd231ps %ymm16, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm17) + ymm11
vmulps %ymm5, %ymm23, %ymm12
vmulps %ymm5, %ymm24, %ymm13
vmulps %ymm5, %ymm30, %ymm14
vmulps %ymm5, %ymm27, %ymm15
vfmadd231ps %ymm18, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm18) + ymm12
vfmadd231ps %ymm19, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm19) + ymm13
vfmadd231ps %ymm20, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm20) + ymm14
vfmadd231ps %ymm22, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm22) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm5, %ymm12, %ymm3
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x27a79e(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x2e949f(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm3, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm3, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x279f70(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x279f6a(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0xa0(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x275d24(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x275d05(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm27
vsubps %ymm3, %ymm31, %ymm28
vmovaps %ymm29, 0x3c0(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmovaps 0x620(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vmovaps 0x640(%rsp), %ymm23
vfmadd231ps %ymm27, %ymm23, %ymm22 # ymm22 = (ymm23 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm23, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm23) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm16
vmovaps 0x7e0(%rsp), %ymm15
vsubps %ymm16, %ymm15, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm23
vmovaps %ymm23, 0x80(%rsp)
vsubps %ymm1, %ymm23, %ymm1
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2760a6(%rip){1to8}, %ymm15, %ymm26 # 0x1eecb8c
vmulps %ymm1, %ymm26, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %edi
kortestb %k1, %k1
je 0x1c76be8
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x2aa391(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x6e0(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x6c0(%rsp)
vbroadcastss 0x274e9c(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x275ff1(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x2aa322(%rip), %ymm0 # 0x1f20ec4
vmovaps %ymm16, %ymm24
vandps %ymm0, %ymm16, %ymm23
vmovaps 0x720(%rsp), %ymm16
vmaxps %ymm23, %ymm16, %ymm23
vmulps 0x27b2ee(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
movq 0x50(%rsp), %r12
jne 0x1c78c25
vmovaps %ymm24, %ymm16
jmp 0x1c76c01
vbroadcastss 0x274e2e(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x275f88(%rip), %ymm31 # 0x1eecb84
movq 0x50(%rsp), %r12
andb $0x7f, %dil
je 0x1c7701e
vmovaps %ymm16, 0x520(%rsp)
vmovss 0x80(%r11,%rdx,4), %xmm0
vsubss 0x400(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x7c0(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x620(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x1e0(%rsp), %ymm24
vfmadd231ps %ymm12, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm12) + ymm13
vmovaps 0x640(%rsp), %ymm16
vfmadd231ps %ymm11, %ymm16, %ymm13 # ymm13 = (ymm16 * ymm11) + ymm13
vbroadcastss 0x2aa23e(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x27a352(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2aa219(%rip), %ymm31 # 0x1f20ec0
vxorps %ymm31, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm31, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vbroadcastss 0x275a4b(%rip), %ymm30 # 0x1eec714
vfnmadd213ps %ymm30, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm30
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x275e94(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x274d11(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm24, %ymm8 # ymm8 = -(ymm24 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm31, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm31, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm30, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm30
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x500(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %dil
je 0x1c77033
vmovaps 0x6e0(%rsp), %ymm0
vmaxps 0xa0(%rsp), %ymm23, %ymm1
vminps %ymm30, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x6c0(%rsp), %ymm9
vminps %ymm30, %ymm9, %ymm9
vmovaps 0x2aa137(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x2a76a2(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x360(%rsp), %ymm13
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x6e0(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x6c0(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0x80(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm26, %ymm0
vsubps %ymm0, %ymm25, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x220(%rsp), %ymm27
je 0x1c770ca
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm30, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm30
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x2a9ff7(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x640(%rsp), %ymm22
vmulps %ymm12, %ymm22, %ymm0
vmovaps 0x1e0(%rsp), %ymm21
vmulps %ymm12, %ymm21, %ymm1
vmovaps 0x620(%rsp), %ymm25
vmulps %ymm12, %ymm25, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0x3c0(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmovaps %ymm22, %ymm19
vmulps %ymm13, %ymm22, %ymm10
vmulps %ymm13, %ymm21, %ymm17
vmulps %ymm13, %ymm25, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x274a88(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x275bdd(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vandps 0x520(%rsp), %ymm28, %ymm6
vmovaps 0x720(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x27aee8(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c78c74
vmovaps 0x1c0(%rsp), %ymm26
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm22
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm30
vmovaps %ymm19, %ymm12
jmp 0x1c7713a
vbroadcastss 0x2a9e9c(%rip), %ymm28 # 0x1f20ec4
vbroadcastss 0x2756e3(%rip), %ymm3 # 0x1eec714
jmp 0x1c77039
vmovaps %ymm30, %ymm3
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x3a0(%rsp), %ymm8
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm26
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x460(%rsp), %ymm2
vmovaps 0x340(%rsp), %ymm4
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm18
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x160(%rsp), %ymm22
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x220(%rsp), %ymm27
jmp 0x1c78b00
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x274938(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x275a93(%rip), %ymm2 # 0x1eecb84
vmovaps 0x640(%rsp), %ymm12
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x1c0(%rsp), %ymm26
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm22
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x620(%rsp), %ymm25
vmulps %ymm5, %ymm25, %ymm5
vfmadd231ps %ymm3, %ymm21, %ymm5 # ymm5 = (ymm21 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm12, %ymm5 # ymm5 = (ymm12 * ymm10) + ymm5
vmovaps 0x500(%rsp), %ymm6
vmovaps %ymm6, 0x800(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x820(%rsp)
vandps %ymm28, %ymm5, %ymm4
vmaxps %ymm2, %ymm6, %ymm5
vmovaps %ymm5, 0x740(%rsp)
vmovaps %ymm8, 0x760(%rsp)
vbroadcastss 0x2a9d45(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0x114(%rsp)
vcmpleps %ymm3, %ymm6, %k1
kmovd %k1, %esi
andb %dil, %sil
vmovaps %ymm5, 0x680(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %ecx
andb %dil, %cl
movl %ecx, 0x34(%rsp)
orb %sil, %cl
vmovaps %ymm25, %ymm4
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x260(%rsp), %ymm20
je 0x1c77d70
movq %r15, 0x300(%rsp)
movb %al, 0xf(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm4, %ymm9, %ymm2
vfmadd213ps %ymm2, %ymm21, %ymm1 # ymm1 = (ymm21 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm1
vandps %ymm28, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0x10c(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2a9ca5(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a9c97(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vpbroadcastd %r12d, %ymm1
vmovdqa %ymm0, 0x780(%rsp)
vmovdqa %ymm1, 0x700(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %r12d
movl %esi, 0x110(%rsp)
andb %sil, %r12b
je 0x1c77da2
vmovaps 0x4e0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4c0(%rsp), %ymm3
vmovaps 0x480(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x2a9c0b(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x27abd6(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x40(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x3f0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vaddps 0x660(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x274705(%rip), %ymm0 # 0x1eeba20
vblendmps 0x500(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x6e0(%rsp,%rcx), %xmm10
vmovss 0x800(%rsp,%rcx), %xmm11
vmovaps 0x380(%rsp), %xmm0
vucomiss 0x27469d(%rip), %xmm0 # 0x1eeba24
vmovss 0x118(%rsp), %xmm0
jae 0x1c773d3
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x27aac9(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x390(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x275302(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm3
vbroadcastss %xmm10, %xmm1
vmovaps 0x4a0(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x450(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x4e0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4c0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x480(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x279b62(%rip){1to4}, %xmm1, %xmm14 # 0x1ef0fec
vmovaps %xmm4, 0x520(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x360(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x274576(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm0, 0x3c0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
jb 0x1c774d7
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c774e9
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm14, %xmm14, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x275213(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x27520f(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a99a3(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x430(%rsp)
vfnmadd213ss 0x279ac9(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x440(%rsp)
jb 0x1c77552
vsqrtss %xmm0, %xmm0, %xmm31
jmp 0x1c7759c
vmovaps %xmm3, 0xe0(%rsp)
vmovss %xmm4, 0xd0(%rsp)
vmovss %xmm5, 0x10(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x10(%rsp), %xmm5
vmovss 0xd0(%rsp), %xmm4
vmovaps 0xe0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm31
vmovaps 0x360(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm12
vmulps %xmm12, %xmm14, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x27513c(%rip), %xmm7, %xmm29 # 0x1eec714
vmovaps %xmm0, 0xe0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3c0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xd0(%rsp)
vmulss 0x275107(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x10(%rsp)
vmulss 0x2750fd(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x48(%rsp)
vucomiss 0x2743f7(%rip), %xmm0 # 0x1eeba24
jb 0x1c77635
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c77684
vmovss %xmm29, 0x3c(%rsp)
vmovss %xmm31, 0x38(%rsp)
vmovaps %xmm12, 0x410(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x410(%rsp), %xmm12
vmovss 0x38(%rsp), %xmm31
vmovss 0x14(%rsp), %xmm7
vmovss 0x3c(%rsp), %xmm29
vmovaps 0x360(%rsp), %xmm15
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x2a9836(%rip), %ymm30 # 0x1f20ec4
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm18
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x2a0(%rsp), %ymm24
vmovaps 0x1a0(%rsp), %ymm22
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm20
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x160(%rsp), %ymm23
vmovaps 0x140(%rsp), %ymm13
vmovaps 0x240(%rsp), %ymm27
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x220(%rsp), %ymm26
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm11
vmovss 0x2792c1(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x450(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vmovss 0x2798bf(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm10, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x480(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4c0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm10, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm10
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x440(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x430(%rsp), %xmm1, %xmm3
vmulss 0x44(%rsp), %xmm11, %xmm1
vmovss 0x40(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2a96e4(%rip){1to4}, %xmm14, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm12, %xmm2
vmovaps 0x420(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm3, %xmm4
vdivss %xmm31, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm29 # xmm29 = (xmm5 * xmm29) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x390(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xd0(%rsp), %xmm6
vmulss 0x48(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x10(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xe0(%rsp), %xmm31
vfnmadd231ss %xmm4, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm5) + xmm7
vpermilps $0xff, 0x520(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm31, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm31, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm11, %xmm11
vbroadcastss 0x2a960a(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm31, %xmm3
vucomiss %xmm3, %xmm29
movb $0x1, %al
jbe 0x1c7791c
vaddss %xmm29, %xmm1, %xmm1
vmovaps 0x3f0(%rsp), %xmm3
vfmadd231ss 0x27a5d2(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c7791c
vaddss 0x400(%rsp), %xmm11, %xmm11
vucomiss 0x5c(%rsp), %xmm11
jb 0x1c77917
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x80(%rcx,%rax,4), %xmm5
vucomiss %xmm11, %xmm5
jae 0x1c77932
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c77d3b
decq %r15
jne 0x1c773f7
jmp 0x1c77d38
xorl %eax, %eax
vucomiss 0x2740e8(%rip), %xmm10 # 0x1eeba24
jb 0x1c77919
vmovss 0x274dce(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c77919
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3c0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x274dac(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x274da8(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x28(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x308(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x90(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c77917
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c779c6
cmpq $0x0, 0x48(%r14)
jne 0x1c779c6
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c7791c
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm14, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm14
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm14, %xmm1 # xmm1 = (xmm14 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[2,2,2,2]
vbroadcastss %xmm0, %xmm0
vmovaps %xmm2, 0x580(%rsp)
vmovaps %xmm3, 0x590(%rsp)
vmovaps %xmm0, 0x5a0(%rsp)
vmovaps %xmm1, 0x5b0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x5c0(%rsp)
vmovaps 0x570(%rsp), %xmm0
vmovaps %xmm0, 0x5d0(%rsp)
vmovdqa 0x560(%rsp), %xmm0
vmovdqa %xmm0, 0x5e0(%rsp)
leaq 0x5f0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x5f0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x600(%rsp)
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm11, 0x80(%rcx,%rax,4)
vmovaps 0x550(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0x310(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x318(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x320(%rsp)
movq %rcx, 0x328(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0x330(%rsp)
movl $0x4, 0x338(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c77bef
leaq 0x310(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x220(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x240(%rsp), %ymm27
vmovaps 0x140(%rsp), %ymm13
vmovaps 0x160(%rsp), %ymm23
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x280(%rsp), %ymm20
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x1a0(%rsp), %ymm22
vmovaps 0x2a0(%rsp), %ymm24
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x1c0(%rsp), %ymm18
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1e0(%rsp), %ymm21
vbroadcastss 0x2a92df(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2a92d5(%rip), %ymm30 # 0x1f20ec4
vmovdqa 0xf0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c77d14
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c77cd9
testb $0x2, (%rcx)
jne 0x1c77c2e
testb $0x40, 0x3e(%r14)
je 0x1c77cd9
leaq 0x310(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x220(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm19
vmovaps 0x240(%rsp), %ymm27
vmovaps 0x140(%rsp), %ymm13
vmovaps 0x160(%rsp), %ymm23
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x280(%rsp), %ymm20
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x1a0(%rsp), %ymm22
vmovaps 0x2a0(%rsp), %ymm24
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x1c0(%rsp), %ymm18
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1e0(%rsp), %ymm21
vbroadcastss 0x2a91f5(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2a91eb(%rip), %ymm30 # 0x1f20ec4
vmovdqa 0xf0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
movq 0x328(%rsp), %rax
vmovaps 0x80(%rax), %xmm0
vbroadcastss 0x274e82(%rip), %xmm0 {%k1} # 0x1eecb84
vmovaps %xmm0, 0x80(%rax)
kortestb %k1, %k1
setne %r14b
jmp 0x1c77d17
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c779bf
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x80(%rcx,%rax,4)
jmp 0x1c779bf
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %r13b
movq 0x20(%rsp), %rdx
movq 0x18(%rsp), %r11
vmovaps 0x6a0(%rsp), %ymm0
vcmpleps 0x80(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c7730d
jmp 0x1c77dfc
vbroadcastss 0x27499b(%rip), %ymm3 # 0x1eec714
vmovaps 0x3a0(%rsp), %ymm8
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x460(%rsp), %ymm2
vmovaps 0x340(%rsp), %ymm4
jmp 0x1c78b00
vmovaps %ymm20, %ymm8
vmovaps %ymm19, %ymm20
vmovaps %ymm30, %ymm19
vmovaps %ymm23, %ymm13
vmovaps %ymm22, %ymm23
vmovaps %ymm17, %ymm22
vmovaps %ymm18, %ymm17
vmovaps %ymm26, %ymm18
vmovaps %ymm27, %ymm26
vmovaps %ymm24, %ymm27
vmovaps %ymm16, %ymm24
vmovaps %ymm28, %ymm30
vbroadcastss 0x2a90d1(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x660(%rsp), %ymm3
vaddps 0x680(%rsp), %ymm3, %ymm0
vcmpleps 0x80(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd 0x114(%rsp), %k1
kmovd 0x10c(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x34(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2a909a(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a908c(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x680(%rsp)
vpcmpled 0x700(%rsp), %ymm0, %k0
kmovd %k0, %r12d
movl %ecx, 0x34(%rsp)
andb %cl, %r12b
je 0x1c7898c
vmovaps 0x740(%rsp), %ymm7
vmovaps 0x4e0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4c0(%rsp), %ymm5
vmovaps 0x480(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x279fdb(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x40(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x3f0(%rsp)
vmovaps %ymm7, 0x500(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x273b0f(%rip), %ymm0 # 0x1eeba20
vblendmps 0x500(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x6c0(%rsp,%rcx), %xmm10
vmovss 0x760(%rsp,%rcx), %xmm11
vmovaps 0x380(%rsp), %xmm0
vucomiss 0x273aa7(%rip), %xmm0 # 0x1eeba24
vmovss 0x11c(%rsp), %xmm0
jae 0x1c77fc9
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x279ed3(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x390(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x27470c(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm3
vbroadcastss %xmm10, %xmm1
vmovaps 0x4a0(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x450(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x4e0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4c0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x480(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x278f6c(%rip){1to4}, %xmm1, %xmm14 # 0x1ef0fec
vmovaps %xmm4, 0x520(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x360(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x273980(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm0, 0x3c0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
jb 0x1c780cd
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c780df
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm14, %xmm14, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x27461d(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x274619(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a8dad(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x430(%rsp)
vfnmadd213ss 0x278ed3(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x440(%rsp)
jb 0x1c78148
vsqrtss %xmm0, %xmm0, %xmm31
jmp 0x1c78192
vmovaps %xmm3, 0xe0(%rsp)
vmovss %xmm4, 0xd0(%rsp)
vmovss %xmm5, 0x10(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x10(%rsp), %xmm5
vmovss 0xd0(%rsp), %xmm4
vmovaps 0xe0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm31
vmovaps 0x360(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm13
vmulps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x274546(%rip), %xmm7, %xmm29 # 0x1eec714
vmovaps %xmm0, 0xe0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3c0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xd0(%rsp)
vmulss 0x274511(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x10(%rsp)
vmulss 0x274507(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x48(%rsp)
vucomiss 0x273801(%rip), %xmm0 # 0x1eeba24
jb 0x1c7822b
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c7827a
vmovss %xmm29, 0x3c(%rsp)
vmovss %xmm31, 0x38(%rsp)
vmovaps %xmm13, 0x410(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x410(%rsp), %xmm13
vmovss 0x38(%rsp), %xmm31
vmovss 0x14(%rsp), %xmm7
vmovss 0x3c(%rsp), %xmm29
vmovaps 0x360(%rsp), %xmm15
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x2a8c40(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x3a0(%rsp), %ymm8
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm26
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm18
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x160(%rsp), %ymm22
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x220(%rsp), %ymm27
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm11
vmovss 0x2786c4(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x450(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vmovss 0x278cc2(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm10, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x480(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4c0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm10, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm10
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x440(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x430(%rsp), %xmm1, %xmm3
vmulss 0x44(%rsp), %xmm11, %xmm1
vmovss 0x40(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2a8ae7(%rip){1to4}, %xmm14, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm13, %xmm2
vmovaps 0x420(%rsp), %xmm13
vdpps $0x7f, %xmm13, %xmm3, %xmm4
vdivss %xmm31, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm29 # xmm29 = (xmm5 * xmm29) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x390(%rsp), %xmm7
vdpps $0x7f, %xmm13, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xd0(%rsp), %xmm6
vmulss 0x48(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x10(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xe0(%rsp), %xmm31
vfnmadd231ss %xmm4, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm5) + xmm7
vpermilps $0xff, 0x520(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm31, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm31, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm11, %xmm11
vbroadcastss 0x2a8a0d(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm31, %xmm3
vucomiss %xmm3, %xmm29
movb $0x1, %al
jbe 0x1c78524
vaddss %xmm29, %xmm1, %xmm1
vmovaps 0x3f0(%rsp), %xmm3
vfmadd231ss 0x2799d5(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c78524
vaddss 0x400(%rsp), %xmm11, %xmm11
vucomiss 0x5c(%rsp), %xmm11
vmovaps 0x340(%rsp), %ymm4
jb 0x1c7851d
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x80(%rcx,%rax,4), %xmm5
vucomiss %xmm11, %xmm5
jae 0x1c78543
xorl %eax, %eax
xorl %r14d, %r14d
jmp 0x1c7852d
vmovaps 0x340(%rsp), %ymm4
testb %al, %al
je 0x1c7895a
decq %r15
jne 0x1c77fed
jmp 0x1c78957
xorl %eax, %eax
vucomiss 0x2734d7(%rip), %xmm10 # 0x1eeba24
jb 0x1c7851f
vmovss 0x2741bd(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c7851f
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3c0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x27419b(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x274197(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x28(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x308(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x90(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c7851d
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c785d7
cmpq $0x0, 0x48(%r14)
jne 0x1c785d7
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c7852d
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm14, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm14
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm14, %xmm1 # xmm1 = (xmm14 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[2,2,2,2]
vbroadcastss %xmm0, %xmm0
vmovaps %xmm2, 0x580(%rsp)
vmovaps %xmm3, 0x590(%rsp)
vmovaps %xmm0, 0x5a0(%rsp)
vmovaps %xmm1, 0x5b0(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %xmm0, 0x5c0(%rsp)
vmovaps 0x570(%rsp), %xmm0
vmovaps %xmm0, 0x5d0(%rsp)
vmovdqa 0x560(%rsp), %xmm0
vmovdqa %xmm0, 0x5e0(%rsp)
leaq 0x5f0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x5f0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x600(%rsp)
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm11, 0x80(%rcx,%rax,4)
vmovaps 0x550(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
leaq 0xf0(%rsp), %rax
movq %rax, 0x310(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x318(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x320(%rsp)
movq %rcx, 0x328(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0x330(%rsp)
movl $0x4, 0x338(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c78807
leaq 0x310(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x160(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x180(%rsp), %ymm18
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x340(%rsp), %ymm4
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x1c0(%rsp), %ymm26
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x3a0(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm21
vbroadcastss 0x2a86bd(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0xf0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c78933
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c788f8
testb $0x2, (%rcx)
jne 0x1c78846
testb $0x40, 0x3e(%r14)
je 0x1c788f8
leaq 0x310(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x160(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x180(%rsp), %ymm18
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x340(%rsp), %ymm4
vmovaps 0x2c0(%rsp), %ymm25
vmovaps 0x1c0(%rsp), %ymm26
vmovaps 0x200(%rsp), %ymm9
vmovaps 0x3a0(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm21
vbroadcastss 0x2a85cc(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0xf0(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
movq 0x328(%rsp), %rax
vmovaps 0x80(%rax), %xmm0
vbroadcastss 0x274263(%rip), %xmm0 {%k1} # 0x1eecb84
vmovaps %xmm0, 0x80(%rax)
kortestb %k1, %k1
setne %r14b
jmp 0x1c78936
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c785d0
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x80(%rcx,%rax,4)
jmp 0x1c785d0
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %r13b
movq 0x20(%rsp), %rdx
movq 0x18(%rsp), %r11
vmovaps 0x6a0(%rsp), %ymm0
vcmpleps 0x80(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c77f03
jmp 0x1c789e6
vmovaps 0x340(%rsp), %ymm4
vmovaps %ymm30, %ymm28
vmovaps %ymm24, %ymm16
vmovaps %ymm27, %ymm24
vmovaps %ymm26, %ymm27
vmovaps %ymm18, %ymm26
vmovaps %ymm17, %ymm18
vmovaps %ymm22, %ymm17
vmovaps %ymm23, %ymm22
vmovaps %ymm13, %ymm23
vmovaps %ymm19, %ymm30
vmovaps %ymm20, %ymm19
vmovaps %ymm8, %ymm20
vmovaps 0x3a0(%rsp), %ymm8
vmovdqa 0x700(%rsp), %ymm1
vpcmpltd 0x680(%rsp), %ymm1, %k1
vmovaps 0x800(%rsp), %ymm0
vpcmpltd 0x780(%rsp), %ymm1, %k2
vmovaps 0x660(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
vbroadcastss 0x80(%r11,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0x110(%rsp), %ecx
andb %al, %cl
vmovaps 0x740(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x34(%rsp), %esi
andb %al, %sil
orb %cl, %sil
je 0x1c78ad0
movl %ebx, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0x840(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x860(%rsp,%rax)
vmovaps 0x3e0(%rsp), %xmm0
vmovlps %xmm0, 0x880(%rsp,%rax)
movq 0x50(%rsp), %r12
leal 0x1(%r12), %ecx
movl %ecx, 0x888(%rsp,%rax)
incl %ebx
movq 0x2f8(%rsp), %r8
movq 0x28(%rsp), %r14
vbroadcastss 0x273c5b(%rip), %ymm3 # 0x1eec714
movb 0xf(%rsp), %al
movq 0x300(%rsp), %r15
vmovaps 0x460(%rsp), %ymm2
jmp 0x1c78b00
movq 0x2f8(%rsp), %r8
movq 0x28(%rsp), %r14
vbroadcastss 0x273c2e(%rip), %ymm3 # 0x1eec714
movb 0xf(%rsp), %al
movq 0x300(%rsp), %r15
vmovaps 0x460(%rsp), %ymm2
movq 0x50(%rsp), %r12
testl %ebx, %ebx
je 0x1c78cb6
leal -0x1(%rbx), %r9d
leaq (%r9,%r9,2), %rcx
shlq $0x5, %rcx
vmovaps 0x860(%rsp,%rcx), %ymm0
movzbl 0x840(%rsp,%rcx), %esi
vaddps 0x660(%rsp), %ymm0, %ymm1
vcmpleps 0x80(%r11,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %r10d
andl %esi, %r10d
je 0x1c78c06
kmovd %r10d, %k1
vbroadcastss 0x272ece(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %r10b, %sil
je 0x1c78b8a
movzbl %sil, %edi
jmp 0x1c78b8e
movzbl %r10b, %edi
leaq (%rsp,%rcx), %rsi
addq $0x840, %rsi # imm = 0x840
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r12d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %r10d, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
vmovaps 0x460(%rsp), %ymm2
je 0x1c78bd3
movl %ebx, %r9d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x2a8335(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x580(%rsp)
vmovsd 0x580(%rsp,%rcx,4), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
movl %r9d, %ebx
testb %r10b, %r10b
je 0x1c78b00
movq %r12, 0x50(%rsp)
vmovaps 0x3e0(%rsp), %xmm5
jmp 0x1c766dc
vcmpleps 0x2a82d0(%rip), %ymm1, %k2 # 0x1f20f00
vbroadcastss 0x273f4b(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x272ddd(%rip), %ymm23 # 0x1eeba20
vblendmps %ymm1, %ymm23, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm23, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %dil, %sil
movl %esi, %edi
jmp 0x1c76be0
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x273f00(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x272d93(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c76fe3
testb $0x1, %r13b
jne 0x1c78ce3
blsrl %r15d, %eax
vmovaps 0x7a0(%rsp), %ymm0
vcmpleps 0x80(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %r15d
andl %eax, %r15d
setne %al
jne 0x1c763a1
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 8>::intersect_h<embree::avx512::SweepCurve1IntersectorK<embree::HermiteCurveT, 8>, embree::avx512::Intersect1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayHitK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersect ray lane k of an 8-wide ray packet against a CurveNi<8> leaf:
// first a coarse per-curve bounding test over all M curves at once, then an
// exact hermite-curve intersection for each surviving candidate.
static __forceinline void intersect_h(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// tNear receives the entry distance of ray k into each curve's bounds.
vfloat<M> tNear;
// Coarse SIMD test over the leaf's quantized curve bounds; 'valid' flags
// the curves whose bounds the ray hits.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Scalar bitmask of candidate curves, consumed one bit per iteration.
size_t mask = movemask(valid);
while (mask)
{
// bscf: presumably bit-scan-forward + clear — yields the lowest set bit's
// index and removes it from mask (the loop relies on mask shrinking).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the hermite control data (endpoints p0,p1 and tangents t0,t1)
// for this curve segment from the geometry.
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
// Exact intersection; the Epilog functor commits any accepted hit
// (geomID/primID, distance, etc.) into ray lane k.
Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,Epilog(ray,k,context,geomID,primID));
// A committed hit may have shortened ray.tfar[k]; cull remaining
// candidates whose bounds entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xa40, %rsp # imm = 0xA40
movq %rcx, %r15
movq %rsi, %r12
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r12,%rdx,4), %xmm1
vmovss 0x80(%r12,%rdx,4), %xmm2
vinsertps $0x10, 0x20(%r12,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r12,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r12,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%r12,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %r9
vpmovsxbd 0x6(%r8,%r9,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
leal (,%r9,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x299904(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2a80d2(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2a8047(%rip), %ymm6 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm6, %ymm2, %ymm5
vbroadcastss 0x278159(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm6, %ymm1, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm6, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x273847(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %rdi
subq %rcx, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %r9
subq %rcx, %r9
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%r9), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x60(%r12,%rdx,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2a6f53(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x100(%r12,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2a6f2e(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2e1930(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x660(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c7b6ef
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r10d
leaq 0x4e0(%rsp), %rax
addq $0xe0, %rax
movq %rax, 0x198(%rsp)
movl $0x1, %eax
shlxl %edx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x440(%rsp)
movq %r15, 0x18(%rsp)
movq %rdx, 0x8(%rsp)
movq %r8, 0x1a0(%rsp)
tzcntq %r10, %rax
movl 0x2(%r8), %r11d
movl 0x6(%r8,%rax,4), %ebx
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r11,8), %rcx
movq %rbx, %rax
imulq 0x68(%rcx), %rax
movq 0x58(%rcx), %rdi
movq 0x90(%rcx), %rsi
movl (%rdi,%rax), %eax
movq 0xa0(%rcx), %r9
movq %r9, %rdi
imulq %rax, %rdi
vmovaps (%rsi,%rdi), %xmm1
leaq 0x1(%rax), %rdi
imulq %rdi, %r9
vmovaps (%rsi,%r9), %xmm2
movq 0x100(%rcx), %rsi
movq 0x110(%rcx), %rcx
imulq %rcx, %rax
vmovss (%r12,%rdx,4), %xmm0
vinsertps $0x1c, 0x20(%r12,%rdx,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r12,%rdx,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
imulq %rdi, %rcx
vbroadcastss 0x80(%r12,%rdx,4), %ymm26
vbroadcastss 0xa0(%r12,%rdx,4), %ymm30
vunpcklps %xmm30, %xmm26, %xmm3 # xmm3 = xmm26[0],xmm30[0],xmm26[1],xmm30[1]
vbroadcastss 0xc0(%r12,%rdx,4), %ymm21
vinsertps $0x28, %xmm21, %xmm3, %xmm9 # xmm9 = xmm3[0,1],xmm21[0],zero
vmovaps (%rsi,%rax), %xmm3
vbroadcastss 0x2e435f(%rip), %xmm5 # 0x1f5d46c
vfnmadd132ps %xmm5, %xmm1, %xmm3 # xmm3 = -(xmm3 * xmm5) + xmm1
vmovaps (%rsi,%rcx), %xmm4
vfmadd132ps %xmm5, %xmm2, %xmm4 # xmm4 = (xmm4 * xmm5) + xmm2
vaddps %xmm3, %xmm1, %xmm5
vaddps %xmm4, %xmm5, %xmm5
vaddps %xmm5, %xmm2, %xmm5
vmulps 0x2a41ca(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm0, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0x60(%r12,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x277e93(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x230(%rsp)
vmovaps %ymm6, 0x380(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm6) + xmm0
vblendps $0x8, %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm8[3]
vsubps %xmm0, %xmm1, %xmm6
vsubps %xmm0, %xmm4, %xmm4
vsubps %xmm0, %xmm3, %xmm7
vmovaps 0x2a7d7d(%rip), %ymm3 # 0x1f20f20
vsubps %xmm0, %xmm2, %xmm8
vbroadcastss %xmm6, %ymm0
vmovaps %ymm0, 0x8a0(%rsp)
vbroadcastss 0x299546(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm1
vmovaps %ymm1, 0x880(%rsp)
vbroadcastss 0x2a7d07(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm2
vmovaps %ymm2, 0x860(%rsp)
vbroadcastss 0x2a7cec(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x340(%rsp)
vpermps %ymm6, %ymm2, %ymm6
vmovaps %ymm6, 0x840(%rsp)
vbroadcastss %xmm7, %ymm6
vmovaps %ymm6, 0x820(%rsp)
vpermps %ymm7, %ymm0, %ymm6
vmovaps %ymm6, 0x800(%rsp)
vpermps %ymm7, %ymm1, %ymm6
vmovaps %ymm6, 0x7e0(%rsp)
vmovaps %ymm7, 0x300(%rsp)
vpermps %ymm7, %ymm2, %ymm6
vmovaps %ymm6, 0x7c0(%rsp)
vbroadcastss %xmm4, %ymm6
vmovaps %ymm6, 0x7a0(%rsp)
vpermps %ymm4, %ymm0, %ymm6
vmovaps %ymm6, 0x780(%rsp)
vpermps %ymm4, %ymm1, %ymm6
vmovaps %ymm6, 0x760(%rsp)
vmovaps %ymm4, 0x320(%rsp)
vpermps %ymm4, %ymm2, %ymm4
vmovaps %ymm4, 0x740(%rsp)
vbroadcastss %xmm8, %ymm4
vmovaps %ymm4, 0x720(%rsp)
vpermps %ymm8, %ymm0, %ymm0
vmovaps %ymm0, 0x700(%rsp)
vpermps %ymm8, %ymm1, %ymm0
vmovaps %ymm0, 0x6e0(%rsp)
vmovaps %ymm8, 0x2e0(%rsp)
vpermps %ymm8, %ymm2, %ymm0
vmovaps %ymm0, 0x6c0(%rsp)
vmulss %xmm21, %xmm21, %xmm0
vfmadd231ps %ymm30, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm30) + ymm0
vfmadd231ps %ymm26, %ymm26, %ymm0 # ymm0 = (ymm26 * ymm26) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
vandps 0x2a7bd4(%rip){1to8}, %ymm0, %ymm0 # 0x1f20ec4
vmovaps %ymm0, 0x4c0(%rsp)
vmovss %xmm10, 0x3c(%rsp)
vmovaps %xmm5, 0x270(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x680(%rsp)
movq %r11, 0x78(%rsp)
vpbroadcastd %r11d, %ymm0
vmovdqa %ymm0, 0x480(%rsp)
xorl %r11d, %r11d
movl $0x1, %r14d
movq %rbx, 0x1b8(%rsp)
vpbroadcastd %ebx, %ymm0
vmovdqa %ymm0, 0x460(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xfc(%rsp)
vmovaps %xmm11, 0x220(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xf8(%rsp)
vmovsd 0x273375(%rip), %xmm2 # 0x1eec6f0
vmovaps %ymm26, 0x120(%rsp)
vmovaps %ymm30, 0x100(%rsp)
vmovaps %ymm21, 0x1c0(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vsubss %xmm2, %xmm0, %xmm1
vmulss 0x2a7b2d(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm2, 0x180(%rsp)
vbroadcastss %xmm2, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x40(%rsp)
vmovaps %ymm0, 0x200(%rsp)
vfmadd231ps %ymm3, %ymm0, %ymm5 # ymm5 = (ymm0 * ymm3) + ymm5
vbroadcastss 0x273341(%rip), %ymm0 # 0x1eec714
vsubps %ymm5, %ymm0, %ymm7
vmovaps 0x820(%rsp), %ymm12
vmulps %ymm5, %ymm12, %ymm1
vmovaps 0x800(%rsp), %ymm13
vmulps %ymm5, %ymm13, %ymm2
vmovaps 0x7e0(%rsp), %ymm14
vmulps %ymm5, %ymm14, %ymm3
vmovaps 0x7c0(%rsp), %ymm15
vmulps %ymm5, %ymm15, %ymm4
vfmadd231ps 0x8a0(%rsp), %ymm7, %ymm1 # ymm1 = (ymm7 * mem) + ymm1
vfmadd231ps 0x880(%rsp), %ymm7, %ymm2 # ymm2 = (ymm7 * mem) + ymm2
vfmadd231ps 0x860(%rsp), %ymm7, %ymm3 # ymm3 = (ymm7 * mem) + ymm3
vfmadd231ps 0x840(%rsp), %ymm7, %ymm4 # ymm4 = (ymm7 * mem) + ymm4
vmovaps 0x7a0(%rsp), %ymm16
vmulps %ymm5, %ymm16, %ymm8
vmovaps 0x780(%rsp), %ymm17
vmulps %ymm5, %ymm17, %ymm9
vmovaps 0x760(%rsp), %ymm18
vmulps %ymm5, %ymm18, %ymm10
vmovaps 0x740(%rsp), %ymm19
vmulps %ymm5, %ymm19, %ymm11
vfmadd231ps %ymm12, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm12) + ymm8
vfmadd231ps %ymm13, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm13) + ymm9
vfmadd231ps %ymm14, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm14) + ymm10
vfmadd231ps %ymm15, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm15) + ymm11
vmulps 0x720(%rsp), %ymm5, %ymm12
vmulps 0x700(%rsp), %ymm5, %ymm13
vmulps 0x6e0(%rsp), %ymm5, %ymm14
vmulps 0x6c0(%rsp), %ymm5, %ymm15
vfmadd231ps %ymm16, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm16) + ymm12
vfmadd231ps %ymm17, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm17) + ymm13
vfmadd231ps %ymm18, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm19) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm5, %ymm12, %ymm3
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x277a92(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x2e6793(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm3, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm3, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x277264(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x27725e(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0x1e0(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x273018(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x272ff9(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm27
vsubps %ymm3, %ymm31, %ymm28
vmovaps %ymm29, 0x80(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmulps %ymm29, %ymm21, %ymm22
vfmadd231ps %ymm28, %ymm30, %ymm22 # ymm22 = (ymm30 * ymm28) + ymm22
vfmadd231ps %ymm27, %ymm26, %ymm22 # ymm22 = (ymm26 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm21, %ymm17
vfmadd231ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm30) + ymm17
vfmadd231ps %ymm26, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm26) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm21
vmovaps 0x6a0(%rsp), %ymm15
vsubps %ymm21, %ymm15, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm16
vmovaps %ymm16, 0xa0(%rsp)
vsubps %ymm1, %ymm16, %ymm1
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2733aa(%rip){1to8}, %ymm15, %ymm26 # 0x1eecb8c
vmulps %ymm1, %ymm26, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vxorps %xmm16, %xmm16, %xmm16
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c798ee
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x2a768f(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x420(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x400(%rsp)
vbroadcastss 0x27219a(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x2732ef(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x2a7620(%rip), %ymm0 # 0x1f20ec4
vmovaps %ymm21, %ymm24
vandps %ymm0, %ymm21, %ymm23
vmovaps 0x4c0(%rsp), %ymm21
vmaxps %ymm23, %ymm21, %ymm23
vmulps 0x2785ec(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c7b680
vbroadcastss 0x272e2e(%rip), %ymm0 # 0x1eec714
vmovaps %ymm24, %ymm21
jmp 0x1c79902
vbroadcastss 0x272128(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x273282(%rip), %ymm31 # 0x1eecb84
andb $0x7f, %al
je 0x1c79cfd
vmovaps %ymm21, 0x3a0(%rsp)
vmovaps %ymm0, %ymm16
vmovss 0x100(%r12,%rdx,4), %xmm0
vsubss 0x270(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x680(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x1c0(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x100(%rsp), %ymm24
vfmadd231ps %ymm12, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm12) + ymm13
vmovaps 0x120(%rsp), %ymm31
vfmadd231ps %ymm11, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm11) + ymm13
vbroadcastss 0x2a7539(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x27764d(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2a7514(%rip), %ymm30 # 0x1f20ec0
vxorps %ymm30, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm30, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x273199(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x272015(%rip), %ymm23 # 0x1eeba20
vmovaps %ymm23, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm13, %xmm13, %xmm13
vsubps %ymm8, %ymm13, %ymm8
vsubps %ymm9, %ymm13, %ymm9
vsubps %ymm10, %ymm13, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm24, %ymm8 # ymm8 = -(ymm24 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm30, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm30, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm23, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x360(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c79d22
vmovaps 0x420(%rsp), %ymm0
vmaxps 0x1e0(%rsp), %ymm13, %ymm1
vminps %ymm16, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x400(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2a743f(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x2a49aa(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x40(%rsp), %ymm2
vmovaps 0x200(%rsp), %ymm19
vfmadd213ps %ymm2, %ymm19, %ymm0 # ymm0 = (ymm19 * ymm0) + ymm2
vmovaps %ymm0, 0x420(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm19, %ymm0 # ymm0 = (ymm19 * ymm0) + ymm2
vmovaps %ymm0, 0x400(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0xa0(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm26, %ymm0
vsubps %ymm0, %ymm25, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
je 0x1c79d3f
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm13, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x2a7316(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x120(%rsp), %ymm26
vmulps %ymm12, %ymm26, %ymm0
vmovaps 0x100(%rsp), %ymm30
vmulps %ymm12, %ymm30, %ymm1
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0x80(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm26, %ymm10
vmulps %ymm13, %ymm30, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x271db5(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x272f0a(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vbroadcastss 0x2a723b(%rip), %ymm7 # 0x1f20ec4
vandps 0x3a0(%rsp), %ymm7, %ymm6
vmovaps 0x4c0(%rsp), %ymm12
vmaxps %ymm6, %ymm12, %ymm6
vmulps 0x27820b(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm7, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c79d76
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x272eb6(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x271d49(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c79d76
vmovaps 0x2a721b(%rip), %ymm3 # 0x1f20f20
vmovaps 0x120(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
jmp 0x1c7b573
vmovaps 0x2a71f6(%rip), %ymm3 # 0x1f20f20
vmovaps 0x120(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm30
jmp 0x1c7b573
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x271cc3(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x272e1e(%rip), %ymm2 # 0x1eecb84
vmovaps 0x120(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm30
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm3, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm26, %ymm5 # ymm5 = (ymm26 * ymm10) + ymm5
vmovaps 0x360(%rsp), %ymm7
vmovaps %ymm7, 0x8c0(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x8e0(%rsp)
vbroadcastss 0x2a7114(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm5, %ymm4
vmaxps %ymm2, %ymm7, %ymm5
vmovaps %ymm5, 0x600(%rsp)
vmovaps %ymm8, 0x620(%rsp)
vbroadcastss 0x2a7101(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0xf4(%rsp)
vcmpleps %ymm3, %ymm7, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x3c0(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %esi
andb %al, %sil
movl %esi, %eax
orb %cl, %al
je 0x1c7b549
movl %esi, 0x20(%rsp)
movq %r11, 0x1a8(%rsp)
movq %r10, 0x1b0(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm9, %ymm21, %ymm2
vfmadd213ps %ymm2, %ymm30, %ymm1 # ymm1 = (ymm30 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm26, %ymm0 # ymm0 = (ymm26 * ymm0) + ymm1
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0xec(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2a707a(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a706c(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %r14, 0x2d8(%rsp)
vpbroadcastd %r14d, %ymm1
vmovdqa %ymm0, 0x640(%rsp)
vmovdqa %ymm1, 0x4a0(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %ebx
movl %ecx, 0xf0(%rsp)
andb %cl, %bl
vbroadcastss 0x2a701b(%rip), %xmm4 # 0x1f20ec4
je 0x1c7a956
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x320(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm5
vminps %xmm5, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm5, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x277fa4(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x30(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x260(%rsp)
vmovaps 0x360(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x271ad4(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x420(%rsp,%rcx), %xmm8
vmovss 0x8c0(%rsp,%rcx), %xmm9
vmovaps 0x220(%rsp), %xmm0
vucomiss 0x271a70(%rip), %xmm0 # 0x1eeba24
vmovss 0xf8(%rsp), %xmm0
jae 0x1c7a000
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x80(%rsp)
kmovw %k1, 0x40(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x40(%rsp), %k1
vmovaps 0x80(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm8
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x277e9d(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x34(%rsp)
movl $0x4, %r14d
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x230(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2726d6(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x300(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x2c0(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x340(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x320(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x2e0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x276f36(%rip){1to4}, %xmm1, %xmm10 # 0x1ef0fec
vmovaps %xmm4, 0x3a0(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x27194a(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x80(%rsp)
jb 0x1c7a103
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c7a115
vzeroupper
callq 0x6aa20
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm10, %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2725e7(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x2725e3(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a6d77(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x2a0(%rsp)
vfnmadd213ss 0x276e9d(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x38(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x2b0(%rsp)
jb 0x1c7a17c
vsqrtss %xmm0, %xmm0, %xmm15
jmp 0x1c7a1c4
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm4, 0x4(%rsp)
vmovss %xmm5, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm5
vmovss 0x4(%rsp), %xmm4
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm15
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm18
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm6
vmulps %xmm6, %xmm10, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm11, %xmm14
vaddss 0x27250f(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm17
vmulss 0x2724f0(%rip), %xmm17, %xmm16 # 0x1eec718
vmulss 0x2724ea(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x2717ea(%rip), %xmm0 # 0x1eeba24
jb 0x1c7a245
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c7a2d6
vmovss %xmm13, 0x4(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovss %xmm15, 0x2c(%rsp)
vmovss %xmm16, 0x28(%rsp)
vmovaps %xmm17, 0x290(%rsp)
vmovss %xmm19, 0x24(%rsp)
vmovaps %xmm6, 0x280(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x280(%rsp), %xmm6
vmovss 0x24(%rsp), %xmm19
vmovaps 0x290(%rsp), %xmm17
vmovss 0x28(%rsp), %xmm16
vmovss 0x2c(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm14
vmovss 0x14(%rsp), %xmm7
vmovss 0x4(%rsp), %xmm13
vmovaps 0x1e0(%rsp), %xmm18
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x120(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm9
vmovss 0x2766d4(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x2c0(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x276cd2(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x2e0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x320(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x300(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x340(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x2b0(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm10, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmovss 0x38(%rsp), %xmm2
vmulss 0x2a0(%rsp), %xmm2, %xmm2
vmulss 0x34(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2a6b01(%rip){1to4}, %xmm10, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm6, %xmm4
vmovaps 0xd0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x30(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm15, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm11, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm7
vmovaps 0x230(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm11, %xmm5
vmulss %xmm17, %xmm19, %xmm2
vmulss %xmm17, %xmm17, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm11, %xmm6
vaddss %xmm2, %xmm16, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm5 # xmm5 = -(xmm14 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm14, %xmm6 # xmm6 = -(xmm14 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm14, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm8, %xmm8
vsubss %xmm4, %xmm9, %xmm9
vbroadcastss 0x2a6a34(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
jbe 0x1c7a64e
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x260(%rsp), %xmm3
vfmadd231ss 0x277a00(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c7a64e
vaddss 0x270(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x3c(%rsp), %xmm9
jb 0x1c7a651
movq 0x8(%rsp), %rax
vmovss 0x100(%r12,%rax,4), %xmm5
vucomiss %xmm9, %xmm5
jb 0x1c7a651
vucomiss 0x271528(%rip), %xmm8 # 0x1eeba24
jb 0x1c7a651
vmovss 0x27220a(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c7a651
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2721eb(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2721e5(%rip), %xmm18, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x78(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq 0x8(%rsp), %rax
movl 0x120(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c7a66e
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vfmadd213ps %xmm10, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm10
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm10, %xmm10, %xmm3 # xmm3 = xmm10[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c7a673
cmpq $0x0, 0x40(%r15)
jne 0x1c7a673
movq 0x8(%rsp), %rcx
vmovss %xmm9, 0x100(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x180(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x1a0(%r12,%rcx,4)
vmovss %xmm0, 0x1c0(%r12,%rcx,4)
vmovss %xmm8, 0x1e0(%r12,%rcx,4)
movl $0x0, 0x200(%r12,%rcx,4)
movq 0x1b8(%rsp), %rax
movl %eax, 0x220(%r12,%rcx,4)
movq 0x78(%rsp), %rax
movl %eax, 0x240(%r12,%rcx,4)
movq 0x18(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%rcx,4)
jmp 0x1c7a651
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c7a933
testb %al, %al
je 0x1c7a023
jmp 0x1c7a933
movq %rcx, %r15
jmp 0x1c7a651
movq 0x18(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm8, %ymm1
vbroadcastss 0x29807a(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2a6844(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x4e0(%rsp)
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps %ymm1, 0x540(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovdqa 0x480(%rsp), %ymm0
vmovdqa %ymm0, 0x5a0(%rsp)
movq 0x198(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
movq 0x8(%rsp), %rax
vmovss %xmm9, 0x100(%r12,%rax,4)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x240(%rsp)
leaq 0x240(%rsp), %rax
movq %rax, 0x150(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x158(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x160(%rsp)
movq %r12, 0x168(%rsp)
leaq 0x4e0(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x8, 0x178(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x80(%rsp)
vmovss %xmm5, 0x40(%rsp)
je 0x1c7a7fc
leaq 0x150(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x120(%rsp), %ymm26
vbroadcastss 0x2a66c8(%rip), %xmm4 # 0x1f20ec4
vmovdqa 0x240(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c7a91a
movq 0x18(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c7a874
testb $0x2, (%rcx)
jne 0x1c7a82e
testb $0x40, 0x3e(%r15)
je 0x1c7a874
leaq 0x150(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x120(%rsp), %ymm26
vbroadcastss 0x2a6650(%rip), %xmm4 # 0x1f20ec4
vmovdqa 0x240(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c7a91a
vptestmd %ymm0, %ymm0, %k1
movq 0x168(%rsp), %rax
movq 0x170(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c7a929
movq 0x8(%rsp), %rax
vmovss %xmm5, 0x100(%r12,%rax,4)
movq 0x18(%rsp), %r15
jmp 0x1c7a651
movq 0x8(%rsp), %rdx
vmovaps 0x3e0(%rsp), %ymm0
vcmpleps 0x100(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c79f3f
vmovaps 0x3c0(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
vcmpleps 0x100(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovd 0xf4(%rsp), %k1
kmovd 0xec(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x20(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2a6540(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a6532(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x3c0(%rsp)
vpcmpled 0x4a0(%rsp), %ymm0, %k0
kmovd %k0, %ebx
movl %ecx, 0x20(%rsp)
andb %cl, %bl
je 0x1c7b466
vmovaps 0x600(%rsp), %ymm6
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x320(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm5
vminps %xmm5, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm5, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x277482(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x30(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x260(%rsp)
vmovaps %ymm6, 0x360(%rsp)
vaddps 0x380(%rsp), %ymm6, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x270fb2(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x400(%rsp,%rcx), %xmm8
vmovss 0x620(%rsp,%rcx), %xmm9
vmovaps 0x220(%rsp), %xmm0
vucomiss 0x270f4e(%rip), %xmm0 # 0x1eeba24
vmovss 0xfc(%rsp), %xmm0
jae 0x1c7ab22
vmovaps 0x220(%rsp), %xmm0
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x80(%rsp)
kmovw %k1, 0x40(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x40(%rsp), %k1
vmovaps 0x80(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm8
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x27737b(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x34(%rsp)
movl $0x4, %r14d
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x230(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x271bb4(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x300(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x2c0(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x340(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x320(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x2e0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x276414(%rip){1to4}, %xmm1, %xmm10 # 0x1ef0fec
vmovaps %xmm4, 0x3a0(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x270e28(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x40(%rsp)
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x80(%rsp)
jb 0x1c7ac25
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c7ac37
vzeroupper
callq 0x6aa20
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm10, %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x271ac5(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x271ac1(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a6255(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x2a0(%rsp)
vfnmadd213ss 0x27637b(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x38(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x2b0(%rsp)
jb 0x1c7ac9e
vsqrtss %xmm0, %xmm0, %xmm15
jmp 0x1c7ace6
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm4, 0x4(%rsp)
vmovss %xmm5, 0xc0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xc0(%rsp), %xmm5
vmovss 0x4(%rsp), %xmm4
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x40(%rsp), %xmm10
vmovaps %xmm0, %xmm15
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm18
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm6
vmulps %xmm6, %xmm10, %xmm0
vmovaps %xmm0, 0xd0(%rsp)
vdpps $0x7f, %xmm0, %xmm11, %xmm14
vaddss 0x2719ed(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm17
vmulss 0x2719ce(%rip), %xmm17, %xmm16 # 0x1eec718
vmulss 0x2719c8(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x270cc8(%rip), %xmm0 # 0x1eeba24
jb 0x1c7ad67
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c7adf8
vmovss %xmm13, 0x4(%rsp)
vmovaps %xmm14, 0xc0(%rsp)
vmovss %xmm15, 0x2c(%rsp)
vmovss %xmm16, 0x28(%rsp)
vmovaps %xmm17, 0x290(%rsp)
vmovss %xmm19, 0x24(%rsp)
vmovaps %xmm6, 0x280(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x280(%rsp), %xmm6
vmovss 0x24(%rsp), %xmm19
vmovaps 0x290(%rsp), %xmm17
vmovss 0x28(%rsp), %xmm16
vmovss 0x2c(%rsp), %xmm15
vmovaps 0xc0(%rsp), %xmm14
vmovss 0x14(%rsp), %xmm7
vmovss 0x4(%rsp), %xmm13
vmovaps 0x1e0(%rsp), %xmm18
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x40(%rsp), %xmm10
vmovaps 0x120(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x80(%rsp), %xmm9
vmovss 0x275bb2(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x2c0(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x2761b0(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x2e0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x320(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x300(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x340(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x2b0(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm10, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmovss 0x38(%rsp), %xmm2
vmulss 0x2a0(%rsp), %xmm2, %xmm2
vmulss 0x34(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2a5fdf(%rip){1to4}, %xmm10, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm6, %xmm4
vmovaps 0xd0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x30(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm15, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm11, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm7
vmovaps 0x230(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm11, %xmm5
vmulss %xmm17, %xmm19, %xmm2
vmulss %xmm17, %xmm17, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm11, %xmm6
vaddss %xmm2, %xmm16, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm5 # xmm5 = -(xmm14 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm14, %xmm6 # xmm6 = -(xmm14 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm14, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm8, %xmm8
vsubss %xmm4, %xmm9, %xmm9
vbroadcastss 0x2a5f12(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
jbe 0x1c7b170
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x260(%rsp), %xmm3
vfmadd231ss 0x276ede(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c7b170
vaddss 0x270(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x3c(%rsp), %xmm9
jb 0x1c7b173
movq 0x8(%rsp), %rax
vmovss 0x100(%r12,%rax,4), %xmm4
vucomiss %xmm9, %xmm4
jb 0x1c7b173
vucomiss 0x270a06(%rip), %xmm8 # 0x1eeba24
jb 0x1c7b173
vmovss 0x2716e8(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c7b173
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x2716c9(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x2716c3(%rip), %xmm18, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x78(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq 0x8(%rsp), %rax
movl 0x120(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c7b190
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vfmadd213ps %xmm10, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm10
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm10, %xmm10, %xmm3 # xmm3 = xmm10[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c7b195
cmpq $0x0, 0x40(%r15)
jne 0x1c7b195
movq 0x8(%rsp), %rcx
vmovss %xmm9, 0x100(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x180(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x1a0(%r12,%rcx,4)
vmovss %xmm0, 0x1c0(%r12,%rcx,4)
vmovss %xmm8, 0x1e0(%r12,%rcx,4)
movl $0x0, 0x200(%r12,%rcx,4)
movq 0x1b8(%rsp), %rax
movl %eax, 0x220(%r12,%rcx,4)
movq 0x78(%rsp), %rax
movl %eax, 0x240(%r12,%rcx,4)
movq 0x18(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x260(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%r12,%rcx,4)
jmp 0x1c7b173
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c7b443
testb %al, %al
je 0x1c7ab45
jmp 0x1c7b443
movq %rcx, %r15
jmp 0x1c7b173
movq 0x18(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm8, %ymm1
vbroadcastss 0x297558(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2a5d22(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x4e0(%rsp)
vmovaps %ymm3, 0x500(%rsp)
vmovaps %ymm0, 0x520(%rsp)
vmovaps %ymm1, 0x540(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x560(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmovdqa 0x480(%rsp), %ymm0
vmovdqa %ymm0, 0x5a0(%rsp)
movq 0x198(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
movq 0x8(%rsp), %rax
vmovss %xmm9, 0x100(%r12,%rax,4)
vmovaps 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x240(%rsp)
leaq 0x240(%rsp), %rax
movq %rax, 0x150(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x158(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x160(%rsp)
movq %r12, 0x168(%rsp)
leaq 0x4e0(%rsp), %rax
movq %rax, 0x170(%rsp)
movl $0x8, 0x178(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm8, 0xa0(%rsp)
vmovaps %xmm9, 0x80(%rsp)
vmovss %xmm4, 0x40(%rsp)
je 0x1c7b315
leaq 0x150(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm4
vmovaps 0x80(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x120(%rsp), %ymm26
vmovdqa 0x240(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c7b42a
movq 0x18(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c7b384
testb $0x2, (%rcx)
jne 0x1c7b347
testb $0x40, 0x3e(%r15)
je 0x1c7b384
leaq 0x150(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x40(%rsp), %xmm4
vmovaps 0x80(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm8
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x120(%rsp), %ymm26
vmovdqa 0x240(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c7b42a
vptestmd %ymm0, %ymm0, %k1
movq 0x168(%rsp), %rax
movq 0x170(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c7b439
movq 0x8(%rsp), %rax
vmovss %xmm4, 0x100(%r12,%rax,4)
movq 0x18(%rsp), %r15
jmp 0x1c7b173
movq 0x8(%rsp), %rdx
vmovaps 0x3e0(%rsp), %ymm0
vcmpleps 0x100(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c7aa61
vmovdqa 0x4a0(%rsp), %ymm1
vpcmpltd 0x3c0(%rsp), %ymm1, %k1
vmovaps 0x8c0(%rsp), %ymm0
vpcmpltd 0x640(%rsp), %ymm1, %k2
vmovaps 0x380(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
vbroadcastss 0x100(%r12,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0xf0(%rsp), %ecx
andb %al, %cl
vmovaps 0x600(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x20(%rsp), %esi
andb %al, %sil
orb %cl, %sil
movq 0x2d8(%rsp), %r14
je 0x1c7b553
movq 0x1a8(%rsp), %r11
movl %r11d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0x900(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x920(%rsp,%rax)
vmovaps 0x180(%rsp), %xmm2
vmovlps %xmm2, 0x940(%rsp,%rax)
leal 0x1(%r14), %ecx
movl %ecx, 0x948(%rsp,%rax)
incl %r11d
movq 0x1a0(%rsp), %r8
movq 0x1b0(%rsp), %r10
vmovaps 0x2a59d9(%rip), %ymm3 # 0x1f20f20
jmp 0x1c7b57c
vmovaps 0x2a59cf(%rip), %ymm3 # 0x1f20f20
jmp 0x1c7b573
movq 0x1a0(%rsp), %r8
movq 0x1b0(%rsp), %r10
vmovaps 0x2a59b5(%rip), %ymm3 # 0x1f20f20
movq 0x1a8(%rsp), %r11
vmovaps 0x180(%rsp), %xmm2
movl %r11d, %eax
testl %eax, %eax
je 0x1c7b6cb
leal -0x1(%rax), %r11d
leaq (%r11,%r11,2), %rcx
shlq $0x5, %rcx
vmovaps 0x920(%rsp,%rcx), %ymm0
movzbl 0x900(%rsp,%rcx), %esi
vaddps 0x380(%rsp), %ymm0, %ymm1
vcmpleps 0x100(%r12,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %r9d
andl %esi, %r9d
je 0x1c7b66f
kmovd %r9d, %k1
vbroadcastss 0x27044f(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %r9b, %sil
je 0x1c7b609
movzbl %sil, %edi
jmp 0x1c7b60d
movzbl %r9b, %edi
leaq (%rsp,%rcx), %rsi
addq $0x900, %rsi # imm = 0x900
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r14d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %r9d, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c7b649
movl %eax, %r11d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps %ymm3, %ymm1, %ymm0 # ymm0 = (ymm0 * ymm3) + ymm1
vmovaps %ymm0, 0x4e0(%rsp)
vmovsd 0x4e0(%rsp,%rcx,4), %xmm2
movl %r11d, %eax
testb %r9b, %r9b
je 0x1c7b57f
jmp 0x1c79393
vcmpleps %ymm16, %ymm1, %k2
vbroadcastss 0x2714f4(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x270386(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm1, %ymm16, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm16, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %al, %sil
movl %esi, %eax
jmp 0x1c798dd
blsrl %r10d, %eax
vmovaps 0x660(%rsp), %ymm0
vcmpleps 0x100(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %r10d
andl %eax, %r10d
jne 0x1c7905b
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 8>::occluded_h<embree::avx512::SweepCurve1IntersectorK<embree::HermiteCurveT, 8>, embree::avx512::Occluded1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
static __forceinline bool occluded_h(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
if (Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,Epilog(ray,k,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0xa20, %rsp # imm = 0xA20
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %r9
leaq (%r9,%r9,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%rdx,4), %xmm1
vmovss 0x80(%r11,%rdx,4), %xmm2
vinsertps $0x10, 0x20(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, %r14
vinsertps $0x20, 0xc0(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%r9,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
addq %rax, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x296efa(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2a56c3(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x2a5631(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x275746(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x270e30(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %r9
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%r8,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%r9), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%r11,%rdx,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x2a4542(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x100(%r11,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2a451d(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2def19(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x840(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %al
je 0x1c7e3f3
kandb %k0, %k1, %k0
kmovd %k0, %ecx
movzbl %cl, %r15d
leaq 0x6c0(%rsp), %rcx
addq $0xe0, %rcx
movq %rcx, 0x2d0(%rsp)
movl $0x1, %ecx
shlxl %edx, %ecx, %ecx
kmovd %ecx, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x620(%rsp)
movq %rdx, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %r8, 0x2d8(%rsp)
movq %r14, 0x28(%rsp)
tzcntq %r15, %rcx
movl 0x2(%r8), %r12d
movl 0x6(%r8,%rcx,4), %ebx
movq (%r14), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%r12,8), %r10
movq %rbx, %rcx
imulq 0x68(%r10), %rcx
movq 0x58(%r10), %rsi
movq 0x90(%r10), %rdi
movl (%rsi,%rcx), %ecx
movq 0xa0(%r10), %rsi
movq %rsi, %r9
imulq %rcx, %r9
vmovaps (%rdi,%r9), %xmm0
leaq 0x1(%rcx), %r9
imulq %r9, %rsi
vmovaps (%rdi,%rsi), %xmm1
movq 0x100(%r10), %rsi
movq 0x110(%r10), %rdi
imulq %rdi, %rcx
vmovss (%r11,%rdx,4), %xmm2
vinsertps $0x1c, 0x20(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
imulq %r9, %rdi
vbroadcastss 0x80(%r11,%rdx,4), %ymm12
vbroadcastss 0xa0(%r11,%rdx,4), %ymm21
vunpcklps %xmm21, %xmm12, %xmm3 # xmm3 = xmm12[0],xmm21[0],xmm12[1],xmm21[1]
vbroadcastss 0xc0(%r11,%rdx,4), %ymm15
vinsertps $0x28, %xmm15, %xmm3, %xmm9 # xmm9 = xmm3[0,1],xmm15[0],zero
vmovaps (%rsi,%rcx), %xmm3
vbroadcastss 0x2e193c(%rip), %xmm5 # 0x1f5d46c
vfnmadd132ps %xmm5, %xmm0, %xmm3 # xmm3 = -(xmm3 * xmm5) + xmm0
vmovaps (%rsi,%rdi), %xmm4
vfmadd132ps %xmm5, %xmm1, %xmm4 # xmm4 = (xmm4 * xmm5) + xmm1
vaddps %xmm3, %xmm0, %xmm5
vaddps %xmm4, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm5
vmulps 0x2a17a7(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm2, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0x60(%r11,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vpbroadcastd %ebx, %ymm6
vmovdqa %ymm6, 0x660(%rsp)
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x275461(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x370(%rsp)
vmovaps %ymm6, 0x580(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm6) + xmm2
vblendps $0x8, %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm8[3]
vsubps %xmm2, %xmm0, %xmm6
vsubps %xmm2, %xmm4, %xmm4
vsubps %xmm2, %xmm3, %xmm3
vsubps %xmm2, %xmm1, %xmm7
vbroadcastss %xmm6, %ymm8
vbroadcastss 0x296b25(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm9
vbroadcastss 0x2a52ef(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm30
vbroadcastss 0x2a52dc(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x4e0(%rsp)
vpermps %ymm6, %ymm2, %ymm26
vbroadcastss %xmm3, %ymm6
vpermps %ymm3, %ymm0, %ymm14
vpermps %ymm3, %ymm1, %ymm16
vmovaps %ymm3, 0x4a0(%rsp)
vpermps %ymm3, %ymm2, %ymm17
vbroadcastss %xmm4, %ymm18
vpermps %ymm4, %ymm0, %ymm19
vpermps %ymm4, %ymm1, %ymm20
vmovaps %ymm4, 0x4c0(%rsp)
vpermps %ymm4, %ymm2, %ymm22
vmovaps %ymm14, %ymm4
vbroadcastss %xmm7, %ymm23
vpermps %ymm7, %ymm0, %ymm24
vpermps %ymm7, %ymm1, %ymm25
vmovaps %ymm7, 0x480(%rsp)
vpermps %ymm7, %ymm2, %ymm27
vmovaps %ymm6, %ymm2
vmovaps %ymm15, 0x560(%rsp)
vmulss %xmm15, %xmm15, %xmm0
vfmadd231ps %ymm21, %ymm21, %ymm0 # ymm0 = (ymm21 * ymm21) + ymm0
vmovaps %ymm12, 0x540(%rsp)
vfmadd231ps %ymm12, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm12) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x880(%rsp)
vandps %ymm28, %ymm0, %ymm0
vmovaps %ymm0, 0x6a0(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x400(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x860(%rsp)
xorl %ebx, %ebx
xorl %r13d, %r13d
movl $0x1, %ecx
movq %rcx, 0x50(%rsp)
movq %r12, 0x2e8(%rsp)
vpbroadcastd %r12d, %ymm0
vmovdqa %ymm0, 0x640(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xfc(%rsp)
vmovaps %xmm11, 0x360(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xf8(%rsp)
vmovsd 0x2709c9(%rip), %xmm5 # 0x1eec6f0
vbroadcastss 0x2709e4(%rip), %ymm3 # 0x1eec714
vmovaps %ymm21, 0x1c0(%rsp)
vmovaps %ymm8, 0x380(%rsp)
vmovaps %ymm9, 0x1e0(%rsp)
vmovaps %ymm30, 0x1a0(%rsp)
vmovaps %ymm26, 0x180(%rsp)
vmovaps %ymm6, 0x460(%rsp)
vmovaps %ymm14, 0x320(%rsp)
vmovaps %ymm16, 0x2a0(%rsp)
vmovaps %ymm17, 0x160(%rsp)
vmovaps %ymm18, 0x140(%rsp)
vmovaps %ymm19, 0x280(%rsp)
vmovaps %ymm20, 0x260(%rsp)
vmovaps %ymm22, 0x120(%rsp)
vmovaps %ymm23, 0x240(%rsp)
vmovaps %ymm24, 0x220(%rsp)
vmovaps %ymm25, 0x100(%rsp)
vmovaps %ymm27, 0x200(%rsp)
vmovshdup %xmm5, %xmm0 # xmm0 = xmm5[1,1,3,3]
vsubss %xmm5, %xmm0, %xmm1
vmulss 0x2a5104(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm5, 0x3e0(%rsp)
vbroadcastss %xmm5, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x60(%rsp)
vmovaps %ymm0, 0x340(%rsp)
vfmadd231ps 0x2a5129(%rip), %ymm0, %ymm5 # ymm5 = (ymm0 * mem) + ymm5
vsubps %ymm5, %ymm3, %ymm7
vmulps %ymm5, %ymm2, %ymm1
vmovaps %ymm2, %ymm14
vmulps %ymm5, %ymm4, %ymm2
vmovaps %ymm3, %ymm0
vmulps %ymm5, %ymm16, %ymm3
vmovaps %ymm4, %ymm15
vmulps %ymm5, %ymm17, %ymm4
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm9) + ymm2
vfmadd231ps %ymm30, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm30) + ymm3
vfmadd231ps %ymm26, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm26) + ymm4
vmulps %ymm5, %ymm18, %ymm8
vmulps %ymm5, %ymm19, %ymm9
vmulps %ymm5, %ymm20, %ymm10
vmulps %ymm5, %ymm22, %ymm11
vfmadd231ps %ymm14, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm14) + ymm8
vfmadd231ps %ymm15, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm15) + ymm9
vfmadd231ps %ymm16, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm17) + ymm11
vmulps %ymm5, %ymm23, %ymm12
vmulps %ymm5, %ymm24, %ymm13
vmulps %ymm5, %ymm25, %ymm14
vmulps %ymm5, %ymm27, %ymm15
vfmadd231ps %ymm18, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm18) + ymm12
vfmadd231ps %ymm19, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm19) + ymm13
vfmadd231ps %ymm20, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm20) + ymm14
vfmadd231ps %ymm22, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm22) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm5, %ymm12, %ymm3
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x2750be(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x2e3dbf(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm3, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm3, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x274890(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x27488a(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0xa0(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x270644(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x270625(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm27
vsubps %ymm3, %ymm31, %ymm28
vmovaps %ymm29, 0x3a0(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmovaps 0x560(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vmovaps 0x540(%rsp), %ymm23
vfmadd231ps %ymm27, %ymm23, %ymm22 # ymm22 = (ymm23 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm23, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm23) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm16
vmovaps 0x880(%rsp), %ymm15
vsubps %ymm16, %ymm15, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm23
vmovaps %ymm23, 0x80(%rsp)
vsubps %ymm1, %ymm23, %ymm1
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x2709c6(%rip){1to8}, %ymm15, %ymm26 # 0x1eecb8c
vmulps %ymm1, %ymm26, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %edi
kortestb %k1, %k1
je 0x1c7c2c8
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x2a4cb1(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x600(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x5e0(%rsp)
vbroadcastss 0x26f7bc(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x270911(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x2a4c42(%rip), %ymm0 # 0x1f20ec4
vmovaps %ymm16, %ymm24
vandps %ymm0, %ymm16, %ymm23
vmovaps 0x6a0(%rsp), %ymm16
vmaxps %ymm23, %ymm16, %ymm23
vmulps 0x275c0e(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
movq 0x50(%rsp), %r12
jne 0x1c7e335
vmovaps %ymm24, %ymm16
jmp 0x1c7c2e1
vbroadcastss 0x26f74e(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x2708a8(%rip), %ymm31 # 0x1eecb84
movq 0x50(%rsp), %r12
andb $0x7f, %dil
je 0x1c7c70b
vmovaps %ymm16, 0x520(%rsp)
vmovss 0x100(%r11,%rdx,4), %xmm0
vsubss 0x400(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x860(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x560(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x1c0(%rsp), %ymm24
vfmadd231ps %ymm12, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm12) + ymm13
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm11, %ymm16, %ymm13 # ymm13 = (ymm16 * ymm11) + ymm13
vbroadcastss 0x2a4b5e(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x274c72(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2a4b39(%rip), %ymm31 # 0x1f20ec0
vxorps %ymm31, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm31, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vbroadcastss 0x27036b(%rip), %ymm30 # 0x1eec714
vfnmadd213ps %ymm30, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm30
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x2707b4(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x26f630(%rip), %ymm23 # 0x1eeba20
vmovaps %ymm23, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm13, %xmm13, %xmm13
vsubps %ymm8, %ymm13, %ymm8
vsubps %ymm9, %ymm13, %ymm9
vsubps %ymm10, %ymm13, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm24, %ymm8 # ymm8 = -(ymm24 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm31, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm31, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm30, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm30
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm23, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x500(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %dil
je 0x1c7c720
vmovaps 0x600(%rsp), %ymm0
vmaxps 0xa0(%rsp), %ymm13, %ymm1
vminps %ymm30, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x5e0(%rsp), %ymm9
vminps %ymm30, %ymm9, %ymm9
vmovaps 0x2a4a59(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x2a1fc4(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x340(%rsp), %ymm16
vfmadd213ps %ymm2, %ymm16, %ymm0 # ymm0 = (ymm16 * ymm0) + ymm2
vmovaps %ymm0, 0x600(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm16, %ymm0 # ymm0 = (ymm16 * ymm0) + ymm2
vmovaps %ymm0, 0x5e0(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0x80(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm26, %ymm0
vsubps %ymm0, %ymm25, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x240(%rsp), %ymm23
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x200(%rsp), %ymm27
je 0x1c7c7b7
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm13, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm30, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm30
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x2a4910(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x540(%rsp), %ymm22
vmulps %ymm12, %ymm22, %ymm0
vmovaps 0x1c0(%rsp), %ymm25
vmulps %ymm12, %ymm25, %ymm1
vmovaps 0x560(%rsp), %ymm26
vmulps %ymm12, %ymm26, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0x3a0(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmovaps %ymm22, %ymm19
vmulps %ymm13, %ymm22, %ymm10
vmovaps %ymm25, %ymm21
vmulps %ymm13, %ymm25, %ymm17
vmulps %ymm13, %ymm26, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x26f39b(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x2704f0(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vandps 0x520(%rsp), %ymm28, %ymm6
vmovaps 0x6a0(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x2757fb(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c7e384
vmovaps 0x1a0(%rsp), %ymm30
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm25
vmovaps %ymm19, %ymm13
jmp 0x1c7c81f
vbroadcastss 0x2a47af(%rip), %ymm28 # 0x1f20ec4
vbroadcastss 0x26fff6(%rip), %ymm3 # 0x1eec714
jmp 0x1c7c726
vmovaps %ymm30, %ymm3
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm30
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x460(%rsp), %ymm2
vmovaps 0x320(%rsp), %ymm4
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm23
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x100(%rsp), %ymm25
vmovaps 0x200(%rsp), %ymm27
jmp 0x1c7e210
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x26f24b(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x2703a6(%rip), %ymm2 # 0x1eecb84
vmovaps 0x540(%rsp), %ymm13
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm30
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x100(%rsp), %ymm25
vmovaps 0x560(%rsp), %ymm7
vmulps %ymm5, %ymm7, %ymm5
vfmadd231ps %ymm3, %ymm21, %ymm5 # ymm5 = (ymm21 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm10) + ymm5
vmovaps 0x500(%rsp), %ymm6
vmovaps %ymm6, 0x8a0(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x8c0(%rsp)
vandps %ymm28, %ymm5, %ymm4
vmaxps %ymm2, %ymm6, %ymm5
vmovaps %ymm5, 0x7e0(%rsp)
vmovaps %ymm8, 0x800(%rsp)
vbroadcastss 0x2a4659(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0xf4(%rsp)
vcmpleps %ymm3, %ymm6, %k1
kmovd %k1, %esi
andb %dil, %sil
vmovaps %ymm5, 0x5a0(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %ecx
andb %dil, %cl
movl %ecx, 0x34(%rsp)
orb %sil, %cl
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x260(%rsp), %ymm20
je 0x1c7d461
movq %r15, 0x2e0(%rsp)
movb %al, 0xf(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm7, %ymm9, %ymm2
vfmadd213ps %ymm2, %ymm21, %ymm1 # ymm1 = (ymm21 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm1
vandps %ymm28, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0xec(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2a45c7(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a45b9(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vpbroadcastd %r12d, %ymm1
vmovdqa %ymm0, 0x820(%rsp)
vmovdqa %ymm1, 0x680(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %r12d
movl %esi, 0xf0(%rsp)
andb %sil, %r12b
je 0x1c7d493
vmovaps 0x4e0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4c0(%rsp), %ymm3
vmovaps 0x480(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x2a452d(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2754f8(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x40(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x3f0(%rsp)
vmovaps 0x500(%rsp), %ymm0
vaddps 0x580(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x26f027(%rip), %ymm0 # 0x1eeba20
vblendmps 0x500(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x600(%rsp,%rcx), %xmm10
vmovss 0x8a0(%rsp,%rcx), %xmm11
vmovaps 0x360(%rsp), %xmm0
vucomiss 0x26efbf(%rip), %xmm0 # 0x1eeba24
vmovss 0xf8(%rsp), %xmm0
jae 0x1c7cab1
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2753eb(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x370(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x26fc24(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm3
vbroadcastss %xmm10, %xmm1
vmovaps 0x4a0(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x450(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x4e0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4c0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x480(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x274484(%rip){1to4}, %xmm1, %xmm14 # 0x1ef0fec
vmovaps %xmm4, 0x520(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x340(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x26ee98(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
jb 0x1c7cbb5
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c7cbc7
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm14, %xmm14, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x26fb35(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x26fb31(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a42c5(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x430(%rsp)
vfnmadd213ss 0x2743eb(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x440(%rsp)
jb 0x1c7cc30
vsqrtss %xmm0, %xmm0, %xmm31
jmp 0x1c7cc7a
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm4, 0xc0(%rsp)
vmovss %xmm5, 0x10(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x10(%rsp), %xmm5
vmovss 0xc0(%rsp), %xmm4
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm31
vmovaps 0x340(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm13
vmulps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x26fa5e(%rip), %xmm7, %xmm29 # 0x1eec714
vmovaps %xmm0, 0xd0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3a0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xc0(%rsp)
vmulss 0x26fa29(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x10(%rsp)
vmulss 0x26fa1f(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x48(%rsp)
vucomiss 0x26ed19(%rip), %xmm0 # 0x1eeba24
jb 0x1c7cd13
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c7cd62
vmovss %xmm29, 0x3c(%rsp)
vmovss %xmm31, 0x38(%rsp)
vmovaps %xmm13, 0x410(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x410(%rsp), %xmm13
vmovss 0x38(%rsp), %xmm31
vmovss 0x14(%rsp), %xmm7
vmovss 0x3c(%rsp), %xmm29
vmovaps 0x340(%rsp), %xmm15
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x2a4158(%rip), %ymm30 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm12
vmovaps 0x2a0(%rsp), %ymm23
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm20
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x120(%rsp), %ymm25
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x100(%rsp), %ymm19
vmovaps 0x200(%rsp), %ymm26
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm11
vmovss 0x273be3(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x450(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vmovss 0x2741e1(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm10, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x480(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4c0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm10, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm10
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x440(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x430(%rsp), %xmm1, %xmm3
vmulss 0x44(%rsp), %xmm11, %xmm1
vmovss 0x40(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2a4006(%rip){1to4}, %xmm14, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm13, %xmm2
vmovaps 0x420(%rsp), %xmm13
vdpps $0x7f, %xmm13, %xmm3, %xmm4
vdivss %xmm31, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm29 # xmm29 = (xmm5 * xmm29) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x370(%rsp), %xmm7
vdpps $0x7f, %xmm13, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xc0(%rsp), %xmm6
vmulss 0x48(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x10(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xd0(%rsp), %xmm31
vfnmadd231ss %xmm4, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm5) + xmm7
vpermilps $0xff, 0x520(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm31, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm31, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm11, %xmm11
vbroadcastss 0x2a3f2c(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm31, %xmm3
vucomiss %xmm3, %xmm29
movb $0x1, %al
jbe 0x1c7cffa
vaddss %xmm29, %xmm1, %xmm1
vmovaps 0x3f0(%rsp), %xmm3
vfmadd231ss 0x274ef4(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c7cffa
vaddss 0x400(%rsp), %xmm11, %xmm11
vucomiss 0x5c(%rsp), %xmm11
jb 0x1c7cff5
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x100(%rcx,%rax,4), %xmm5
vucomiss %xmm11, %xmm5
jae 0x1c7d010
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c7d42c
decq %r15
jne 0x1c7cad5
jmp 0x1c7d429
xorl %eax, %eax
vucomiss 0x26ea0a(%rip), %xmm10 # 0x1eeba24
jb 0x1c7cff7
vmovss 0x26f6f0(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c7cff7
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3a0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x26f6ce(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x26f6ca(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x28(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x2e8(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x120(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c7cff5
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c7d0a4
cmpq $0x0, 0x48(%r14)
jne 0x1c7d0a4
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c7cffa
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm14, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm14
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm14, %xmm1 # xmm1 = (xmm14 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %ymm1
vbroadcastss 0x2955ff(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2a3dc9(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x6c0(%rsp)
vmovaps %ymm3, 0x6e0(%rsp)
vmovaps %ymm0, 0x700(%rsp)
vmovaps %ymm1, 0x720(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x660(%rsp), %ymm0
vmovaps %ymm0, 0x760(%rsp)
vmovdqa 0x640(%rsp), %ymm0
vmovdqa %ymm0, 0x780(%rsp)
movq 0x2d0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x7a0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x7c0(%rsp)
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm11, 0x100(%rcx,%rax,4)
vmovaps 0x620(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
leaq 0x3c0(%rsp), %rax
movq %rax, 0x2f0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x2f8(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x300(%rsp)
movq %rcx, 0x308(%rsp)
leaq 0x6c0(%rsp), %rax
movq %rax, 0x310(%rsp)
movl $0x8, 0x318(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c7d2e4
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm19
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x120(%rsp), %ymm25
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x280(%rsp), %ymm20
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm23
vmovaps 0x180(%rsp), %ymm12
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x2a3bea(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2a3be0(%rip), %ymm30 # 0x1f20ec4
vmovdqa 0x3c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c7d405
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c7d3c9
testb $0x2, (%rcx)
jne 0x1c7d31e
testb $0x40, 0x3e(%r14)
je 0x1c7d3c9
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm19
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x240(%rsp), %ymm24
vmovaps 0x120(%rsp), %ymm25
vmovaps 0x260(%rsp), %ymm8
vmovaps 0x280(%rsp), %ymm20
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm23
vmovaps 0x180(%rsp), %ymm12
vmovaps 0x1a0(%rsp), %ymm17
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x2a3b05(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x2a3afb(%rip), %ymm30 # 0x1f20ec4
vmovdqa 0x3c0(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x308(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x26f792(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %r14b
jmp 0x1c7d408
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c7d09d
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x100(%rcx,%rax,4)
jmp 0x1c7d09d
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %r13b
movq 0x20(%rsp), %rdx
movq 0x18(%rsp), %r11
vmovaps 0x5c0(%rsp), %ymm0
vcmpleps 0x100(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c7c9eb
jmp 0x1c7d4f3
vbroadcastss 0x26f2aa(%rip), %ymm3 # 0x1eec714
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x460(%rsp), %ymm2
vmovaps 0x320(%rsp), %ymm4
jmp 0x1c7e210
vmovaps %ymm20, %ymm8
vmovaps %ymm19, %ymm20
vmovaps %ymm25, %ymm19
vmovaps %ymm22, %ymm25
vmovaps %ymm18, %ymm22
vmovaps %ymm17, %ymm18
vmovaps %ymm30, %ymm17
vmovaps %ymm26, %ymm12
vmovaps %ymm27, %ymm26
vmovaps %ymm24, %ymm27
vmovaps %ymm23, %ymm24
vmovaps %ymm16, %ymm23
vmovaps %ymm28, %ymm30
vbroadcastss 0x2a39da(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x580(%rsp), %ymm3
vaddps 0x5a0(%rsp), %ymm3, %ymm0
vcmpleps 0x100(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd 0xf4(%rsp), %k1
kmovd 0xec(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x34(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2a39a3(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a3995(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x5a0(%rsp)
vpcmpled 0x680(%rsp), %ymm0, %k0
kmovd %k0, %r12d
movl %ecx, 0x34(%rsp)
andb %cl, %r12b
je 0x1c7e096
vmovaps 0x7e0(%rsp), %ymm7
vmovaps 0x4e0(%rsp), %ymm1
vmovaps 0x4a0(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4c0(%rsp), %ymm5
vmovaps 0x480(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x2748e4(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x40(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x3f0(%rsp)
vmovaps %ymm7, 0x500(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x26e418(%rip), %ymm0 # 0x1eeba20
vblendmps 0x500(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x5e0(%rsp,%rcx), %xmm10
vmovss 0x800(%rsp,%rcx), %xmm11
vmovaps 0x360(%rsp), %xmm0
vucomiss 0x26e3b0(%rip), %xmm0 # 0x1eeba24
vmovss 0xfc(%rsp), %xmm0
jae 0x1c7d6c0
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x2747dc(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x370(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x26f015(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm3
vbroadcastss %xmm10, %xmm1
vmovaps 0x4a0(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x450(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x4e0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4c0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x480(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x273875(%rip){1to4}, %xmm1, %xmm14 # 0x1ef0fec
vmovaps %xmm4, 0x520(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x340(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x26e289(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
jb 0x1c7d7c4
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c7d7d6
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm14, %xmm14, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x26ef26(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x26ef22(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a36b6(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x430(%rsp)
vfnmadd213ss 0x2737dc(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x440(%rsp)
jb 0x1c7d83f
vsqrtss %xmm0, %xmm0, %xmm31
jmp 0x1c7d889
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm4, 0xc0(%rsp)
vmovss %xmm5, 0x10(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x10(%rsp), %xmm5
vmovss 0xc0(%rsp), %xmm4
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm31
vmovaps 0x340(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm12
vmulps %xmm12, %xmm14, %xmm0
vmovaps %xmm0, 0x420(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x26ee4f(%rip), %xmm7, %xmm29 # 0x1eec714
vmovaps %xmm0, 0xd0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3a0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xc0(%rsp)
vmulss 0x26ee1a(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x10(%rsp)
vmulss 0x26ee10(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x48(%rsp)
vucomiss 0x26e10a(%rip), %xmm0 # 0x1eeba24
jb 0x1c7d922
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c7d971
vmovss %xmm29, 0x3c(%rsp)
vmovss %xmm31, 0x38(%rsp)
vmovaps %xmm12, 0x410(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x410(%rsp), %xmm12
vmovss 0x38(%rsp), %xmm31
vmovss 0x14(%rsp), %xmm7
vmovss 0x3c(%rsp), %xmm29
vmovaps 0x340(%rsp), %xmm15
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x2a3549(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm30
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm23
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x100(%rsp), %ymm25
vmovaps 0x200(%rsp), %ymm27
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm11
vmovss 0x272fcd(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x450(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vmovss 0x2735cb(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm10, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x480(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4c0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm10, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm10
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4a0(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x440(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x430(%rsp), %xmm1, %xmm3
vmulss 0x44(%rsp), %xmm11, %xmm1
vmovss 0x40(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x2a33f0(%rip){1to4}, %xmm14, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm12, %xmm2
vmovaps 0x420(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm3, %xmm4
vdivss %xmm31, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm29 # xmm29 = (xmm5 * xmm29) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x370(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xc0(%rsp), %xmm6
vmulss 0x48(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x10(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xd0(%rsp), %xmm31
vfnmadd231ss %xmm4, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm5) + xmm7
vpermilps $0xff, 0x520(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm31, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm31, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm11, %xmm11
vbroadcastss 0x2a3316(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm31, %xmm3
vucomiss %xmm3, %xmm29
movb $0x1, %al
jbe 0x1c7dc1b
vaddss %xmm29, %xmm1, %xmm1
vmovaps 0x3f0(%rsp), %xmm3
vfmadd231ss 0x2742de(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c7dc1b
vaddss 0x400(%rsp), %xmm11, %xmm11
vucomiss 0x5c(%rsp), %xmm11
vmovaps 0x320(%rsp), %ymm4
jb 0x1c7dc14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x100(%rcx,%rax,4), %xmm5
vucomiss %xmm11, %xmm5
jae 0x1c7dc3a
xorl %eax, %eax
xorl %r14d, %r14d
jmp 0x1c7dc24
vmovaps 0x320(%rsp), %ymm4
testb %al, %al
je 0x1c7e064
decq %r15
jne 0x1c7d6e4
jmp 0x1c7e061
xorl %eax, %eax
vucomiss 0x26dde0(%rip), %xmm10 # 0x1eeba24
jb 0x1c7dc16
vmovss 0x26eac6(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c7dc16
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3a0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x26eaa4(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x26eaa0(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x28(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x2e8(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x120(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c7dc14
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c7dcce
cmpq $0x0, 0x48(%r14)
jne 0x1c7dcce
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c7dc24
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm14, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm14
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm14, %xmm1 # xmm1 = (xmm14 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %ymm1
vbroadcastss 0x2949d5(%rip), %ymm2 # 0x1f12704
vpermps %ymm0, %ymm2, %ymm2
vbroadcastss 0x2a319f(%rip), %ymm3 # 0x1f20edc
vpermps %ymm0, %ymm3, %ymm3
vbroadcastss %xmm0, %ymm0
vmovaps %ymm2, 0x6c0(%rsp)
vmovaps %ymm3, 0x6e0(%rsp)
vmovaps %ymm0, 0x700(%rsp)
vmovaps %ymm1, 0x720(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %ymm0, 0x740(%rsp)
vmovaps 0x660(%rsp), %ymm0
vmovaps %ymm0, 0x760(%rsp)
vmovdqa 0x640(%rsp), %ymm0
vmovdqa %ymm0, 0x780(%rsp)
movq 0x2d0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x7a0(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x7c0(%rsp)
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm11, 0x100(%rcx,%rax,4)
vmovaps 0x620(%rsp), %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
leaq 0x3c0(%rsp), %rax
movq %rax, 0x2f0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x2f8(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x300(%rsp)
movq %rcx, 0x308(%rsp)
leaq 0x6c0(%rsp), %rax
movq %rax, 0x310(%rsp)
movl $0x8, 0x318(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c7df15
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm27
vmovaps 0x100(%rsp), %ymm25
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x240(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x320(%rsp), %ymm4
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x1a0(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x2a2faf(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0x3c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c7e03d
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c7e001
testb $0x2, (%rcx)
jne 0x1c7df4f
testb $0x40, 0x3e(%r14)
je 0x1c7e001
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm27
vmovaps 0x100(%rsp), %ymm25
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x240(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x280(%rsp), %ymm19
vmovaps 0x140(%rsp), %ymm18
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x2a0(%rsp), %ymm16
vmovaps 0x320(%rsp), %ymm4
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x1a0(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x2a2ec3(%rip), %ymm28 # 0x1f20ec4
vmovdqa 0x3c0(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x308(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x26eb5a(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %r14b
jmp 0x1c7e040
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c7dcc7
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x100(%rcx,%rax,4)
jmp 0x1c7dcc7
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %r13b
movq 0x20(%rsp), %rdx
movq 0x18(%rsp), %r11
vmovaps 0x5c0(%rsp), %ymm0
vcmpleps 0x100(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c7d5fa
jmp 0x1c7e0f6
vmovaps 0x320(%rsp), %ymm4
vmovaps %ymm30, %ymm28
vmovaps %ymm23, %ymm16
vmovaps %ymm24, %ymm23
vmovaps %ymm27, %ymm24
vmovaps %ymm26, %ymm27
vmovaps %ymm12, %ymm26
vmovaps %ymm17, %ymm30
vmovaps %ymm18, %ymm17
vmovaps %ymm22, %ymm18
vmovaps %ymm25, %ymm22
vmovaps %ymm19, %ymm25
vmovaps %ymm20, %ymm19
vmovaps %ymm8, %ymm20
vmovaps 0x380(%rsp), %ymm8
vmovdqa 0x680(%rsp), %ymm1
vpcmpltd 0x5a0(%rsp), %ymm1, %k1
vmovaps 0x8a0(%rsp), %ymm0
vpcmpltd 0x820(%rsp), %ymm1, %k2
vmovaps 0x580(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
vbroadcastss 0x100(%r11,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0xf0(%rsp), %ecx
andb %al, %cl
vmovaps 0x7e0(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x34(%rsp), %esi
andb %al, %sil
orb %cl, %sil
je 0x1c7e1e0
movl %ebx, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0x8e0(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0x900(%rsp,%rax)
vmovaps 0x3e0(%rsp), %xmm0
vmovlps %xmm0, 0x920(%rsp,%rax)
movq 0x50(%rsp), %r12
leal 0x1(%r12), %ecx
movl %ecx, 0x928(%rsp,%rax)
incl %ebx
movq 0x2d8(%rsp), %r8
movq 0x28(%rsp), %r14
vbroadcastss 0x26e54b(%rip), %ymm3 # 0x1eec714
movb 0xf(%rsp), %al
movq 0x2e0(%rsp), %r15
vmovaps 0x460(%rsp), %ymm2
jmp 0x1c7e210
movq 0x2d8(%rsp), %r8
movq 0x28(%rsp), %r14
vbroadcastss 0x26e51e(%rip), %ymm3 # 0x1eec714
movb 0xf(%rsp), %al
movq 0x2e0(%rsp), %r15
vmovaps 0x460(%rsp), %ymm2
movq 0x50(%rsp), %r12
testl %ebx, %ebx
je 0x1c7e3c6
leal -0x1(%rbx), %r9d
leaq (%r9,%r9,2), %rcx
shlq $0x5, %rcx
vmovaps 0x900(%rsp,%rcx), %ymm0
movzbl 0x8e0(%rsp,%rcx), %esi
vaddps 0x580(%rsp), %ymm0, %ymm1
vcmpleps 0x100(%r11,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %r10d
andl %esi, %r10d
je 0x1c7e316
kmovd %r10d, %k1
vbroadcastss 0x26d7be(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %r10b, %sil
je 0x1c7e29a
movzbl %sil, %edi
jmp 0x1c7e29e
movzbl %r10b, %edi
leaq (%rsp,%rcx), %rsi
addq $0x8e0, %rsi # imm = 0x8E0
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r12d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %r10d, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
vmovaps 0x460(%rsp), %ymm2
je 0x1c7e2e3
movl %ebx, %r9d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x2a2c25(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x6c0(%rsp)
vmovsd 0x6c0(%rsp,%rcx,4), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
movl %r9d, %ebx
testb %r10b, %r10b
je 0x1c7e210
movq %r12, 0x50(%rsp)
vmovaps 0x3e0(%rsp), %xmm5
jmp 0x1c7bdbc
vcmpleps 0x2a2bc0(%rip), %ymm1, %k2 # 0x1f20f00
vbroadcastss 0x26e83b(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x26d6cd(%rip), %ymm23 # 0x1eeba20
vblendmps %ymm1, %ymm23, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm23, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %dil, %sil
movl %esi, %edi
jmp 0x1c7c2c0
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x26e7f0(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x26d683(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c7c6d0
testb $0x1, %r13b
jne 0x1c7e3f3
blsrl %r15d, %eax
vmovaps 0x840(%rsp), %ymm0
vcmpleps 0x100(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %r15d
andl %eax, %r15d
setne %al
jne 0x1c7ba7b
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 16>::intersect_h<embree::avx512::SweepCurve1IntersectorK<embree::HermiteCurveT, 16>, embree::avx512::Intersect1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayHitK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
static __forceinline void intersect_h(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,Epilog(ray,k,context,geomID,primID));
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xc40, %rsp # imm = 0xC40
movq %rcx, %r15
movq %rsi, %r12
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r12,%rdx,4), %xmm1
vmovss 0x100(%r12,%rdx,4), %xmm2
vinsertps $0x10, 0x40(%r12,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%r12,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%r12,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x180(%r12,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %r9
vpmovsxbd 0x6(%r8,%r9,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
leal (,%r9,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2941f1(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2a29bf(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x2a2934(%rip), %ymm6 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm6, %ymm2, %ymm5
vbroadcastss 0x272a46(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm6, %ymm1, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm6, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x26e134(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %rdi
subq %rcx, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %r9
subq %rcx, %r9
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%r9), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc0(%r12,%rdx,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x2a1840(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x200(%r12,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x2a1818(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2dc21a(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x520(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c80e3b
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r10d
leaq 0x880(%rsp), %rax
addq $0x1c0, %rax # imm = 0x1C0
movq %rax, 0x1b8(%rsp)
movl $0x1, %eax
shlxl %edx, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x7c0(%rsp)
movq %r15, 0x38(%rsp)
movq %rdx, 0x28(%rsp)
movq %r8, 0x1c0(%rsp)
tzcntq %r10, %rax
movl 0x2(%r8), %r11d
movl 0x6(%r8,%rax,4), %ebx
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r11,8), %rcx
movq %rbx, %rax
imulq 0x68(%rcx), %rax
movq 0x58(%rcx), %rdi
movq 0x90(%rcx), %rsi
movl (%rdi,%rax), %eax
movq 0xa0(%rcx), %r9
movq %r9, %rdi
imulq %rax, %rdi
vmovaps (%rsi,%rdi), %xmm1
leaq 0x1(%rax), %rdi
imulq %rdi, %r9
vmovaps (%rsi,%r9), %xmm2
movq 0x100(%rcx), %rsi
movq 0x110(%rcx), %rcx
imulq %rcx, %rax
vmovss (%r12,%rdx,4), %xmm0
vinsertps $0x1c, 0x40(%r12,%rdx,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r12,%rdx,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
imulq %rdi, %rcx
vbroadcastss 0x100(%r12,%rdx,4), %ymm26
vbroadcastss 0x140(%r12,%rdx,4), %ymm30
vunpcklps %xmm30, %xmm26, %xmm3 # xmm3 = xmm26[0],xmm30[0],xmm26[1],xmm30[1]
vbroadcastss 0x180(%r12,%rdx,4), %ymm21
vinsertps $0x28, %xmm21, %xmm3, %xmm9 # xmm9 = xmm3[0,1],xmm21[0],zero
vmovaps (%rsi,%rax), %xmm3
vbroadcastss 0x2dec47(%rip), %xmm5 # 0x1f5d46c
vfnmadd132ps %xmm5, %xmm1, %xmm3 # xmm3 = -(xmm3 * xmm5) + xmm1
vmovaps (%rsi,%rcx), %xmm4
vfmadd132ps %xmm5, %xmm2, %xmm4 # xmm4 = (xmm4 * xmm5) + xmm2
vaddps %xmm3, %xmm1, %xmm5
vaddps %xmm4, %xmm5, %xmm5
vaddps %xmm5, %xmm2, %xmm5
vmulps 0x29eab2(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm0, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0xc0(%r12,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x272778(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x250(%rsp)
vmovaps %ymm6, 0x380(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm0 # xmm0 = (xmm9 * xmm6) + xmm0
vblendps $0x8, %xmm8, %xmm0, %xmm0 # xmm0 = xmm0[0,1,2],xmm8[3]
vsubps %xmm0, %xmm1, %xmm6
vsubps %xmm0, %xmm4, %xmm4
vsubps %xmm0, %xmm3, %xmm7
vmovaps 0x2a2662(%rip), %ymm3 # 0x1f20f20
vsubps %xmm0, %xmm2, %xmm8
vbroadcastss %xmm6, %ymm0
vmovaps %ymm0, 0x760(%rsp)
vbroadcastss 0x293e2b(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm1
vmovaps %ymm1, 0x740(%rsp)
vbroadcastss 0x2a25ec(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm2
vmovaps %ymm2, 0x720(%rsp)
vbroadcastss 0x2a25d1(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x340(%rsp)
vpermps %ymm6, %ymm2, %ymm6
vmovaps %ymm6, 0x700(%rsp)
vbroadcastss %xmm7, %ymm6
vmovaps %ymm6, 0x6e0(%rsp)
vpermps %ymm7, %ymm0, %ymm6
vmovaps %ymm6, 0x6c0(%rsp)
vpermps %ymm7, %ymm1, %ymm6
vmovaps %ymm6, 0x6a0(%rsp)
vmovaps %ymm7, 0x300(%rsp)
vpermps %ymm7, %ymm2, %ymm6
vmovaps %ymm6, 0x680(%rsp)
vbroadcastss %xmm4, %ymm6
vmovaps %ymm6, 0x660(%rsp)
vpermps %ymm4, %ymm0, %ymm6
vmovaps %ymm6, 0x640(%rsp)
vpermps %ymm4, %ymm1, %ymm6
vmovaps %ymm6, 0x620(%rsp)
vmovaps %ymm4, 0x320(%rsp)
vpermps %ymm4, %ymm2, %ymm4
vmovaps %ymm4, 0x600(%rsp)
vbroadcastss %xmm8, %ymm4
vmovaps %ymm4, 0x5e0(%rsp)
vpermps %ymm8, %ymm0, %ymm0
vmovaps %ymm0, 0x5c0(%rsp)
vpermps %ymm8, %ymm1, %ymm0
vmovaps %ymm0, 0x5a0(%rsp)
vmovaps %ymm8, 0x2e0(%rsp)
vpermps %ymm8, %ymm2, %ymm0
vmovaps %ymm0, 0x580(%rsp)
vmulss %xmm21, %xmm21, %xmm0
vfmadd231ps %ymm30, %ymm30, %ymm0 # ymm0 = (ymm30 * ymm30) + ymm0
vfmadd231ps %ymm26, %ymm26, %ymm0 # ymm0 = (ymm26 * ymm26) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x560(%rsp)
vandps 0x2a24b9(%rip){1to8}, %ymm0, %ymm0 # 0x1f20ec4
vmovaps %ymm0, 0x4a0(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x270(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x540(%rsp)
movq %r11, 0x98(%rsp)
vpbroadcastd %r11d, %zmm0
vmovdqa64 %zmm0, 0x840(%rsp)
xorl %r11d, %r11d
movl $0x1, %r14d
movq %rbx, 0x1d8(%rsp)
vpbroadcastd %ebx, %zmm0
vmovdqa64 %zmm0, 0x800(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x11c(%rsp)
vmovaps %xmm11, 0x240(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0x118(%rsp)
vmovsd 0x26dc59(%rip), %xmm2 # 0x1eec6f0
vmovaps %ymm26, 0x140(%rsp)
vmovaps %ymm30, 0x120(%rsp)
vmovaps %ymm21, 0x1e0(%rsp)
vmovshdup %xmm2, %xmm0 # xmm0 = xmm2[1,1,3,3]
vxorps %xmm31, %xmm31, %xmm31
vsubss %xmm2, %xmm0, %xmm1
vmulss 0x2a240b(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm2, 0x1a0(%rsp)
vbroadcastss %xmm2, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x60(%rsp)
vmovaps %ymm0, 0x220(%rsp)
vfmadd231ps %ymm3, %ymm0, %ymm5 # ymm5 = (ymm0 * ymm3) + ymm5
vbroadcastss 0x26dc1f(%rip), %ymm0 # 0x1eec714
vsubps %ymm5, %ymm0, %ymm7
vmovaps 0x6e0(%rsp), %ymm12
vmulps %ymm5, %ymm12, %ymm1
vmovaps 0x6c0(%rsp), %ymm13
vmulps %ymm5, %ymm13, %ymm2
vmovaps 0x6a0(%rsp), %ymm14
vmulps %ymm5, %ymm14, %ymm3
vmovaps 0x680(%rsp), %ymm15
vmulps %ymm5, %ymm15, %ymm4
vfmadd231ps 0x760(%rsp), %ymm7, %ymm1 # ymm1 = (ymm7 * mem) + ymm1
vfmadd231ps 0x740(%rsp), %ymm7, %ymm2 # ymm2 = (ymm7 * mem) + ymm2
vfmadd231ps 0x720(%rsp), %ymm7, %ymm3 # ymm3 = (ymm7 * mem) + ymm3
vfmadd231ps 0x700(%rsp), %ymm7, %ymm4 # ymm4 = (ymm7 * mem) + ymm4
vmovaps 0x660(%rsp), %ymm16
vmulps %ymm5, %ymm16, %ymm8
vmovaps 0x640(%rsp), %ymm17
vmulps %ymm5, %ymm17, %ymm9
vmovaps 0x620(%rsp), %ymm18
vmulps %ymm5, %ymm18, %ymm10
vmovaps 0x600(%rsp), %ymm19
vmulps %ymm5, %ymm19, %ymm11
vfmadd231ps %ymm12, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm12) + ymm8
vfmadd231ps %ymm13, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm13) + ymm9
vfmadd231ps %ymm14, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm14) + ymm10
vfmadd231ps %ymm15, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm15) + ymm11
vmulps 0x5e0(%rsp), %ymm5, %ymm12
vmulps 0x5c0(%rsp), %ymm5, %ymm13
vmulps 0x5a0(%rsp), %ymm5, %ymm14
vmulps 0x580(%rsp), %ymm5, %ymm15
vfmadd231ps %ymm16, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm16) + ymm12
vfmadd231ps %ymm17, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm17) + ymm13
vfmadd231ps %ymm18, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm19) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm5, %ymm12, %ymm3
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x272370(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x2e1071(%rip), %ymm7 # 0x1f5fd20
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm3, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm3, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x271b48(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x271b42(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0x200(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x26d8fc(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x26d8dd(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm27
vsubps %ymm3, %ymm31, %ymm28
vmovaps %ymm29, 0xa0(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmulps %ymm29, %ymm21, %ymm22
vfmadd231ps %ymm28, %ymm30, %ymm22 # ymm22 = (ymm30 * ymm28) + ymm22
vfmadd231ps %ymm27, %ymm26, %ymm22 # ymm22 = (ymm26 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm21, %ymm17
vfmadd231ps %ymm30, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm30) + ymm17
vfmadd231ps %ymm26, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm26) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm21
vmovaps 0x560(%rsp), %ymm15
vsubps %ymm21, %ymm15, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm16
vmovaps %ymm16, 0xc0(%rsp)
vsubps %ymm1, %ymm16, %ymm1
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x26dc8e(%rip){1to8}, %ymm15, %ymm26 # 0x1eecb8c
vmulps %ymm1, %ymm26, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vxorps %xmm16, %xmm16, %xmm16
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %eax
kortestb %k1, %k1
je 0x1c7f00a
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x2a1f73(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x460(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x440(%rsp)
vbroadcastss 0x26ca7e(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x26dbd3(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x2a1f04(%rip), %ymm0 # 0x1f20ec4
vmovaps %ymm21, %ymm24
vandps %ymm0, %ymm21, %ymm23
vmovaps 0x4a0(%rsp), %ymm21
vmaxps %ymm23, %ymm21, %ymm23
vmulps 0x272ed0(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c80dc9
vbroadcastss 0x26d712(%rip), %ymm0 # 0x1eec714
vmovaps %ymm24, %ymm21
jmp 0x1c7f01e
vbroadcastss 0x26ca0c(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x26db66(%rip), %ymm31 # 0x1eecb84
andb $0x7f, %al
je 0x1c7f41a
vmovaps %ymm21, 0x3a0(%rsp)
vmovaps %ymm0, %ymm16
vmovss 0x200(%r12,%rdx,4), %xmm0
vsubss 0x270(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x540(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x1e0(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x120(%rsp), %ymm24
vfmadd231ps %ymm12, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm12) + ymm13
vmovaps 0x140(%rsp), %ymm31
vfmadd231ps %ymm11, %ymm31, %ymm13 # ymm13 = (ymm31 * ymm11) + ymm13
vbroadcastss 0x2a1e1d(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x271f31(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x2a1df8(%rip), %ymm30 # 0x1f20ec0
vxorps %ymm30, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm30, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vfnmadd213ps %ymm16, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm16
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x26da7d(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x26c8fa(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm24, %ymm8 # ymm8 = -(ymm24 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm31, %ymm8 # ymm8 = -(ymm31 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm30, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm30, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm16, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm16
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x360(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c7f43f
vmovaps 0x460(%rsp), %ymm0
vmaxps 0x200(%rsp), %ymm23, %ymm1
vminps %ymm16, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x440(%rsp), %ymm9
vminps %ymm16, %ymm9, %ymm9
vmovaps 0x2a1d21(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x29f28c(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x220(%rsp), %ymm13
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x460(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x440(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0xc0(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm26, %ymm0
vsubps %ymm0, %ymm25, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
je 0x1c7f45c
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm16, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm16
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x2a1bf9(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x140(%rsp), %ymm26
vmulps %ymm12, %ymm26, %ymm0
vmovaps 0x120(%rsp), %ymm30
vmulps %ymm12, %ymm30, %ymm1
vmulps %ymm12, %ymm21, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0xa0(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmulps %ymm13, %ymm26, %ymm10
vmulps %ymm13, %ymm30, %ymm17
vmulps %ymm13, %ymm21, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x26c698(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x26d7ed(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vbroadcastss 0x2a1b1e(%rip), %ymm7 # 0x1f20ec4
vandps 0x3a0(%rsp), %ymm7, %ymm6
vmovaps 0x4a0(%rsp), %ymm12
vmaxps %ymm6, %ymm12, %ymm6
vmulps 0x272aee(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm7, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
je 0x1c7f493
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x26d799(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x26c62c(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c7f493
vmovaps 0x2a1afe(%rip), %ymm3 # 0x1f20f20
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm21
jmp 0x1c80cb9
vmovaps 0x2a1ad9(%rip), %ymm3 # 0x1f20f20
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
jmp 0x1c80cb9
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x26c5a6(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x26d701(%rip), %ymm2 # 0x1eecb84
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmulps %ymm5, %ymm21, %ymm5
vfmadd231ps %ymm3, %ymm30, %ymm5 # ymm5 = (ymm30 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm26, %ymm5 # ymm5 = (ymm26 * ymm10) + ymm5
vmovaps 0x360(%rsp), %ymm7
vmovaps %ymm7, 0x780(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x7a0(%rsp)
vbroadcastss 0x2a19f7(%rip), %ymm6 # 0x1f20ec4
vandps %ymm6, %ymm5, %ymm4
vmaxps %ymm2, %ymm7, %ymm5
vmovaps %ymm5, 0x4c0(%rsp)
vmovaps %ymm8, 0x4e0(%rsp)
vbroadcastss 0x2a19e4(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0x114(%rsp)
vcmpleps %ymm3, %ymm7, %k1
kmovd %k1, %ecx
andb %al, %cl
vmovaps %ymm5, 0x400(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %esi
andb %al, %sil
movl %esi, %eax
orb %cl, %al
je 0x1c80c8f
movl %esi, 0x40(%rsp)
movq %r11, 0x1c8(%rsp)
movq %r10, 0x1d0(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm9, %ymm21, %ymm2
vfmadd213ps %ymm2, %ymm30, %ymm1 # ymm1 = (ymm30 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm26, %ymm0 # ymm0 = (ymm26 * ymm0) + ymm1
vandps %ymm6, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0x10c(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x2a195d(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a194f(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
movq %r14, 0x2d8(%rsp)
vpbroadcastd %r14d, %ymm1
vmovdqa %ymm0, 0x500(%rsp)
vmovdqa %ymm1, 0x480(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %ebx
movl %ecx, 0x110(%rsp)
andb %cl, %bl
vbroadcastss 0x2a18fe(%rip), %xmm4 # 0x1f20ec4
je 0x1c80086
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x320(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm5
vminps %xmm5, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm5, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x272887(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x50(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x260(%rsp)
vmovaps 0x360(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x420(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x26c3b7(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x460(%rsp,%rcx), %xmm8
vmovss 0x780(%rsp,%rcx), %xmm9
vmovaps 0x240(%rsp), %xmm0
vucomiss 0x26c353(%rip), %xmm0 # 0x1eeba24
vmovss 0x118(%rsp), %xmm0
jae 0x1c7f71d
vmovaps 0x240(%rsp), %xmm0
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x272780(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x54(%rsp)
movl $0x4, %r14d
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x250(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x26cfb9(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x300(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x2c0(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x340(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x320(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x2e0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x271819(%rip){1to4}, %xmm1, %xmm10 # 0x1ef0fec
vmovaps %xmm4, 0x3a0(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x26c22d(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
jb 0x1c7f820
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c7f832
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm10, %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x26ceca(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x26cec6(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a165a(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x2a0(%rsp)
vfnmadd213ss 0x271780(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x58(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x34(%rsp)
vmovaps %xmm0, 0x2b0(%rsp)
jb 0x1c7f899
vsqrtss %xmm0, %xmm0, %xmm15
jmp 0x1c7f8e1
vmovaps %xmm3, 0xf0(%rsp)
vmovss %xmm4, 0x24(%rsp)
vmovss %xmm5, 0xe0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xe0(%rsp), %xmm5
vmovss 0x24(%rsp), %xmm4
vmovaps 0xf0(%rsp), %xmm3
vmovss 0x34(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm15
vmovaps 0x220(%rsp), %xmm11
vmovaps 0x200(%rsp), %xmm18
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm6
vmulps %xmm6, %xmm10, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vdpps $0x7f, %xmm0, %xmm11, %xmm14
vaddss 0x26cdf2(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm17
vmulss 0x26cdd3(%rip), %xmm17, %xmm16 # 0x1eec718
vmulss 0x26cdcd(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x26c0cd(%rip), %xmm0 # 0x1eeba24
jb 0x1c7f962
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c7f9f3
vmovss %xmm13, 0x24(%rsp)
vmovaps %xmm14, 0xe0(%rsp)
vmovss %xmm15, 0x4c(%rsp)
vmovss %xmm16, 0x48(%rsp)
vmovaps %xmm17, 0x290(%rsp)
vmovss %xmm19, 0x44(%rsp)
vmovaps %xmm6, 0x280(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x280(%rsp), %xmm6
vmovss 0x44(%rsp), %xmm19
vmovaps 0x290(%rsp), %xmm17
vmovss 0x48(%rsp), %xmm16
vmovss 0x4c(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm14
vmovss 0x34(%rsp), %xmm7
vmovss 0x24(%rsp), %xmm13
vmovaps 0x200(%rsp), %xmm18
vmovaps 0x220(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm9
vmovss 0x270fb7(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x2c0(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x2715b5(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x2e0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x320(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x300(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x340(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x2b0(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm10, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmovss 0x58(%rsp), %xmm2
vmulss 0x2a0(%rsp), %xmm2, %xmm2
vmulss 0x54(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2a13e4(%rip){1to4}, %xmm10, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm6, %xmm4
vmovaps 0xf0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x50(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm15, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm11, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm7
vmovaps 0x250(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm11, %xmm5
vmulss %xmm17, %xmm19, %xmm2
vmulss %xmm17, %xmm17, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm11, %xmm6
vaddss %xmm2, %xmm16, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm5 # xmm5 = -(xmm14 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm14, %xmm6 # xmm6 = -(xmm14 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm14, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm8, %xmm8
vsubss %xmm4, %xmm9, %xmm9
vbroadcastss 0x2a1317(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
jbe 0x1c7fd71
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x260(%rsp), %xmm3
vfmadd231ss 0x2722e3(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c7fd71
vaddss 0x270(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x5c(%rsp), %xmm9
jb 0x1c7fd74
movq 0x28(%rsp), %rax
vmovss 0x200(%r12,%rax,4), %xmm5
vucomiss %xmm9, %xmm5
jb 0x1c7fd74
vucomiss 0x26be0b(%rip), %xmm8 # 0x1eeba24
jb 0x1c7fd74
vmovss 0x26caed(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c7fd74
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x26cace(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x26cac8(%rip), %xmm18, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq 0x28(%rsp), %rax
movl 0x240(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c7fd91
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vfmadd213ps %xmm10, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm10
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm10, %xmm10, %xmm3 # xmm3 = xmm10[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c7fd96
cmpq $0x0, 0x40(%r15)
jne 0x1c7fd96
movq 0x28(%rsp), %rcx
vmovss %xmm9, 0x200(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x300(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x340(%r12,%rcx,4)
vmovss %xmm0, 0x380(%r12,%rcx,4)
vmovss %xmm8, 0x3c0(%r12,%rcx,4)
movl $0x0, 0x400(%r12,%rcx,4)
movq 0x1d8(%rsp), %rax
movl %eax, 0x440(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x480(%r12,%rcx,4)
movq 0x38(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%rcx,4)
jmp 0x1c7fd74
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c80060
testb %al, %al
je 0x1c7f740
jmp 0x1c80060
movq %rcx, %r15
jmp 0x1c7fd74
movq 0x38(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm8, %zmm1
vbroadcastss 0x292955(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x2a111d(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x880(%rsp)
vmovaps %zmm3, 0x8c0(%rsp)
vmovaps %zmm0, 0x900(%rsp)
vmovaps %zmm1, 0x940(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovdqa64 0x840(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa00(%rsp)
movq 0x1b8(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rdx)
vmovdqa %ymm0, 0x40(%rdx)
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
movq 0x28(%rsp), %rax
vmovss %xmm9, 0x200(%r12,%rax,4)
vmovaps 0x7c0(%rsp), %zmm0
vmovaps %zmm0, 0x3c0(%rsp)
leaq 0x3c0(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x180(%rsp)
movq %r12, 0x188(%rsp)
leaq 0x880(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x10, 0x198(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c7ff24
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vbroadcastss 0x2a0fa0(%rip), %xmm4 # 0x1f20ec4
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c80047
movq 0x38(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c7ffa0
testb $0x2, (%rcx)
jne 0x1c7ff5a
testb $0x40, 0x3e(%r15)
je 0x1c7ffa0
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vbroadcastss 0x2a0f24(%rip), %xmm4 # 0x1f20ec4
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c80047
movq 0x188(%rsp), %rax
movq 0x190(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c80056
movq 0x28(%rsp), %rax
vmovss %xmm5, 0x200(%r12,%rax,4)
movq 0x38(%rsp), %r15
jmp 0x1c7fd74
movq 0x28(%rsp), %rdx
vmovaps 0x420(%rsp), %ymm0
vcmpleps 0x200(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c7f65c
vmovaps 0x400(%rsp), %ymm0
vaddps 0x380(%rsp), %ymm0, %ymm0
vcmpleps 0x200(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovd 0x114(%rsp), %k1
kmovd 0x10c(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x40(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x2a0e0d(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x2a0dff(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x400(%rsp)
vpcmpled 0x480(%rsp), %ymm0, %k0
kmovd %k0, %ebx
movl %ecx, 0x40(%rsp)
andb %cl, %bl
je 0x1c80bac
vmovaps 0x4c0(%rsp), %ymm6
vmovaps 0x340(%rsp), %ymm1
vmovaps 0x300(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x320(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm5
vminps %xmm5, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm5, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x271d4f(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x50(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x260(%rsp)
vmovaps %ymm6, 0x360(%rsp)
vaddps 0x380(%rsp), %ymm6, %ymm0
vmovaps %ymm0, 0x420(%rsp)
kmovd %ebx, %k1
vbroadcastss 0x26b87f(%rip), %ymm0 # 0x1eeba20
vblendmps 0x360(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %bl, %al
movzbl %al, %eax
movzbl %bl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %bl
shlb %cl, %bl
shll $0x2, %ecx
vmovss 0x440(%rsp,%rcx), %xmm8
vmovss 0x4e0(%rsp,%rcx), %xmm9
vmovaps 0x240(%rsp), %xmm0
vucomiss 0x26b81b(%rip), %xmm0 # 0x1eeba24
vmovss 0x11c(%rsp), %xmm0
jae 0x1c80255
vmovaps 0x240(%rsp), %xmm0
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
movzbl %bl, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %ebx
vmulss 0x271c48(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x54(%rsp)
movl $0x4, %r14d
vbroadcastss %xmm9, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x250(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x26c481(%rip), %xmm1 # 0x1eec714
vsubss %xmm8, %xmm1, %xmm3
vbroadcastss %xmm8, %xmm1
vmovaps 0x300(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x2c0(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x340(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x320(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x2e0(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x270ce1(%rip){1to4}, %xmm1, %xmm10 # 0x1ef0fec
vmovaps %xmm4, 0x3a0(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x26b6f5(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm10, 0x60(%rsp)
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
jb 0x1c80358
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c8036a
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm10, %xmm10, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x26c392(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x26c38e(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x2a0b22(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x2a0(%rsp)
vfnmadd213ss 0x270c48(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x58(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x34(%rsp)
vmovaps %xmm0, 0x2b0(%rsp)
jb 0x1c803d1
vsqrtss %xmm0, %xmm0, %xmm15
jmp 0x1c80419
vmovaps %xmm3, 0xf0(%rsp)
vmovss %xmm4, 0x24(%rsp)
vmovss %xmm5, 0xe0(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0xe0(%rsp), %xmm5
vmovss 0x24(%rsp), %xmm4
vmovaps 0xf0(%rsp), %xmm3
vmovss 0x34(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm10
vmovaps %xmm0, %xmm15
vmovaps 0x220(%rsp), %xmm11
vmovaps 0x200(%rsp), %xmm18
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm6
vmulps %xmm6, %xmm10, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vdpps $0x7f, %xmm0, %xmm11, %xmm14
vaddss 0x26c2ba(%rip), %xmm7, %xmm13 # 0x1eec714
vmulps %xmm14, %xmm14, %xmm0
vsubps %xmm0, %xmm18, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm17
vmulss 0x26c29b(%rip), %xmm17, %xmm16 # 0x1eec718
vmulss 0x26c295(%rip), %xmm0, %xmm19 # 0x1eec71c
vucomiss 0x26b595(%rip), %xmm0 # 0x1eeba24
jb 0x1c8049a
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c8052b
vmovss %xmm13, 0x24(%rsp)
vmovaps %xmm14, 0xe0(%rsp)
vmovss %xmm15, 0x4c(%rsp)
vmovss %xmm16, 0x48(%rsp)
vmovaps %xmm17, 0x290(%rsp)
vmovss %xmm19, 0x44(%rsp)
vmovaps %xmm6, 0x280(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x280(%rsp), %xmm6
vmovss 0x44(%rsp), %xmm19
vmovaps 0x290(%rsp), %xmm17
vmovss 0x48(%rsp), %xmm16
vmovss 0x4c(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm14
vmovss 0x34(%rsp), %xmm7
vmovss 0x24(%rsp), %xmm13
vmovaps 0x200(%rsp), %xmm18
vmovaps 0x220(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm10
vmovaps 0x140(%rsp), %ymm26
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0xa0(%rsp), %xmm9
vmovss 0x27047f(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x2c0(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm1) + xmm5
vmovss 0x270a7d(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm8, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x2e0(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x320(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm8
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x300(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x340(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x2b0(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm10, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm10, %xmm1
vsubps %xmm1, %xmm2, %xmm1
vmovss 0x58(%rsp), %xmm2
vmulss 0x2a0(%rsp), %xmm2, %xmm2
vmulss 0x54(%rsp), %xmm9, %xmm3
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm1, %xmm1
vxorps 0x2a08ac(%rip){1to4}, %xmm10, %xmm2 # 0x1f20ec0
vmulps %xmm1, %xmm6, %xmm4
vmovaps 0xf0(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm2, %xmm5
vmovss 0x50(%rsp), %xmm6
vmaxss %xmm3, %xmm6, %xmm1
vdivss %xmm15, %xmm6, %xmm3
vdpps $0x7f, %xmm4, %xmm11, %xmm4
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vfmadd213ss %xmm7, %xmm3, %xmm13 # xmm13 = (xmm3 * xmm13) + xmm7
vmovaps 0x250(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm3
vaddss %xmm4, %xmm5, %xmm4
vdpps $0x7f, %xmm2, %xmm11, %xmm5
vmulss %xmm17, %xmm19, %xmm2
vmulss %xmm17, %xmm17, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vdpps $0x7f, %xmm7, %xmm11, %xmm6
vaddss %xmm2, %xmm16, %xmm7
vfnmadd231ss %xmm4, %xmm14, %xmm5 # xmm5 = -(xmm14 * xmm4) + xmm5
vfnmadd231ss %xmm3, %xmm14, %xmm6 # xmm6 = -(xmm14 * xmm3) + xmm6
vpermilps $0xff, 0x3a0(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[3,3,3,3]
vfmsub213ss %xmm0, %xmm7, %xmm5 # xmm5 = (xmm7 * xmm5) - xmm0
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm5, %xmm3, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm14, %xmm6
vmulss %xmm3, %xmm2, %xmm3
vsubss %xmm3, %xmm6, %xmm3
vmulss %xmm5, %xmm14, %xmm5
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm5, %xmm4, %xmm4
vsubss %xmm3, %xmm8, %xmm8
vsubss %xmm4, %xmm9, %xmm9
vbroadcastss 0x2a07df(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm14, %xmm3
vucomiss %xmm3, %xmm13
jbe 0x1c808a9
vaddss %xmm1, %xmm13, %xmm1
vmovaps 0x260(%rsp), %xmm3
vfmadd231ss 0x2717ab(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c808a9
vaddss 0x270(%rsp), %xmm9, %xmm9
movb $0x1, %r13b
vucomiss 0x5c(%rsp), %xmm9
jb 0x1c808ac
movq 0x28(%rsp), %rax
vmovss 0x200(%r12,%rax,4), %xmm4
vucomiss %xmm9, %xmm4
jb 0x1c808ac
vucomiss 0x26b2d3(%rip), %xmm8 # 0x1eeba24
jb 0x1c808ac
vmovss 0x26bfb5(%rip), %xmm1 # 0x1eec714
vucomiss %xmm8, %xmm1
jb 0x1c808ac
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm18, %xmm2, %xmm1 # xmm1 = xmm18[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x26bf96(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x26bf90(%rip), %xmm18, %xmm3 # 0x1eec71c
movq (%r15), %rax
movq 0x1e8(%rax), %rax
movq %r15, %rcx
movq 0x98(%rsp), %rdx
movq (%rax,%rdx,8), %r15
movq 0x28(%rsp), %rax
movl 0x240(%r12,%rax,4), %eax
testl %eax, 0x34(%r15)
je 0x1c808c9
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm11, %xmm1
vfmadd213ps %xmm10, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm10
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm10, %xmm10, %xmm3 # xmm3 = xmm10[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c808ce
cmpq $0x0, 0x40(%r15)
jne 0x1c808ce
movq 0x28(%rsp), %rcx
vmovss %xmm9, 0x200(%r12,%rcx,4)
vextractps $0x1, %xmm0, 0x300(%r12,%rcx,4)
vextractps $0x2, %xmm0, 0x340(%r12,%rcx,4)
vmovss %xmm0, 0x380(%r12,%rcx,4)
vmovss %xmm8, 0x3c0(%r12,%rcx,4)
movl $0x0, 0x400(%r12,%rcx,4)
movq 0x1d8(%rsp), %rax
movl %eax, 0x440(%r12,%rcx,4)
movq 0x98(%rsp), %rax
movl %eax, 0x480(%r12,%rcx,4)
movq 0x38(%rsp), %r15
movq 0x8(%r15), %rax
movl (%rax), %eax
movl %eax, 0x4c0(%r12,%rcx,4)
movq 0x8(%r15), %rax
movl 0x4(%rax), %eax
movl %eax, 0x500(%r12,%rcx,4)
jmp 0x1c808ac
xorl %r13d, %r13d
subq $0x1, %r14
setb %al
testb %r13b, %r13b
jne 0x1c80b86
testb %al, %al
je 0x1c80278
jmp 0x1c80b86
movq %rcx, %r15
jmp 0x1c808ac
movq 0x38(%rsp), %rcx
movq 0x8(%rcx), %rax
vbroadcastss %xmm8, %zmm1
vbroadcastss 0x291e1d(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x2a05e5(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x880(%rsp)
vmovaps %zmm3, 0x8c0(%rsp)
vmovaps %zmm0, 0x900(%rsp)
vmovaps %zmm1, 0x940(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x980(%rsp)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x9c0(%rsp)
vmovdqa64 0x840(%rsp), %zmm0
vmovdqa64 %zmm0, 0xa00(%rsp)
movq 0x1b8(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rdx)
vmovdqa %ymm0, 0x40(%rdx)
vmovdqa %ymm0, 0x20(%rdx)
vmovdqa %ymm0, (%rdx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa80(%rsp)
movq 0x28(%rsp), %rax
vmovss %xmm9, 0x200(%r12,%rax,4)
vmovaps 0x7c0(%rsp), %zmm0
vmovaps %zmm0, 0x3c0(%rsp)
leaq 0x3c0(%rsp), %rax
movq %rax, 0x170(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x178(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x180(%rsp)
movq %r12, 0x188(%rsp)
leaq 0x880(%rsp), %rax
movq %rax, 0x190(%rsp)
movl $0x10, 0x198(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
vmovaps %xmm8, 0xc0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vmovss %xmm4, 0x60(%rsp)
je 0x1c80a53
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c80b6d
movq 0x38(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c80ac6
testb $0x2, (%rcx)
jne 0x1c80a89
testb $0x40, 0x3e(%r15)
je 0x1c80ac6
leaq 0x170(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm4
vmovaps 0xa0(%rsp), %xmm9
vmovaps 0xc0(%rsp), %xmm8
vmovaps 0x1e0(%rsp), %ymm21
vmovaps 0x120(%rsp), %ymm30
vmovaps 0x140(%rsp), %ymm26
vmovdqa64 0x3c0(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
kortestw %k1, %k1
je 0x1c80b6d
movq 0x188(%rsp), %rax
movq 0x190(%rsp), %rcx
vmovaps (%rcx), %zmm0
vmovups %zmm0, 0x300(%rax) {%k1}
vmovaps 0x40(%rcx), %zmm0
vmovups %zmm0, 0x340(%rax) {%k1}
vmovaps 0x80(%rcx), %zmm0
vmovups %zmm0, 0x380(%rax) {%k1}
vmovaps 0xc0(%rcx), %zmm0
vmovups %zmm0, 0x3c0(%rax) {%k1}
vmovaps 0x100(%rcx), %zmm0
vmovups %zmm0, 0x400(%rax) {%k1}
vmovdqa64 0x140(%rcx), %zmm0
vmovdqu32 %zmm0, 0x440(%rax) {%k1}
vmovdqa64 0x180(%rcx), %zmm0
vmovdqu32 %zmm0, 0x480(%rax) {%k1}
vmovdqa64 0x1c0(%rcx), %zmm0
vmovdqa32 %zmm0, 0x4c0(%rax) {%k1}
vmovdqa64 0x200(%rcx), %zmm0
vmovdqa32 %zmm0, 0x500(%rax) {%k1}
jmp 0x1c80b7c
movq 0x28(%rsp), %rax
vmovss %xmm4, 0x200(%r12,%rax,4)
movq 0x38(%rsp), %r15
jmp 0x1c808ac
movq 0x28(%rsp), %rdx
vmovaps 0x420(%rsp), %ymm0
vcmpleps 0x200(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %bl
jne 0x1c80194
vmovdqa 0x480(%rsp), %ymm1
vpcmpltd 0x400(%rsp), %ymm1, %k1
vmovaps 0x780(%rsp), %ymm0
vpcmpltd 0x500(%rsp), %ymm1, %k2
vmovaps 0x380(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
vbroadcastss 0x200(%r12,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0x110(%rsp), %ecx
andb %al, %cl
vmovaps 0x4c0(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x40(%rsp), %esi
andb %al, %sil
orb %cl, %sil
movq 0x2d8(%rsp), %r14
je 0x1c80c99
movq 0x1c8(%rsp), %r11
movl %r11d, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0xae0(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0xb00(%rsp,%rax)
vmovaps 0x1a0(%rsp), %xmm2
vmovlps %xmm2, 0xb20(%rsp,%rax)
leal 0x1(%r14), %ecx
movl %ecx, 0xb28(%rsp,%rax)
incl %r11d
movq 0x1c0(%rsp), %r8
movq 0x1d0(%rsp), %r10
vmovaps 0x2a0293(%rip), %ymm3 # 0x1f20f20
jmp 0x1c80cc2
vmovaps 0x2a0289(%rip), %ymm3 # 0x1f20f20
jmp 0x1c80cb9
movq 0x1c0(%rsp), %r8
movq 0x1d0(%rsp), %r10
vmovaps 0x2a026f(%rip), %ymm3 # 0x1f20f20
movq 0x1c8(%rsp), %r11
vmovaps 0x1a0(%rsp), %xmm2
movl %r11d, %eax
testl %eax, %eax
je 0x1c80e14
leal -0x1(%rax), %r11d
leaq (%r11,%r11,2), %rcx
shlq $0x5, %rcx
vmovaps 0xb00(%rsp,%rcx), %ymm0
movzbl 0xae0(%rsp,%rcx), %esi
vaddps 0x380(%rsp), %ymm0, %ymm1
vcmpleps 0x200(%r12,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %r9d
andl %esi, %r9d
je 0x1c80db8
kmovd %r9d, %k1
vbroadcastss 0x26ad06(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %r9b, %sil
je 0x1c80d52
movzbl %sil, %edi
jmp 0x1c80d56
movzbl %r9b, %edi
leaq (%rsp,%rcx), %rsi
addq $0xae0, %rsi # imm = 0xAE0
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r14d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %r9d, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
je 0x1c80d92
movl %eax, %r11d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps %ymm3, %ymm1, %ymm0 # ymm0 = (ymm0 * ymm3) + ymm1
vmovaps %ymm0, 0x880(%rsp)
vmovsd 0x880(%rsp,%rcx,4), %xmm2
movl %r11d, %eax
testb %r9b, %r9b
je 0x1c80cc5
jmp 0x1c7eaaf
vcmpleps %ymm16, %ymm1, %k2
vbroadcastss 0x26bdab(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x26ac3d(%rip), %ymm16 # 0x1eeba20
vblendmps %ymm1, %ymm16, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm16, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %al, %sil
movl %esi, %eax
jmp 0x1c7eff9
blsrl %r10d, %eax
vmovaps 0x520(%rsp), %ymm0
vcmpleps 0x200(%r12,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %r10d
andl %eax, %r10d
jne 0x1c7e770
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 16>::occluded_h<embree::avx512::SweepCurve1IntersectorK<embree::HermiteCurveT, 16>, embree::avx512::Occluded1KEpilog1<16, true>>(embree::avx512::CurvePrecalculationsK<16>&, embree::RayK<16>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Shadow-ray (occlusion) query for one ray lane `k` of a K-wide ray packet
// against an 8-wide CurveNi leaf holding Hermite curve segments.
// Returns true as soon as ANY curve in the leaf occludes the ray (early out);
// returns false only after every candidate segment has been rejected.
static __forceinline bool occluded_h(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Per-segment conservative entry distances, filled by the box test below.
vfloat<M> tNear;
// Coarse test: intersect ray k against the quantized per-curve bounds stored
// in `prim`; `valid` marks the segments whose boxes the ray hits.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
// Iterate the surviving segments as a scalar bit mask.
size_t mask = movemask(valid);
while (mask)
{
// bscf = bit-scan-forward + clear: pops the lowest set bit, giving the
// index of the next candidate segment.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
// All segments of a CurveNi block share one geomID; primIDs are per slot.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the Hermite control data (endpoints p0/p1 and tangents t0/t1)
// for this curve segment from the geometry.
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
// Exact sweep test; the Epilog runs filter callbacks and commits the hit.
// For occlusion queries one confirmed hit suffices.
if (Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,Epilog(ray,k,context,geomID,primID)))
return true;
// ray.tfar[k] may have shrunk (e.g. via filter callbacks); drop remaining
// candidates whose box entry lies beyond the current far clip.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x40, %rsp
subq $0xc00, %rsp # imm = 0xC00
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %r9
leaq (%r9,%r9,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%rdx,4), %xmm1
vmovss 0x100(%r11,%rdx,4), %xmm2
vinsertps $0x10, 0x40(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x80(%r11,%rdx,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x140(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, %r14
vinsertps $0x20, 0x180(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%r9,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
addq %rax, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2917ab(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x29ff74(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x29fee2(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm4, %ymm5
vbroadcastss 0x26fff7(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm28, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm28, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x26b6e1(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %r9
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%r8,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%r9), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc0(%r11,%rdx,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x29edf3(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x200(%r11,%rdx,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x29edcb(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2d97c7(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x6e0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %al
je 0x1c83b57
kandb %k0, %k1, %k0
kmovd %k0, %ecx
movzbl %cl, %r15d
leaq 0x840(%rsp), %rcx
addq $0x1c0, %rcx # imm = 0x1C0
movq %rcx, 0x2d0(%rsp)
movl $0x1, %ecx
shlxl %edx, %ecx, %ecx
kmovd %ecx, %k0
vpmovm2d %k0, %zmm0
vmovdqa64 %zmm0, 0x780(%rsp)
movq %rdx, 0x20(%rsp)
movq %r11, 0x18(%rsp)
movq %r8, 0x2d8(%rsp)
movq %r14, 0x28(%rsp)
tzcntq %r15, %rcx
movl 0x2(%r8), %r12d
movl 0x6(%r8,%rcx,4), %ebx
movq (%r14), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%r12,8), %r10
movq %rbx, %rcx
imulq 0x68(%r10), %rcx
movq 0x58(%r10), %rsi
movq 0x90(%r10), %rdi
movl (%rsi,%rcx), %ecx
movq 0xa0(%r10), %rsi
movq %rsi, %r9
imulq %rcx, %r9
vmovaps (%rdi,%r9), %xmm0
leaq 0x1(%rcx), %r9
imulq %r9, %rsi
vmovaps (%rdi,%rsi), %xmm1
movq 0x100(%r10), %rsi
movq 0x110(%r10), %rdi
imulq %rdi, %rcx
vmovss (%r11,%rdx,4), %xmm2
vinsertps $0x1c, 0x40(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x80(%r11,%rdx,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
imulq %r9, %rdi
vbroadcastss 0x100(%r11,%rdx,4), %ymm12
vbroadcastss 0x140(%r11,%rdx,4), %ymm21
vunpcklps %xmm21, %xmm12, %xmm3 # xmm3 = xmm12[0],xmm21[0],xmm12[1],xmm21[1]
vbroadcastss 0x180(%r11,%rdx,4), %ymm15
vinsertps $0x28, %xmm15, %xmm3, %xmm9 # xmm9 = xmm3[0,1],xmm15[0],zero
vmovaps (%rsi,%rcx), %xmm3
vbroadcastss 0x2dc1e8(%rip), %xmm5 # 0x1f5d46c
vfnmadd132ps %xmm5, %xmm0, %xmm3 # xmm3 = -(xmm3 * xmm5) + xmm0
vmovaps (%rsi,%rdi), %xmm4
vfmadd132ps %xmm5, %xmm1, %xmm4 # xmm4 = (xmm4 * xmm5) + xmm1
vaddps %xmm3, %xmm0, %xmm5
vaddps %xmm4, %xmm5, %xmm5
vaddps %xmm5, %xmm1, %xmm5
vmulps 0x29c053(%rip){1to4}, %xmm5, %xmm5 # 0x1f1d2fc
vsubps %xmm2, %xmm5, %xmm5
vdpps $0x7f, %xmm9, %xmm5, %xmm5
vmovss 0xc0(%r11,%rdx,4), %xmm10
vdpps $0x7f, %xmm9, %xmm9, %xmm11
vpbroadcastd %ebx, %zmm6
vmovdqa64 %zmm6, 0x800(%rsp)
vxorps %xmm8, %xmm8, %xmm8
vmovss %xmm11, %xmm8, %xmm6
vrcp14ss %xmm6, %xmm8, %xmm6
vmovaps %xmm6, %xmm7
vfnmadd213ss 0x26fd0b(%rip), %xmm11, %xmm7 # xmm7 = -(xmm11 * xmm7) + mem
vmulss %xmm7, %xmm6, %xmm6
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %ymm6
vmovaps %xmm9, 0x370(%rsp)
vmovaps %ymm6, 0x560(%rsp)
vfmadd231ps %xmm6, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm6) + xmm2
vblendps $0x8, %xmm8, %xmm2, %xmm2 # xmm2 = xmm2[0,1,2],xmm8[3]
vsubps %xmm2, %xmm0, %xmm6
vsubps %xmm2, %xmm4, %xmm4
vsubps %xmm2, %xmm3, %xmm3
vsubps %xmm2, %xmm1, %xmm7
vbroadcastss %xmm6, %ymm8
vbroadcastss 0x2913cf(%rip), %ymm0 # 0x1f12704
vpermps %ymm6, %ymm0, %ymm9
vbroadcastss 0x29fb99(%rip), %ymm1 # 0x1f20edc
vpermps %ymm6, %ymm1, %ymm26
vbroadcastss 0x29fb86(%rip), %ymm2 # 0x1f20ed8
vmovaps %ymm6, 0x4c0(%rsp)
vpermps %ymm6, %ymm2, %ymm25
vbroadcastss %xmm3, %ymm6
vpermps %ymm3, %ymm0, %ymm14
vpermps %ymm3, %ymm1, %ymm16
vmovaps %ymm3, 0x480(%rsp)
vpermps %ymm3, %ymm2, %ymm17
vbroadcastss %xmm4, %ymm18
vpermps %ymm4, %ymm0, %ymm19
vpermps %ymm4, %ymm1, %ymm20
vmovaps %ymm4, 0x4a0(%rsp)
vpermps %ymm4, %ymm2, %ymm22
vmovaps %ymm14, %ymm4
vbroadcastss %xmm7, %ymm23
vpermps %ymm7, %ymm0, %ymm24
vpermps %ymm7, %ymm1, %ymm30
vmovaps %ymm7, 0x460(%rsp)
vpermps %ymm7, %ymm2, %ymm27
vmovaps %ymm6, %ymm2
vmovaps %ymm15, 0x520(%rsp)
vmulss %xmm15, %xmm15, %xmm0
vfmadd231ps %ymm21, %ymm21, %ymm0 # ymm0 = (ymm21 * ymm21) + ymm0
vmovaps %ymm12, 0x540(%rsp)
vfmadd231ps %ymm12, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm12) + ymm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x720(%rsp)
vandps %ymm28, %ymm0, %ymm0
vmovaps %ymm0, 0x660(%rsp)
vmovss %xmm10, 0x5c(%rsp)
vmovaps %xmm5, 0x3e0(%rsp)
vsubss %xmm5, %xmm10, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x700(%rsp)
xorl %ebx, %ebx
xorl %r13d, %r13d
movl $0x1, %ecx
movq %rcx, 0x50(%rsp)
movq %r12, 0x2e8(%rsp)
vpbroadcastd %r12d, %zmm0
vmovdqa64 %zmm0, 0x7c0(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xfc(%rsp)
vmovaps %xmm11, 0x360(%rsp)
vsqrtss %xmm11, %xmm11, %xmm0
vmovss %xmm0, 0xf8(%rsp)
vmovsd 0x26b274(%rip), %xmm5 # 0x1eec6f0
vbroadcastss 0x26b28f(%rip), %ymm3 # 0x1eec714
vmovaps %ymm21, 0x1c0(%rsp)
vmovaps %ymm8, 0x380(%rsp)
vmovaps %ymm9, 0x1e0(%rsp)
vmovaps %ymm26, 0x1a0(%rsp)
vmovaps %ymm25, 0x2a0(%rsp)
vmovaps %ymm6, 0x440(%rsp)
vmovaps %ymm14, 0x320(%rsp)
vmovaps %ymm16, 0x280(%rsp)
vmovaps %ymm17, 0x180(%rsp)
vmovaps %ymm18, 0x160(%rsp)
vmovaps %ymm19, 0x260(%rsp)
vmovaps %ymm20, 0x240(%rsp)
vmovaps %ymm22, 0x140(%rsp)
vmovaps %ymm23, 0x120(%rsp)
vmovaps %ymm24, 0x220(%rsp)
vmovaps %ymm30, 0x100(%rsp)
vmovaps %ymm27, 0x200(%rsp)
vmovshdup %xmm5, %xmm0 # xmm0 = xmm5[1,1,3,3]
vsubss %xmm5, %xmm0, %xmm1
vmulss 0x29f9af(%rip), %xmm1, %xmm6 # 0x1f20ed0
vmovaps %xmm5, 0x3c0(%rsp)
vbroadcastss %xmm5, %ymm5
vbroadcastss %xmm1, %ymm0
vmovaps %ymm5, 0x60(%rsp)
vmovaps %ymm0, 0x340(%rsp)
vfmadd231ps 0x29f9d4(%rip), %ymm0, %ymm5 # ymm5 = (ymm0 * mem) + ymm5
vsubps %ymm5, %ymm3, %ymm7
vmulps %ymm5, %ymm2, %ymm1
vmovaps %ymm2, %ymm14
vmulps %ymm5, %ymm4, %ymm2
vmovaps %ymm3, %ymm0
vmulps %ymm5, %ymm16, %ymm3
vmovaps %ymm4, %ymm15
vmulps %ymm5, %ymm17, %ymm4
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm2 # ymm2 = (ymm7 * ymm9) + ymm2
vfmadd231ps %ymm26, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm26) + ymm3
vfmadd231ps %ymm25, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm25) + ymm4
vmulps %ymm5, %ymm18, %ymm8
vmulps %ymm5, %ymm19, %ymm9
vmulps %ymm5, %ymm20, %ymm10
vmulps %ymm5, %ymm22, %ymm11
vfmadd231ps %ymm14, %ymm7, %ymm8 # ymm8 = (ymm7 * ymm14) + ymm8
vfmadd231ps %ymm15, %ymm7, %ymm9 # ymm9 = (ymm7 * ymm15) + ymm9
vfmadd231ps %ymm16, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm16) + ymm10
vfmadd231ps %ymm17, %ymm7, %ymm11 # ymm11 = (ymm7 * ymm17) + ymm11
vmulps %ymm5, %ymm23, %ymm12
vmulps %ymm5, %ymm24, %ymm13
vmulps %ymm5, %ymm30, %ymm14
vmulps %ymm5, %ymm27, %ymm15
vfmadd231ps %ymm18, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm18) + ymm12
vfmadd231ps %ymm19, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm19) + ymm13
vfmadd231ps %ymm20, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm20) + ymm14
vfmadd231ps %ymm22, %ymm7, %ymm15 # ymm15 = (ymm7 * ymm22) + ymm15
vmulps %ymm8, %ymm5, %ymm16
vmulps %ymm9, %ymm5, %ymm17
vmulps %ymm10, %ymm5, %ymm18
vmulps %ymm11, %ymm5, %ymm19
vfmadd231ps %ymm1, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm1) + ymm16
vfmadd231ps %ymm2, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm2) + ymm17
vfmadd231ps %ymm3, %ymm7, %ymm18 # ymm18 = (ymm7 * ymm3) + ymm18
vfmadd231ps %ymm4, %ymm7, %ymm19 # ymm19 = (ymm7 * ymm4) + ymm19
vmulps %ymm5, %ymm12, %ymm1
vmulps %ymm5, %ymm13, %ymm12
vmulps %ymm5, %ymm14, %ymm13
vmulps %ymm5, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm7, %ymm1 # ymm1 = (ymm7 * ymm8) + ymm1
vfmadd231ps %ymm9, %ymm7, %ymm12 # ymm12 = (ymm7 * ymm9) + ymm12
vfmadd231ps %ymm10, %ymm7, %ymm13 # ymm13 = (ymm7 * ymm10) + ymm13
vfmadd231ps %ymm11, %ymm7, %ymm14 # ymm14 = (ymm7 * ymm11) + ymm14
vmulps %ymm1, %ymm5, %ymm4
vmulps %ymm5, %ymm12, %ymm3
vmulps %ymm13, %ymm5, %ymm29
vmulps %ymm5, %ymm14, %ymm5
vfmadd231ps %ymm16, %ymm7, %ymm4 # ymm4 = (ymm7 * ymm16) + ymm4
vfmadd231ps %ymm17, %ymm7, %ymm3 # ymm3 = (ymm7 * ymm17) + ymm3
vfmadd231ps %ymm18, %ymm7, %ymm29 # ymm29 = (ymm7 * ymm18) + ymm29
vfmadd231ps %ymm7, %ymm19, %ymm5 # ymm5 = (ymm19 * ymm7) + ymm5
vsubps %ymm16, %ymm1, %ymm1
vsubps %ymm17, %ymm12, %ymm7
vsubps %ymm18, %ymm13, %ymm8
vsubps %ymm19, %ymm14, %ymm9
vbroadcastss 0x26f969(%rip), %ymm10 # 0x1ef0fec
vmulps %ymm1, %ymm10, %ymm1
vmulps %ymm7, %ymm10, %ymm7
vmulps %ymm10, %ymm8, %ymm8
vmulps %ymm10, %ymm9, %ymm9
vbroadcastss %xmm6, %ymm6
vmulps %ymm1, %ymm6, %ymm11
vmulps %ymm7, %ymm6, %ymm12
vmulps %ymm6, %ymm8, %ymm13
vmulps %ymm6, %ymm9, %ymm6
vmovaps %ymm4, %ymm8
vmovaps 0x2de66a(%rip), %ymm7 # 0x1f5fd20
vxorps %xmm31, %xmm31, %xmm31
vpermt2ps %ymm31, %ymm7, %ymm8
vmovaps %ymm3, %ymm9
vpermt2ps %ymm31, %ymm7, %ymm9
vmovaps %ymm29, %ymm10
vpermt2ps %ymm31, %ymm7, %ymm10
vaddps %ymm6, %ymm5, %ymm1
vmaxps %ymm1, %ymm5, %ymm14
vminps %ymm1, %ymm5, %ymm1
vmovaps %ymm5, %ymm15
vpermt2ps %ymm31, %ymm7, %ymm15
vmovaps %ymm11, %ymm19
vpermt2ps %ymm31, %ymm7, %ymm19
vmovaps %ymm12, %ymm20
vpermt2ps %ymm31, %ymm7, %ymm20
vmovaps %ymm13, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm2
vpermt2ps %ymm31, %ymm7, %ymm6
vsubps %ymm6, %ymm15, %ymm16
vsubps %ymm4, %ymm8, %ymm7
vsubps %ymm3, %ymm9, %ymm6
vsubps %ymm29, %ymm10, %ymm5
vmulps %ymm13, %ymm6, %ymm17
vfmsub231ps %ymm5, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm5) - ymm17
vmulps %ymm11, %ymm5, %ymm18
vfmsub231ps %ymm7, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm7) - ymm18
vmulps %ymm12, %ymm7, %ymm22
vfmsub231ps %ymm6, %ymm11, %ymm22 # ymm22 = (ymm11 * ymm6) - ymm22
vmulps %ymm22, %ymm22, %ymm22
vfmadd231ps %ymm18, %ymm18, %ymm22 # ymm22 = (ymm18 * ymm18) + ymm22
vfmadd231ps %ymm17, %ymm17, %ymm22 # ymm22 = (ymm17 * ymm17) + ymm22
vmulps %ymm5, %ymm5, %ymm17
vfmadd231ps %ymm6, %ymm6, %ymm17 # ymm17 = (ymm6 * ymm6) + ymm17
vfmadd231ps %ymm7, %ymm7, %ymm17 # ymm17 = (ymm7 * ymm7) + ymm17
vrcp14ps %ymm17, %ymm18
vmovaps %ymm18, %ymm23
vfnmadd213ps %ymm0, %ymm17, %ymm23 # ymm23 = -(ymm17 * ymm23) + ymm0
vfmadd132ps %ymm18, %ymm18, %ymm23 # ymm23 = (ymm23 * ymm18) + ymm18
vmulps %ymm23, %ymm22, %ymm18
vmulps %ymm2, %ymm6, %ymm22
vfmsub231ps %ymm5, %ymm20, %ymm22 # ymm22 = (ymm20 * ymm5) - ymm22
vmulps %ymm19, %ymm5, %ymm24
vfmsub231ps %ymm7, %ymm2, %ymm24 # ymm24 = (ymm2 * ymm7) - ymm24
vmulps %ymm20, %ymm7, %ymm25
vfmsub231ps %ymm6, %ymm19, %ymm25 # ymm25 = (ymm19 * ymm6) - ymm25
vmulps %ymm25, %ymm25, %ymm25
vfmadd231ps %ymm24, %ymm24, %ymm25 # ymm25 = (ymm24 * ymm24) + ymm25
vfmadd231ps %ymm22, %ymm22, %ymm25 # ymm25 = (ymm22 * ymm22) + ymm25
vmulps %ymm23, %ymm25, %ymm22
vmaxps %ymm22, %ymm18, %ymm18
vsqrtps %ymm18, %ymm18
vmaxps %ymm15, %ymm16, %ymm22
vmaxps %ymm22, %ymm14, %ymm14
vaddps %ymm14, %ymm18, %ymm14
vminps %ymm15, %ymm16, %ymm15
vminps %ymm15, %ymm1, %ymm1
vsubps %ymm18, %ymm1, %ymm1
vmulps 0x26f13b(%rip){1to8}, %ymm14, %ymm14 # 0x1ef0940
vmulps 0x26f135(%rip){1to8}, %ymm1, %ymm1 # 0x1ef0944
vmovaps %ymm1, 0xa0(%rsp)
vmulps %ymm14, %ymm14, %ymm1
vrsqrt14ps %ymm17, %ymm15
vmulps 0x26aeef(%rip){1to8}, %ymm17, %ymm14 # 0x1eec71c
vmulps %ymm14, %ymm15, %ymm14
vmulps %ymm15, %ymm15, %ymm16
vmulps %ymm14, %ymm16, %ymm14
vfmadd231ps 0x26aed0(%rip){1to8}, %ymm15, %ymm14 # ymm14 = (ymm15 * mem) + ymm14
vmulps %ymm7, %ymm14, %ymm15
vmulps %ymm14, %ymm6, %ymm16
vmulps %ymm14, %ymm5, %ymm18
vsubps %ymm4, %ymm31, %ymm27
vsubps %ymm3, %ymm31, %ymm28
vmovaps %ymm29, 0x3a0(%rsp)
vsubps %ymm29, %ymm31, %ymm29
vmovaps 0x520(%rsp), %ymm17
vmulps %ymm29, %ymm17, %ymm22
vfmadd231ps %ymm28, %ymm21, %ymm22 # ymm22 = (ymm21 * ymm28) + ymm22
vmovaps 0x540(%rsp), %ymm23
vfmadd231ps %ymm27, %ymm23, %ymm22 # ymm22 = (ymm23 * ymm27) + ymm22
vmulps %ymm29, %ymm29, %ymm24
vfmadd231ps %ymm28, %ymm28, %ymm24 # ymm24 = (ymm28 * ymm28) + ymm24
vfmadd231ps %ymm27, %ymm27, %ymm24 # ymm24 = (ymm27 * ymm27) + ymm24
vmulps %ymm18, %ymm17, %ymm17
vfmadd231ps %ymm21, %ymm16, %ymm17 # ymm17 = (ymm16 * ymm21) + ymm17
vfmadd231ps %ymm23, %ymm15, %ymm17 # ymm17 = (ymm15 * ymm23) + ymm17
vmulps %ymm18, %ymm29, %ymm18
vfmadd231ps %ymm16, %ymm28, %ymm18 # ymm18 = (ymm28 * ymm16) + ymm18
vfmadd231ps %ymm15, %ymm27, %ymm18 # ymm18 = (ymm27 * ymm15) + ymm18
vmulps %ymm17, %ymm17, %ymm16
vmovaps 0x720(%rsp), %ymm15
vsubps %ymm16, %ymm15, %ymm15
vmulps %ymm18, %ymm17, %ymm25
vsubps %ymm25, %ymm22, %ymm22
vaddps %ymm22, %ymm22, %ymm22
vmulps %ymm18, %ymm18, %ymm25
vsubps %ymm25, %ymm24, %ymm23
vmovaps %ymm23, 0x80(%rsp)
vsubps %ymm1, %ymm23, %ymm1
vmulps %ymm22, %ymm22, %ymm25
vmulps 0x26b271(%rip){1to8}, %ymm15, %ymm26 # 0x1eecb8c
vmulps %ymm1, %ymm26, %ymm30
vsubps %ymm30, %ymm25, %ymm30
vcmpnltps %ymm31, %ymm30, %k1
kmovd %k1, %edi
kortestb %k1, %k1
je 0x1c81a1d
vsqrtps %ymm30, %ymm30
vaddps %ymm15, %ymm15, %ymm31
vrcp14ps %ymm31, %ymm23
vfnmadd213ps %ymm0, %ymm23, %ymm31 # ymm31 = -(ymm23 * ymm31) + ymm0
vfmadd132ps %ymm23, %ymm23, %ymm31 # ymm31 = (ymm31 * ymm23) + ymm23
vxorps 0x29f55c(%rip){1to8}, %ymm22, %ymm23 # 0x1f20ec0
vsubps %ymm30, %ymm23, %ymm23
vmulps %ymm31, %ymm23, %ymm23
vsubps %ymm22, %ymm30, %ymm30
vmulps %ymm31, %ymm30, %ymm31
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm23, %ymm30 # ymm30 = (ymm23 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x620(%rsp)
vmovaps %ymm17, %ymm30
vfmadd213ps %ymm18, %ymm31, %ymm30 # ymm30 = (ymm31 * ymm30) + ymm18
vmulps %ymm30, %ymm14, %ymm30
vmovaps %ymm30, 0x600(%rsp)
vbroadcastss 0x26a067(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm23, %ymm0, %ymm30 {%k1}
vbroadcastss 0x26b1bc(%rip), %ymm0 # 0x1eecb84
vblendmps %ymm31, %ymm0, %ymm31 {%k1}
vbroadcastss 0x29f4ed(%rip), %ymm0 # 0x1f20ec4
vmovaps %ymm16, %ymm24
vandps %ymm0, %ymm16, %ymm23
vmovaps 0x660(%rsp), %ymm16
vmaxps %ymm23, %ymm16, %ymm23
vmulps 0x2704b9(%rip){1to8}, %ymm23, %ymm23 # 0x1ef1eb4
vandps %ymm0, %ymm15, %ymm0
vcmpltps %ymm23, %ymm0, %k1 {%k1}
kortestb %k1, %k1
movq 0x50(%rsp), %r12
jne 0x1c83a96
vmovaps %ymm24, %ymm16
jmp 0x1c81a36
vbroadcastss 0x269ff9(%rip), %ymm30 # 0x1eeba20
vbroadcastss 0x26b153(%rip), %ymm31 # 0x1eecb84
movq 0x50(%rsp), %r12
andb $0x7f, %dil
je 0x1c81e53
vmovaps %ymm16, 0x500(%rsp)
vmovss 0x200(%r11,%rdx,4), %xmm0
vsubss 0x3e0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vminps %ymm31, %ymm0, %ymm0
vmovaps 0x700(%rsp), %ymm1
vmaxps %ymm30, %ymm1, %ymm1
vmulps %ymm13, %ymm29, %ymm23
vfmadd213ps %ymm23, %ymm12, %ymm28 # ymm28 = (ymm12 * ymm28) + ymm23
vfmadd213ps %ymm28, %ymm11, %ymm27 # ymm27 = (ymm11 * ymm27) + ymm28
vmovaps 0x520(%rsp), %ymm21
vmulps %ymm13, %ymm21, %ymm13
vmovaps 0x1c0(%rsp), %ymm24
vfmadd231ps %ymm12, %ymm24, %ymm13 # ymm13 = (ymm24 * ymm12) + ymm13
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm11, %ymm16, %ymm13 # ymm13 = (ymm16 * ymm11) + ymm13
vbroadcastss 0x29f409(%rip), %ymm28 # 0x1f20ec4
vandps %ymm28, %ymm13, %ymm11
vbroadcastss 0x26f51d(%rip), %ymm29 # 0x1ef0fe8
vcmpltps %ymm29, %ymm11, %k0
vbroadcastss 0x29f3e4(%rip), %ymm31 # 0x1f20ec0
vxorps %ymm31, %ymm27, %ymm11
vrcp14ps %ymm13, %ymm12
vxorps %ymm31, %ymm13, %ymm23
vmovaps %ymm12, %ymm27
vbroadcastss 0x26ac16(%rip), %ymm30 # 0x1eec714
vfnmadd213ps %ymm30, %ymm13, %ymm27 # ymm27 = -(ymm13 * ymm27) + ymm30
vfmadd132ps %ymm12, %ymm12, %ymm27 # ymm27 = (ymm27 * ymm12) + ymm12
vmulps %ymm11, %ymm27, %ymm11
vcmpltps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x26b05f(%rip), %ymm27 # 0x1eecb84
vblendmps %ymm27, %ymm11, %ymm12 {%k1}
vmaxps %ymm12, %ymm1, %ymm1
vcmpnleps %ymm23, %ymm13, %k1
korb %k1, %k0, %k1
vbroadcastss 0x269edc(%rip), %ymm13 # 0x1eeba20
vmovaps %ymm13, %ymm11 {%k1}
vminps %ymm11, %ymm0, %ymm0
vxorps %xmm23, %xmm23, %xmm23
vsubps %ymm8, %ymm23, %ymm8
vsubps %ymm9, %ymm23, %ymm9
vsubps %ymm10, %ymm23, %ymm10
vmulps %ymm2, %ymm10, %ymm10
vfnmsub231ps %ymm9, %ymm20, %ymm10 # ymm10 = -(ymm20 * ymm9) - ymm10
vfnmadd231ps %ymm8, %ymm19, %ymm10 # ymm10 = -(ymm19 * ymm8) + ymm10
vmulps %ymm2, %ymm21, %ymm8
vfnmsub231ps %ymm20, %ymm24, %ymm8 # ymm8 = -(ymm24 * ymm20) - ymm8
vfnmadd231ps %ymm19, %ymm16, %ymm8 # ymm8 = -(ymm16 * ymm19) + ymm8
vandps %ymm28, %ymm8, %ymm9
vcmpltps %ymm29, %ymm9, %k0
vxorps %ymm31, %ymm10, %ymm9
vrcp14ps %ymm8, %ymm10
vxorps %ymm31, %ymm8, %ymm11
vmovaps %ymm10, %ymm12
vfnmadd213ps %ymm30, %ymm8, %ymm12 # ymm12 = -(ymm8 * ymm12) + ymm30
vfmadd132ps %ymm10, %ymm10, %ymm12 # ymm12 = (ymm12 * ymm10) + ymm10
vmulps %ymm9, %ymm12, %ymm9
vcmpltps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vblendmps %ymm27, %ymm9, %ymm10 {%k1}
vmaxps %ymm10, %ymm1, %ymm1
vcmpnleps %ymm11, %ymm8, %k1
korb %k1, %k0, %k1
vmovaps %ymm13, %ymm9 {%k1}
vminps %ymm9, %ymm0, %ymm8
vmovaps %ymm1, 0x4e0(%rsp)
vcmpleps %ymm8, %ymm1, %k0
kmovd %k0, %ecx
andb %cl, %dil
je 0x1c81e68
vmovaps 0x620(%rsp), %ymm0
vmaxps 0xa0(%rsp), %ymm23, %ymm1
vminps %ymm30, %ymm0, %ymm0
vxorps %xmm10, %xmm10, %xmm10
vmaxps %ymm10, %ymm0, %ymm0
vmovaps 0x600(%rsp), %ymm9
vminps %ymm30, %ymm9, %ymm9
vmovaps 0x29f302(%rip), %ymm11 # 0x1f20f40
vaddps %ymm0, %ymm11, %ymm0
vbroadcastss 0x29c86d(%rip), %ymm12 # 0x1f1e4b8
vmulps %ymm0, %ymm12, %ymm0
vmovaps 0x60(%rsp), %ymm2
vmovaps 0x340(%rsp), %ymm13
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x620(%rsp)
vmaxps %ymm10, %ymm9, %ymm0
vaddps %ymm0, %ymm11, %ymm0
vmulps %ymm0, %ymm12, %ymm0
vfmadd213ps %ymm2, %ymm13, %ymm0 # ymm0 = (ymm13 * ymm0) + ymm2
vmovaps %ymm0, 0x600(%rsp)
vmulps %ymm1, %ymm1, %ymm0
vmovaps 0x80(%rsp), %ymm1
vsubps %ymm0, %ymm1, %ymm11
vmulps %ymm11, %ymm26, %ymm0
vsubps %ymm0, %ymm25, %ymm0
vcmpnltps %ymm10, %ymm0, %k0
kortestb %k0, %k0
vmovaps 0x280(%rsp), %ymm16
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x200(%rsp), %ymm27
je 0x1c81eff
vxorps %xmm20, %xmm20, %xmm20
vcmpnltps %ymm23, %ymm0, %k1
vsqrtps %ymm0, %ymm0
vaddps %ymm15, %ymm15, %ymm1
vrcp14ps %ymm1, %ymm9
vfnmadd213ps %ymm30, %ymm9, %ymm1 # ymm1 = -(ymm9 * ymm1) + ymm30
vfmadd132ps %ymm9, %ymm9, %ymm1 # ymm1 = (ymm1 * ymm9) + ymm9
vxorps 0x29f1c2(%rip){1to8}, %ymm22, %ymm9 # 0x1f20ec0
vsubps %ymm0, %ymm9, %ymm9
vmulps %ymm1, %ymm9, %ymm12
vsubps %ymm22, %ymm0, %ymm0
vmulps %ymm1, %ymm0, %ymm13
vmovaps %ymm17, %ymm0
vfmadd213ps %ymm18, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm18
vmulps %ymm0, %ymm14, %ymm9
vmovaps 0x540(%rsp), %ymm22
vmulps %ymm12, %ymm22, %ymm0
vmovaps 0x1c0(%rsp), %ymm21
vmulps %ymm12, %ymm21, %ymm1
vmovaps 0x520(%rsp), %ymm25
vmulps %ymm12, %ymm25, %ymm10
vmovaps %ymm7, %ymm19
vfmadd213ps %ymm4, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm4
vsubps %ymm19, %ymm0, %ymm0
vmovaps %ymm6, %ymm19
vfmadd213ps %ymm3, %ymm9, %ymm19 # ymm19 = (ymm9 * ymm19) + ymm3
vsubps %ymm19, %ymm1, %ymm1
vmovaps 0x3a0(%rsp), %ymm2
vfmadd213ps %ymm2, %ymm5, %ymm9 # ymm9 = (ymm5 * ymm9) + ymm2
vsubps %ymm9, %ymm10, %ymm9
vfmadd213ps %ymm18, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm17) + ymm18
vmulps %ymm17, %ymm14, %ymm14
vmovaps %ymm22, %ymm19
vmulps %ymm13, %ymm22, %ymm10
vmulps %ymm13, %ymm21, %ymm17
vmulps %ymm13, %ymm25, %ymm18
vfmadd213ps %ymm4, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm7) + ymm4
vsubps %ymm7, %ymm10, %ymm10
vfmadd213ps %ymm3, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm6) + ymm3
vsubps %ymm6, %ymm17, %ymm3
vfmadd213ps %ymm2, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm5) + ymm2
vsubps %ymm5, %ymm18, %ymm5
vbroadcastss 0x269c53(%rip), %ymm2 # 0x1eeba20
vblendmps %ymm12, %ymm2, %ymm4 {%k1}
vbroadcastss 0x26ada8(%rip), %ymm2 # 0x1eecb84
vblendmps %ymm13, %ymm2, %ymm2 {%k1}
vandps 0x500(%rsp), %ymm28, %ymm6
vmovaps 0x660(%rsp), %ymm7
vmaxps %ymm6, %ymm7, %ymm6
vmulps 0x2700b3(%rip){1to8}, %ymm6, %ymm6 # 0x1ef1eb4
vandps %ymm28, %ymm15, %ymm7
vcmpltps %ymm6, %ymm7, %k1 {%k1}
kortestb %k1, %k1
jne 0x1c83ae5
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x100(%rsp), %ymm30
vmovaps %ymm19, %ymm12
jmp 0x1c81f6f
vbroadcastss 0x29f067(%rip), %ymm28 # 0x1f20ec4
vbroadcastss 0x26a8ae(%rip), %ymm3 # 0x1eec714
jmp 0x1c81e6e
vmovaps %ymm30, %ymm3
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x440(%rsp), %ymm2
vmovaps 0x320(%rsp), %ymm4
vmovaps 0x280(%rsp), %ymm16
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x260(%rsp), %ymm19
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x200(%rsp), %ymm27
jmp 0x1c8396e
vxorps %xmm3, %xmm3, %xmm3
vxorps %xmm5, %xmm5, %xmm5
vxorps %xmm0, %xmm0, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vxorps %xmm9, %xmm9, %xmm9
vbroadcastss 0x269b03(%rip), %ymm4 # 0x1eeba20
vbroadcastss 0x26ac5e(%rip), %ymm2 # 0x1eecb84
vmovaps 0x540(%rsp), %ymm12
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x520(%rsp), %ymm25
vmulps %ymm5, %ymm25, %ymm5
vfmadd231ps %ymm3, %ymm21, %ymm5 # ymm5 = (ymm21 * ymm3) + ymm5
vfmadd231ps %ymm10, %ymm12, %ymm5 # ymm5 = (ymm12 * ymm10) + ymm5
vmovaps 0x4e0(%rsp), %ymm6
vmovaps %ymm6, 0x740(%rsp)
vminps %ymm4, %ymm8, %ymm3
vmovaps %ymm3, 0x760(%rsp)
vandps %ymm28, %ymm5, %ymm4
vmaxps %ymm2, %ymm6, %ymm5
vmovaps %ymm5, 0x680(%rsp)
vmovaps %ymm8, 0x6a0(%rsp)
vbroadcastss 0x29ef10(%rip), %ymm2 # 0x1f20ed4
vcmpltps %ymm2, %ymm4, %k1
kmovd %k1, 0xf4(%rsp)
vcmpleps %ymm3, %ymm6, %k1
kmovd %k1, %esi
andb %dil, %sil
vmovaps %ymm5, 0x5c0(%rsp)
vcmpleps %ymm8, %ymm5, %k1
kmovd %k1, %ecx
andb %dil, %cl
movl %ecx, 0x34(%rsp)
orb %sil, %cl
vmovaps %ymm25, %ymm4
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x260(%rsp), %ymm19
vmovaps 0x240(%rsp), %ymm20
je 0x1c82bc0
movq %r15, 0x2e0(%rsp)
movb %al, 0xf(%rsp)
knotb %k0, %k1
vmovaps %ymm2, %ymm3
vmulps %ymm4, %ymm9, %ymm2
vfmadd213ps %ymm2, %ymm21, %ymm1 # ymm1 = (ymm21 * ymm1) + ymm2
vfmadd213ps %ymm1, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm0) + ymm1
vandps %ymm28, %ymm0, %ymm0
vcmpltps %ymm3, %ymm0, %k0
kmovd %k1, 0xec(%rsp)
korb %k1, %k0, %k1
vpbroadcastd 0x29ee70(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x29ee62(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vpbroadcastd %r12d, %ymm1
vmovdqa %ymm0, 0x6c0(%rsp)
vmovdqa %ymm1, 0x640(%rsp)
vpcmpnltd %ymm0, %ymm1, %k0
kmovd %k0, %r12d
movl %esi, 0xf0(%rsp)
andb %sil, %r12b
je 0x1c82bf2
vmovaps 0x4c0(%rsp), %ymm1
vmovaps 0x480(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4a0(%rsp), %ymm3
vmovaps 0x460(%rsp), %ymm4
vminps %xmm4, %xmm3, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm4, %xmm3, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vbroadcastss 0x29edd6(%rip), %xmm2 # 0x1f20ec4
vandps %xmm2, %xmm0, %xmm0
vandps %xmm2, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x26fda1(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x40(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x3d0(%rsp)
vmovaps 0x4e0(%rsp), %ymm0
vaddps 0x560(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x2698d0(%rip), %ymm0 # 0x1eeba20
vblendmps 0x4e0(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x620(%rsp,%rcx), %xmm10
vmovss 0x740(%rsp,%rcx), %xmm11
vmovaps 0x360(%rsp), %xmm0
vucomiss 0x269868(%rip), %xmm0 # 0x1eeba24
vmovss 0xf8(%rsp), %xmm0
jae 0x1c82208
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x26fc94(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x370(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x26a4cd(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm3
vbroadcastss %xmm10, %xmm1
vmovaps 0x480(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x430(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x4c0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4a0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x460(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x26ed2d(%rip){1to4}, %xmm1, %xmm14 # 0x1ef0fec
vmovaps %xmm4, 0x500(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x340(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x269741(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
jb 0x1c8230c
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c8231e
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm14, %xmm14, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x26a3de(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x26a3da(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x29eb6e(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x410(%rsp)
vfnmadd213ss 0x26ec94(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x420(%rsp)
jb 0x1c82387
vsqrtss %xmm0, %xmm0, %xmm31
jmp 0x1c823d1
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm4, 0xc0(%rsp)
vmovss %xmm5, 0x10(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x10(%rsp), %xmm5
vmovss 0xc0(%rsp), %xmm4
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm31
vmovaps 0x340(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm12
vmulps %xmm12, %xmm14, %xmm0
vmovaps %xmm0, 0x400(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x26a307(%rip), %xmm7, %xmm29 # 0x1eec714
vmovaps %xmm0, 0xd0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3a0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xc0(%rsp)
vmulss 0x26a2d2(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x10(%rsp)
vmulss 0x26a2c8(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x48(%rsp)
vucomiss 0x2695c2(%rip), %xmm0 # 0x1eeba24
jb 0x1c8246a
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c824b9
vmovss %xmm29, 0x3c(%rsp)
vmovss %xmm31, 0x38(%rsp)
vmovaps %xmm12, 0x3f0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x3f0(%rsp), %xmm12
vmovss 0x38(%rsp), %xmm31
vmovss 0x14(%rsp), %xmm7
vmovss 0x3c(%rsp), %xmm29
vmovaps 0x340(%rsp), %xmm15
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x29ea01(%rip), %ymm30 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm18
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x280(%rsp), %ymm24
vmovaps 0x180(%rsp), %ymm22
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x120(%rsp), %ymm13
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x100(%rsp), %ymm19
vmovaps 0x200(%rsp), %ymm26
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm11
vmovss 0x26e48c(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x430(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vmovss 0x26ea8a(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm10, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x460(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4a0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm10, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm10
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x480(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4c0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x420(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x410(%rsp), %xmm1, %xmm3
vmulss 0x44(%rsp), %xmm11, %xmm1
vmovss 0x40(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x29e8af(%rip){1to4}, %xmm14, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm12, %xmm2
vmovaps 0x400(%rsp), %xmm12
vdpps $0x7f, %xmm12, %xmm3, %xmm4
vdivss %xmm31, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm29 # xmm29 = (xmm5 * xmm29) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x370(%rsp), %xmm7
vdpps $0x7f, %xmm12, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xc0(%rsp), %xmm6
vmulss 0x48(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x10(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xd0(%rsp), %xmm31
vfnmadd231ss %xmm4, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm5) + xmm7
vpermilps $0xff, 0x500(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm31, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm31, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm11, %xmm11
vbroadcastss 0x29e7d5(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm31, %xmm3
vucomiss %xmm3, %xmm29
movb $0x1, %al
jbe 0x1c82751
vaddss %xmm29, %xmm1, %xmm1
vmovaps 0x3d0(%rsp), %xmm3
vfmadd231ss 0x26f79d(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c82751
vaddss 0x3e0(%rsp), %xmm11, %xmm11
vucomiss 0x5c(%rsp), %xmm11
jb 0x1c8274c
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x200(%rcx,%rax,4), %xmm5
vucomiss %xmm11, %xmm5
jae 0x1c82767
xorl %eax, %eax
xorl %r14d, %r14d
testb %al, %al
je 0x1c82b88
decq %r15
jne 0x1c8222c
jmp 0x1c82b85
xorl %eax, %eax
vucomiss 0x2692b3(%rip), %xmm10 # 0x1eeba24
jb 0x1c8274e
vmovss 0x269f99(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c8274e
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3a0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x269f77(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x269f73(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x28(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x2e8(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x240(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c8274c
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c827fb
cmpq $0x0, 0x48(%r14)
jne 0x1c827fb
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c82751
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm14, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm14
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm14, %xmm1 # xmm1 = (xmm14 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %zmm1
vbroadcastss 0x28fea6(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x29e66e(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x840(%rsp)
vmovaps %zmm3, 0x880(%rsp)
vmovaps %zmm0, 0x8c0(%rsp)
vmovaps %zmm1, 0x900(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x940(%rsp)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x980(%rsp)
vmovdqa64 0x7c0(%rsp), %zmm0
vmovdqa64 %zmm0, 0x9c0(%rsp)
movq 0x2d0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa00(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm11, 0x200(%rcx,%rax,4)
vmovaps 0x780(%rsp), %zmm0
vmovaps %zmm0, 0x580(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0x2f0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x2f8(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x300(%rsp)
movq %rcx, 0x308(%rsp)
leaq 0x840(%rsp), %rax
movq %rax, 0x310(%rsp)
movl $0x10, 0x318(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c82a40
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm19
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x120(%rsp), %ymm13
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm24
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm18
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x29e48e(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x29e484(%rip), %ymm30 # 0x1f20ec4
vmovdqa64 0x580(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c82b61
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c82b29
testb $0x2, (%rcx)
jne 0x1c82a7e
testb $0x40, 0x3e(%r14)
je 0x1c82b29
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm26
vmovaps 0x100(%rsp), %ymm19
vmovaps 0x220(%rsp), %ymm27
vmovaps 0x120(%rsp), %ymm13
vmovaps 0x140(%rsp), %ymm23
vmovaps 0x240(%rsp), %ymm8
vmovaps 0x260(%rsp), %ymm20
vmovaps 0x160(%rsp), %ymm17
vmovaps 0x180(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm24
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm18
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x29e3a5(%rip), %xmm4 # 0x1f20ec4
vbroadcastss 0x29e39b(%rip), %ymm30 # 0x1f20ec4
vmovdqa64 0x580(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x308(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x26a034(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %r14b
jmp 0x1c82b64
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c827f4
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x200(%rcx,%rax,4)
jmp 0x1c827f4
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %r13b
movq 0x20(%rsp), %rdx
movq 0x18(%rsp), %r11
vmovaps 0x5e0(%rsp), %ymm0
vcmpleps 0x200(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c82142
jmp 0x1c82c4c
vbroadcastss 0x269b4b(%rip), %ymm3 # 0x1eec714
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x440(%rsp), %ymm2
vmovaps 0x320(%rsp), %ymm4
jmp 0x1c8396e
vmovaps %ymm20, %ymm8
vmovaps %ymm19, %ymm20
vmovaps %ymm30, %ymm19
vmovaps %ymm23, %ymm13
vmovaps %ymm22, %ymm23
vmovaps %ymm17, %ymm22
vmovaps %ymm18, %ymm17
vmovaps %ymm26, %ymm18
vmovaps %ymm27, %ymm26
vmovaps %ymm24, %ymm27
vmovaps %ymm16, %ymm24
vmovaps %ymm28, %ymm30
vbroadcastss 0x29e281(%rip), %xmm4 # 0x1f20ec4
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x560(%rsp), %ymm3
vaddps 0x5c0(%rsp), %ymm3, %ymm0
vcmpleps 0x200(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd 0xf4(%rsp), %k1
kmovd 0xec(%rsp), %k2
korb %k2, %k1, %k1
kmovd %k0, %eax
movl 0x34(%rsp), %ecx
andb %al, %cl
vpbroadcastd 0x29e247(%rip), %ymm0 # 0x1f20edc
vpblendmd 0x29e239(%rip){1to8}, %ymm0, %ymm0 {%k1} # 0x1f20ed8
vmovdqa %ymm0, 0x5c0(%rsp)
vpcmpled 0x640(%rsp), %ymm0, %k0
kmovd %k0, %r12d
movl %ecx, 0x34(%rsp)
andb %cl, %r12b
je 0x1c837fa
vmovaps 0x680(%rsp), %ymm7
vmovaps 0x4c0(%rsp), %ymm1
vmovaps 0x480(%rsp), %ymm2
vminps %xmm2, %xmm1, %xmm0
vmaxps %xmm2, %xmm1, %xmm1
vmovaps 0x4a0(%rsp), %ymm5
vmovaps 0x460(%rsp), %ymm6
vminps %xmm6, %xmm5, %xmm2
vminps %xmm2, %xmm0, %xmm0
vmaxps %xmm6, %xmm5, %xmm2
vmaxps %xmm2, %xmm1, %xmm1
vandps %xmm4, %xmm0, %xmm0
vandps %xmm4, %xmm1, %xmm2
vmaxps %xmm2, %xmm0, %xmm0
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm2, %xmm2
vshufpd $0x1, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[1,0]
vmaxss %xmm2, %xmm0, %xmm0
vmulss 0x26f188(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x40(%rsp)
vshufps $0xff, %xmm1, %xmm1, %xmm0 # xmm0 = xmm1[3,3,3,3]
vmovaps %xmm0, 0x3d0(%rsp)
vmovaps %ymm7, 0x4e0(%rsp)
vaddps %ymm7, %ymm3, %ymm0
vmovaps %ymm0, 0x5e0(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x268cbc(%rip), %ymm0 # 0x1eeba20
vblendmps 0x4e0(%rsp), %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %ecx
movb $0x1, %r14b
shlb %cl, %r14b
shll $0x2, %ecx
vmovss 0x600(%rsp,%rcx), %xmm10
vmovss 0x6a0(%rsp,%rcx), %xmm11
vmovaps 0x360(%rsp), %xmm0
vucomiss 0x268c54(%rip), %xmm0 # 0x1eeba24
vmovss 0xfc(%rsp), %xmm0
jae 0x1c82e1c
vmovaps 0x360(%rsp), %xmm0
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
kmovw %k1, 0x60(%rsp)
vzeroupper
callq 0x6aa20
kmovw 0x60(%rsp), %k1
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
movzbl %r14b, %eax
kmovd %eax, %k0
kandnb %k1, %k0, %k0
kmovd %k0, %r12d
vmulss 0x26f080(%rip), %xmm0, %xmm0 # 0x1ef1eb4
vmovss %xmm0, 0x44(%rsp)
movl $0x5, %r15d
vbroadcastss %xmm11, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd132ps 0x370(%rsp), %xmm1, %xmm0 # xmm0 = (xmm0 * mem) + xmm1
vmovss 0x2698b9(%rip), %xmm1 # 0x1eec714
vsubss %xmm10, %xmm1, %xmm3
vbroadcastss %xmm10, %xmm1
vmovaps 0x480(%rsp), %ymm5
vmulps %xmm1, %xmm5, %xmm2
vmovaps %xmm3, 0x430(%rsp)
vbroadcastss %xmm3, %xmm3
vfmadd231ps 0x4c0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm3 * mem) + xmm2
vmovaps 0x4a0(%rsp), %ymm6
vmulps %xmm1, %xmm6, %xmm4
vfmadd231ps %xmm5, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm5) + xmm4
vmulps 0x460(%rsp), %xmm1, %xmm5
vfmadd231ps %xmm6, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm6) + xmm5
vmulps %xmm4, %xmm1, %xmm6
vfmadd231ps %xmm2, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm2) + xmm6
vmulps %xmm5, %xmm1, %xmm2
vfmadd231ps %xmm4, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm4) + xmm2
vmulps %xmm2, %xmm1, %xmm4
vfmadd231ps %xmm3, %xmm6, %xmm4 # xmm4 = (xmm6 * xmm3) + xmm4
vsubps %xmm6, %xmm2, %xmm1
vmulps 0x26e119(%rip){1to4}, %xmm1, %xmm14 # 0x1ef0fec
vmovaps %xmm4, 0x500(%rsp)
vsubps %xmm4, %xmm0, %xmm0
vmovaps %xmm0, 0x340(%rsp)
vdpps $0x7f, %xmm0, %xmm0, %xmm0
vucomiss 0x268b2d(%rip), %xmm0 # 0x1eeba24
vmovaps %xmm14, 0x60(%rsp)
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
jb 0x1c82f20
vsqrtss %xmm0, %xmm0, %xmm7
jmp 0x1c82f32
vzeroupper
callq 0x6aa20
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm7
vdpps $0x7f, %xmm14, %xmm14, %xmm0
vxorps %xmm1, %xmm1, %xmm1
vmovss %xmm0, %xmm1, %xmm2 # xmm2 = xmm0[0],xmm1[1,2,3]
vrsqrt14ss %xmm2, %xmm1, %xmm3
vmulss 0x2697ca(%rip), %xmm3, %xmm4 # 0x1eec718
vmulss 0x2697c6(%rip), %xmm0, %xmm5 # 0x1eec71c
vrcp14ss %xmm2, %xmm1, %xmm2
vxorps 0x29df5a(%rip){1to4}, %xmm0, %xmm1 # 0x1f20ec0
vmovaps %xmm2, 0x410(%rsp)
vfnmadd213ss 0x26e080(%rip), %xmm0, %xmm2 # xmm2 = -(xmm0 * xmm2) + mem
vmovss %xmm2, 0x4c(%rsp)
vucomiss %xmm1, %xmm0
vmovss %xmm7, 0x14(%rsp)
vmovaps %xmm0, 0x420(%rsp)
jb 0x1c82f9b
vsqrtss %xmm0, %xmm0, %xmm31
jmp 0x1c82fe5
vmovaps %xmm3, 0xd0(%rsp)
vmovss %xmm4, 0xc0(%rsp)
vmovss %xmm5, 0x10(%rsp)
vzeroupper
callq 0x6aa20
vmovss 0x10(%rsp), %xmm5
vmovss 0xc0(%rsp), %xmm4
vmovaps 0xd0(%rsp), %xmm3
vmovss 0x14(%rsp), %xmm7
vmovaps 0x60(%rsp), %xmm14
vmovaps %xmm0, %xmm31
vmovaps 0x340(%rsp), %xmm15
vmulss %xmm3, %xmm5, %xmm0
vmulss %xmm3, %xmm3, %xmm1
vmulss %xmm1, %xmm0, %xmm0
vaddss %xmm0, %xmm4, %xmm0
vbroadcastss %xmm0, %xmm13
vmulps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x400(%rsp)
vdpps $0x7f, %xmm0, %xmm15, %xmm0
vaddss 0x2696f3(%rip), %xmm7, %xmm29 # 0x1eec714
vmovaps %xmm0, 0xd0(%rsp)
vmulps %xmm0, %xmm0, %xmm0
vmovaps 0x3a0(%rsp), %xmm1
vsubps %xmm0, %xmm1, %xmm0
vxorps %xmm2, %xmm2, %xmm2
vmovss %xmm0, %xmm2, %xmm1 # xmm1 = xmm0[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmovaps %xmm1, 0xc0(%rsp)
vmulss 0x2696be(%rip), %xmm1, %xmm1 # 0x1eec718
vmovss %xmm1, 0x10(%rsp)
vmulss 0x2696b4(%rip), %xmm0, %xmm1 # 0x1eec71c
vmovss %xmm1, 0x48(%rsp)
vucomiss 0x2689ae(%rip), %xmm0 # 0x1eeba24
jb 0x1c8307e
vsqrtss %xmm0, %xmm0, %xmm0
jmp 0x1c830cd
vmovss %xmm29, 0x3c(%rsp)
vmovss %xmm31, 0x38(%rsp)
vmovaps %xmm13, 0x3f0(%rsp)
vzeroupper
callq 0x6aa20
vmovaps 0x3f0(%rsp), %xmm13
vmovss 0x38(%rsp), %xmm31
vmovss 0x14(%rsp), %xmm7
vmovss 0x3c(%rsp), %xmm29
vmovaps 0x340(%rsp), %xmm15
vmovaps 0x60(%rsp), %xmm14
vbroadcastss 0x29dded(%rip), %ymm28 # 0x1f20ec4
vmovaps 0x1c0(%rsp), %ymm21
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x280(%rsp), %ymm16
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x260(%rsp), %ymm19
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x200(%rsp), %ymm27
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm11
vmovss 0x26d871(%rip), %xmm3 # 0x1ef09dc
vmovaps %xmm3, %xmm1
vmovaps 0x430(%rsp), %xmm5
vfmadd213ss %xmm5, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm1) + xmm5
vmovss 0x26de6f(%rip), %xmm4 # 0x1ef0ff4
vmulss %xmm4, %xmm1, %xmm1
vmulss %xmm4, %xmm10, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps 0x460(%rsp), %xmm2, %xmm2
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4a0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vmovaps %xmm3, %xmm2
vfmadd213ss %xmm10, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm2) + xmm10
vmulss %xmm4, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x480(%rsp), %xmm1, %xmm2 # xmm2 = (xmm2 * mem) + xmm1
vmulss %xmm4, %xmm5, %xmm1
vbroadcastss %xmm1, %xmm1
vfmadd132ps 0x4c0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm1 * mem) + xmm2
vbroadcastss 0x420(%rsp), %xmm2
vmulps %xmm2, %xmm1, %xmm2
vdpps $0x7f, %xmm1, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm14, %xmm1
vsubps %xmm1, %xmm2, %xmm2
vmovss 0x4c(%rsp), %xmm1
vmulss 0x410(%rsp), %xmm1, %xmm3
vmulss 0x44(%rsp), %xmm11, %xmm1
vmovss 0x40(%rsp), %xmm6
vmaxss %xmm1, %xmm6, %xmm1
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm2, %xmm2
vxorps 0x29dc94(%rip){1to4}, %xmm14, %xmm3 # 0x1f20ec0
vmulps %xmm2, %xmm13, %xmm2
vmovaps 0x400(%rsp), %xmm13
vdpps $0x7f, %xmm13, %xmm3, %xmm4
vdivss %xmm31, %xmm6, %xmm5
vfmadd213ss %xmm1, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm7) + xmm1
vdpps $0x7f, %xmm2, %xmm15, %xmm2
vfmadd213ss %xmm7, %xmm5, %xmm29 # xmm29 = (xmm5 * xmm29) + xmm7
vaddss %xmm2, %xmm4, %xmm4
vmovaps 0x370(%rsp), %xmm7
vdpps $0x7f, %xmm13, %xmm7, %xmm5
vdpps $0x7f, %xmm3, %xmm15, %xmm3
vmovaps 0xc0(%rsp), %xmm6
vmulss 0x48(%rsp), %xmm6, %xmm2
vmulss %xmm6, %xmm6, %xmm6
vmulss %xmm6, %xmm2, %xmm2
vaddss 0x10(%rsp), %xmm2, %xmm6
vdpps $0x7f, %xmm7, %xmm15, %xmm7
vmovaps 0xd0(%rsp), %xmm31
vfnmadd231ss %xmm4, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm4) + xmm3
vfnmadd231ss %xmm5, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm5) + xmm7
vpermilps $0xff, 0x500(%rsp), %xmm2 # xmm2 = mem[3,3,3,3]
vsubss %xmm2, %xmm0, %xmm2
vshufps $0xff, %xmm14, %xmm14, %xmm0 # xmm0 = xmm14[3,3,3,3]
vfmsub213ss %xmm0, %xmm6, %xmm3 # xmm3 = (xmm6 * xmm3) - xmm0
vmulss %xmm6, %xmm7, %xmm6
vmulss %xmm3, %xmm5, %xmm7
vfmsub231ss %xmm6, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm6) - xmm7
vdivss %xmm7, %xmm6, %xmm6
vdivss %xmm7, %xmm3, %xmm3
vdivss %xmm7, %xmm5, %xmm5
vdivss %xmm7, %xmm4, %xmm4
vmulss %xmm6, %xmm31, %xmm6
vmulss %xmm5, %xmm2, %xmm5
vsubss %xmm5, %xmm6, %xmm5
vmulss %xmm3, %xmm31, %xmm3
vmulss %xmm4, %xmm2, %xmm4
vsubss %xmm3, %xmm4, %xmm3
vsubss %xmm5, %xmm10, %xmm10
vsubss %xmm3, %xmm11, %xmm11
vbroadcastss 0x29dbba(%rip), %xmm4 # 0x1f20ec4
vandps %xmm4, %xmm31, %xmm3
vucomiss %xmm3, %xmm29
movb $0x1, %al
jbe 0x1c83377
vaddss %xmm29, %xmm1, %xmm1
vmovaps 0x3d0(%rsp), %xmm3
vfmadd231ss 0x26eb82(%rip), %xmm3, %xmm1 # xmm1 = (xmm3 * mem) + xmm1
vandps %xmm4, %xmm2, %xmm2
vucomiss %xmm2, %xmm1
jbe 0x1c83377
vaddss 0x3e0(%rsp), %xmm11, %xmm11
vucomiss 0x5c(%rsp), %xmm11
vmovaps 0x320(%rsp), %ymm4
jb 0x1c83370
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss 0x200(%rcx,%rax,4), %xmm5
vucomiss %xmm11, %xmm5
jae 0x1c83396
xorl %eax, %eax
xorl %r14d, %r14d
jmp 0x1c83380
vmovaps 0x320(%rsp), %ymm4
testb %al, %al
je 0x1c837c5
decq %r15
jne 0x1c82e40
jmp 0x1c837c2
xorl %eax, %eax
vucomiss 0x268684(%rip), %xmm10 # 0x1eeba24
jb 0x1c83372
vmovss 0x26936a(%rip), %xmm1 # 0x1eec714
vucomiss %xmm10, %xmm1
jb 0x1c83372
vxorps %xmm2, %xmm2, %xmm2
vmovaps 0x3a0(%rsp), %xmm3
vmovss %xmm3, %xmm2, %xmm1 # xmm1 = xmm3[0],xmm2[1,2,3]
vrsqrt14ss %xmm1, %xmm2, %xmm1
vmulss 0x269348(%rip), %xmm1, %xmm2 # 0x1eec718
vmulss 0x269344(%rip), %xmm3, %xmm3 # 0x1eec71c
movq 0x28(%rsp), %rdx
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq 0x2e8(%rsp), %rcx
movq (%rax,%rcx,8), %r14
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
movl 0x240(%rcx,%rax,4), %eax
testl %eax, 0x34(%r14)
je 0x1c83370
movq 0x10(%rdx), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c8342a
cmpq $0x0, 0x48(%r14)
jne 0x1c8342a
movb $0x1, %r14b
xorl %eax, %eax
jmp 0x1c83380
vmulss %xmm1, %xmm3, %xmm3
vmulss %xmm1, %xmm1, %xmm1
vmulss %xmm1, %xmm3, %xmm1
vaddss %xmm1, %xmm2, %xmm1
vbroadcastss %xmm1, %xmm1
vmulps %xmm1, %xmm15, %xmm1
vfmadd213ps %xmm14, %xmm1, %xmm0 # xmm0 = (xmm1 * xmm0) + xmm14
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,2,0,3]
vmulps %xmm3, %xmm1, %xmm1
vfmsub231ps %xmm2, %xmm14, %xmm1 # xmm1 = (xmm14 * xmm2) - xmm1
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,2,0,3]
vshufps $0xd2, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,0,1,3]
vmulps %xmm1, %xmm0, %xmm0
vfmsub231ps %xmm3, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm3) - xmm0
movq 0x28(%rsp), %rdx
movq 0x8(%rdx), %rax
vbroadcastss %xmm10, %zmm1
vbroadcastss 0x28f277(%rip), %zmm2 # 0x1f12704
vpermps %zmm0, %zmm2, %zmm2
vbroadcastss 0x29da3f(%rip), %zmm3 # 0x1f20edc
vpermps %zmm0, %zmm3, %zmm3
vbroadcastss %xmm0, %zmm0
vmovaps %zmm2, 0x840(%rsp)
vmovaps %zmm3, 0x880(%rsp)
vmovaps %zmm0, 0x8c0(%rsp)
vmovaps %zmm1, 0x900(%rsp)
vxorps %xmm0, %xmm0, %xmm0
vmovaps %zmm0, 0x940(%rsp)
vmovaps 0x800(%rsp), %zmm0
vmovaps %zmm0, 0x980(%rsp)
vmovdqa64 0x7c0(%rsp), %zmm0
vmovdqa64 %zmm0, 0x9c0(%rsp)
movq 0x2d0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x60(%rcx)
vmovdqa %ymm0, 0x40(%rcx)
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %zmm0
vmovaps %zmm0, 0xa00(%rsp)
vbroadcastss 0x4(%rax), %zmm0
vmovaps %zmm0, 0xa40(%rsp)
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm11, 0x200(%rcx,%rax,4)
vmovaps 0x780(%rsp), %zmm0
vmovaps %zmm0, 0x580(%rsp)
leaq 0x580(%rsp), %rax
movq %rax, 0x2f0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x2f8(%rsp)
movq 0x8(%rdx), %rax
movq %rax, 0x300(%rsp)
movq %rcx, 0x308(%rsp)
leaq 0x840(%rsp), %rax
movq %rax, 0x310(%rsp)
movl $0x10, 0x318(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
vmovaps %xmm10, 0xa0(%rsp)
vmovaps %xmm11, 0x80(%rsp)
vmovss %xmm5, 0x60(%rsp)
je 0x1c83676
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm27
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x260(%rsp), %ymm19
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm16
vmovaps 0x320(%rsp), %ymm4
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x29d84e(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x580(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k0
kortestw %k0, %k0
je 0x1c8379e
movq 0x28(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c83766
testb $0x2, (%rcx)
jne 0x1c836b4
testb $0x40, 0x3e(%r14)
je 0x1c83766
leaq 0x2f0(%rsp), %rdi
vzeroupper
callq *%rax
vmovss 0x60(%rsp), %xmm5
vmovaps 0x80(%rsp), %xmm11
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x200(%rsp), %ymm27
vmovaps 0x100(%rsp), %ymm30
vmovaps 0x220(%rsp), %ymm24
vmovaps 0x120(%rsp), %ymm23
vmovaps 0x140(%rsp), %ymm22
vmovaps 0x240(%rsp), %ymm20
vmovaps 0x260(%rsp), %ymm19
vmovaps 0x160(%rsp), %ymm18
vmovaps 0x180(%rsp), %ymm17
vmovaps 0x280(%rsp), %ymm16
vmovaps 0x320(%rsp), %ymm4
vmovaps 0x2a0(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x1e0(%rsp), %ymm9
vmovaps 0x380(%rsp), %ymm8
vmovaps 0x1c0(%rsp), %ymm21
vbroadcastss 0x29d75e(%rip), %ymm28 # 0x1f20ec4
vmovdqa64 0x580(%rsp), %zmm0
vptestmd %zmm0, %zmm0, %k1
movq 0x308(%rsp), %rax
vmovaps 0x200(%rax), %zmm0
vbroadcastss 0x2693f7(%rip), %zmm0 {%k1} # 0x1eecb84
vmovaps %zmm0, 0x200(%rax)
kortestw %k1, %k1
setne %r14b
jmp 0x1c837a1
xorl %r14d, %r14d
testb %r14b, %r14b
jne 0x1c83423
movq 0x20(%rsp), %rax
movq 0x18(%rsp), %rcx
vmovss %xmm5, 0x200(%rcx,%rax,4)
jmp 0x1c83423
xorl %r14d, %r14d
andb $0x1, %r14b
orb %r14b, %r13b
movq 0x20(%rsp), %rdx
movq 0x18(%rsp), %r11
vmovaps 0x5e0(%rsp), %ymm0
vcmpleps 0x200(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovd %k0, %eax
andb %al, %r12b
jne 0x1c82d56
jmp 0x1c83854
vmovaps 0x320(%rsp), %ymm4
vmovaps %ymm30, %ymm28
vmovaps %ymm24, %ymm16
vmovaps %ymm27, %ymm24
vmovaps %ymm26, %ymm27
vmovaps %ymm18, %ymm26
vmovaps %ymm17, %ymm18
vmovaps %ymm22, %ymm17
vmovaps %ymm23, %ymm22
vmovaps %ymm13, %ymm23
vmovaps %ymm19, %ymm30
vmovaps %ymm20, %ymm19
vmovaps %ymm8, %ymm20
vmovaps 0x380(%rsp), %ymm8
vmovdqa 0x640(%rsp), %ymm1
vpcmpltd 0x5c0(%rsp), %ymm1, %k1
vmovaps 0x740(%rsp), %ymm0
vpcmpltd 0x6c0(%rsp), %ymm1, %k2
vmovaps 0x560(%rsp), %ymm3
vaddps %ymm0, %ymm3, %ymm1
vbroadcastss 0x200(%r11,%rdx,4), %ymm2
vcmpleps %ymm2, %ymm1, %k0 {%k2}
kmovd %k0, %eax
movl 0xf0(%rsp), %ecx
andb %al, %cl
vmovaps 0x680(%rsp), %ymm1
vaddps %ymm1, %ymm3, %ymm3
vcmpleps %ymm2, %ymm3, %k0 {%k1}
kmovd %k0, %eax
movl 0x34(%rsp), %esi
andb %al, %sil
orb %cl, %sil
je 0x1c8393e
movl %ebx, %eax
leaq (%rax,%rax,2), %rax
shlq $0x5, %rax
movb %sil, 0xaa0(%rsp,%rax)
kmovd %ecx, %k1
vmovaps %ymm0, %ymm1 {%k1}
vmovaps %ymm1, 0xac0(%rsp,%rax)
vmovaps 0x3c0(%rsp), %xmm0
vmovlps %xmm0, 0xae0(%rsp,%rax)
movq 0x50(%rsp), %r12
leal 0x1(%r12), %ecx
movl %ecx, 0xae8(%rsp,%rax)
incl %ebx
movq 0x2d8(%rsp), %r8
movq 0x28(%rsp), %r14
vbroadcastss 0x268ded(%rip), %ymm3 # 0x1eec714
movb 0xf(%rsp), %al
movq 0x2e0(%rsp), %r15
vmovaps 0x440(%rsp), %ymm2
jmp 0x1c8396e
movq 0x2d8(%rsp), %r8
movq 0x28(%rsp), %r14
vbroadcastss 0x268dc0(%rip), %ymm3 # 0x1eec714
movb 0xf(%rsp), %al
movq 0x2e0(%rsp), %r15
vmovaps 0x440(%rsp), %ymm2
movq 0x50(%rsp), %r12
testl %ebx, %ebx
je 0x1c83b27
leal -0x1(%rbx), %r9d
leaq (%r9,%r9,2), %rcx
shlq $0x5, %rcx
vmovaps 0xac0(%rsp,%rcx), %ymm0
movzbl 0xaa0(%rsp,%rcx), %esi
vaddps 0x560(%rsp), %ymm0, %ymm1
vcmpleps 0x200(%r11,%rdx,4){1to8}, %ymm1, %k0
kmovb %k0, %r10d
andl %esi, %r10d
je 0x1c83a77
kmovd %r10d, %k1
vbroadcastss 0x26805d(%rip), %ymm1 # 0x1eeba20
vblendmps %ymm0, %ymm1, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %esi
andb %r10b, %sil
je 0x1c839fb
movzbl %sil, %edi
jmp 0x1c839ff
movzbl %r10b, %edi
leaq (%rsp,%rcx), %rsi
addq $0xaa0, %rsi # imm = 0xAA0
vmovss 0x44(%rsi), %xmm0
tzcntl %edi, %ecx
movb $0x1, %dil
shlb %cl, %dil
movl 0x48(%rsi), %r12d
movzbl %dil, %edi
kmovd %edi, %k0
kmovd %r10d, %k1
kandnb %k1, %k0, %k0
kmovb %k0, (%rsi)
kortestb %k0, %k0
vmovaps 0x440(%rsp), %ymm2
je 0x1c83a44
movl %ebx, %r9d
vbroadcastss 0x40(%rsi), %ymm1
vsubss %xmm1, %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vfmadd132ps 0x29d4c4(%rip), %ymm1, %ymm0 # ymm0 = (ymm0 * mem) + ymm1
vmovaps %ymm0, 0x840(%rsp)
vmovsd 0x840(%rsp,%rcx,4), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
movl %r9d, %ebx
testb %r10b, %r10b
je 0x1c8396e
movq %r12, 0x50(%rsp)
vmovaps 0x3c0(%rsp), %xmm5
jmp 0x1c81511
vcmpleps 0x29d45f(%rip), %ymm1, %k2 # 0x1f20f00
vbroadcastss 0x2690da(%rip), %ymm1 # 0x1eecb84
vbroadcastss 0x267f6c(%rip), %ymm23 # 0x1eeba20
vblendmps %ymm1, %ymm23, %ymm0 {%k2}
vmovaps %ymm0, %ymm30 {%k1}
vblendmps %ymm23, %ymm1, %ymm0 {%k2}
kmovd %k2, %ecx
vmovaps %ymm0, %ymm31 {%k1}
knotb %k1, %k0
kmovd %k0, %esi
orb %cl, %sil
andb %dil, %sil
movl %esi, %edi
jmp 0x1c81a15
vcmpleps %ymm20, %ymm11, %k2
vbroadcastss 0x26908f(%rip), %ymm7 # 0x1eecb84
vbroadcastss 0x267f22(%rip), %ymm11 # 0x1eeba20
vblendmps %ymm7, %ymm11, %ymm6 {%k2}
vmovaps %ymm6, %ymm4 {%k1}
vblendmps %ymm11, %ymm7, %ymm6 {%k2}
vmovaps %ymm6, %ymm2 {%k1}
knotb %k1, %k1
korb %k2, %k1, %k1
kandb %k0, %k1, %k0
jmp 0x1c81e18
testb $0x1, %r13b
jne 0x1c83b57
blsrl %r15d, %eax
vmovaps 0x6e0(%rsp), %ymm0
vcmpleps 0x200(%r11,%rdx,4){1to8}, %ymm0, %k0
kmovb %k0, %r15d
andl %eax, %r15d
setne %al
jne 0x1c811cc
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersector1<8>::intersect_h<embree::avx512::RibbonCurve1Intersector1<embree::HermiteCurveT, 8>, embree::avx512::Intersect1EpilogMU<8, true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersect a single ray with all curves packed into one CurveNi<8> primitive:
// a vectorized coarse test selects candidate curve slots, then each surviving
// candidate is intersected exactly using its Hermite control data.
// 'ray' is updated in place when a closer hit is committed (via the Epilog).
static __forceinline void intersect_h(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
// Coarse vectorized test of the ray against all N curve slots at once;
// 'tNear' receives per-slot entry distances, 'valid' flags possible hits.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Walk the candidate bitmask, one curve per iteration.
while (mask)
{
// bscf: extracts the index of the lowest set bit and clears it from
// 'mask' (the loop relies on this to terminate).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1); // statistics counter; compiles away unless stats are enabled
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Gather the Hermite control data (endpoints p0/p1, tangents t0/t1) for this curve.
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
// Exact intersection; on success the Epilog commits geomID/primID and the
// hit record into 'ray' (which also shrinks ray.tfar).
Intersector().intersect(pre,ray,context,geom,primID,p0,t0,p1,t1,Epilog(ray,context,geomID,primID));
// Cull remaining candidates whose coarse entry distance now lies beyond
// the (possibly updated) current hit distance.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x5e0, %rsp # imm = 0x5E0
movq %rcx, %r9
movzbl 0x1(%rcx), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %r8
vbroadcastss 0x12(%r9,%r8), %xmm0
movq %rdx, 0x18(%rsp)
movq %rsi, %r15
movq %rdi, %r11
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%r8), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps 0x10(%rsi), %xmm0, %xmm0
vpmovsxbd 0x6(%r9,%rcx,4), %ymm1
vpmovsxbd 0x6(%r9,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r9,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r9,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r9,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x28eabb(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x29d289(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x29d1fe(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x26d310(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x2689fe(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %rdi
subq %rcx, %rdi
vpmovsxwd 0x6(%r9,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r9,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r9,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r9,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc(%r15){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x29c10b(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x20(%r15){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x29c0e7(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2d6ae9(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x5a0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c85b8d
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
leaq 0x4a3480(%rip), %r8 # 0x21272e4
leaq 0x4a5899(%rip), %r12 # 0x2129704
tzcntq %r13, %rax
movl 0x2(%r9), %ecx
movl 0x6(%r9,%rax,4), %edx
movq 0x18(%rsp), %rax
movq (%rax), %rax
movq 0x1e8(%rax), %rax
movq %rcx, 0x40(%rsp)
movq (%rax,%rcx,8), %rbx
movq %rdx, 0x48(%rsp)
movq %rdx, %rax
imulq 0x68(%rbx), %rax
movq 0x58(%rbx), %rcx
movl (%rcx,%rax), %edx
movq 0xa0(%rbx), %rcx
movq %rcx, %rax
imulq %rdx, %rax
leaq 0x1(%rdx), %rsi
imulq %rsi, %rcx
movq 0x110(%rbx), %rdi
imulq %rdi, %rdx
imulq %rsi, %rdi
movq 0x100(%rbx), %rsi
vmovaps (%rsi,%rdx), %xmm8
movl 0x248(%rbx), %r10d
vmovaps (%rsi,%rdi), %xmm9
movslq %r10d, %rdx
movq %rdx, %rsi
shlq $0x6, %rsi
leaq (%rsi,%rdx,4), %r14
movl %r10d, %esi
movq 0x90(%rbx), %rdx
vmovaps (%rdx,%rax), %xmm1
vmovaps (%rdx,%rcx), %xmm7
vbroadcastss 0x2d955f(%rip), %xmm0 # 0x1f5d46c
vfnmadd132ps %xmm0, %xmm1, %xmm8 # xmm8 = -(xmm8 * xmm0) + xmm1
vfmadd132ps %xmm0, %xmm7, %xmm9 # xmm9 = (xmm9 * xmm0) + xmm7
vmovaps (%r15), %xmm0
vmovaps %xmm1, 0x80(%rsp)
vsubps %xmm0, %xmm1, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps 0x10(%r11), %xmm4
vmovaps 0x20(%r11), %xmm5
vmovaps 0x30(%r11), %xmm6
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm10
vfmadd231ps %xmm3, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm3) + xmm10
vfmadd231ps %xmm2, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm2) + xmm10
vsubps %xmm0, %xmm8, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm12
vfmadd231ps %xmm3, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm3) + xmm12
vfmadd231ps %xmm2, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm2) + xmm12
vsubps %xmm0, %xmm9, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm13
vfmadd231ps %xmm3, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm3) + xmm13
vfmadd231ps %xmm2, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm2) + xmm13
vmovaps %xmm7, 0xd0(%rsp)
vsubps %xmm0, %xmm7, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmulps %xmm6, %xmm0, %xmm15
vfmadd231ps %xmm2, %xmm5, %xmm15 # xmm15 = (xmm5 * xmm2) + xmm15
vfmadd231ps %xmm1, %xmm4, %xmm15 # xmm15 = (xmm4 * xmm1) + xmm15
vmovups (%r8,%r14), %ymm3
vbroadcastss %xmm10, %ymm1
vmovups 0x484(%r8,%r14), %ymm11
vbroadcastss 0x28e722(%rip), %ymm0 # 0x1f12704
vpermps %ymm10, %ymm0, %ymm20
vbroadcastss %xmm12, %ymm21
vpermps %ymm12, %ymm0, %ymm22
vmovups 0x908(%r8,%r14), %ymm14
vbroadcastss %xmm13, %ymm24
vpermps %ymm13, %ymm0, %ymm25
vmovups 0xd8c(%r8,%r14), %ymm16
vbroadcastss %xmm15, %ymm26
vpermps %ymm15, %ymm0, %ymm27
vmulps %ymm16, %ymm26, %ymm5
vmulps %ymm16, %ymm27, %ymm4
vfmadd231ps %ymm24, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm24) + ymm5
vfmadd231ps %ymm25, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm25) + ymm4
vfmadd231ps %ymm21, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm21) + ymm5
vfmadd231ps %ymm22, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm22) + ymm4
vfmadd231ps %ymm1, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm1) + ymm5
vmovups (%r12,%r14), %ymm2
vmovups 0x484(%r12,%r14), %ymm17
vmovups 0x908(%r12,%r14), %ymm18
vfmadd231ps %ymm20, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm20) + ymm4
vmovups 0xd8c(%r12,%r14), %ymm19
vmulps %ymm19, %ymm26, %ymm7
vmulps %ymm19, %ymm27, %ymm6
vfmadd231ps %ymm24, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm24) + ymm7
vfmadd231ps %ymm25, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm25) + ymm6
vmovaps %ymm21, 0x280(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm22, 0x260(%rsp)
vfmadd231ps %ymm22, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm22) + ymm6
vmovaps %ymm1, 0x2c0(%rsp)
vfmadd231ps %ymm1, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm1) + ymm7
vmovaps %ymm20, 0x2a0(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm29
vmulps %ymm31, %ymm4, %ymm0
vmulps %ymm29, %ymm5, %ymm1
vsubps %ymm1, %ymm0, %ymm20
vshufps $0xff, %xmm8, %xmm8, %xmm0 # xmm0 = xmm8[3,3,3,3]
vbroadcastsd %xmm0, %ymm21
vshufps $0xff, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vbroadcastss 0xc(%rdx,%rcx), %ymm28
vmulps %ymm16, %ymm28, %ymm0
vfmadd231ps %ymm22, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm21) + ymm0
vbroadcastss 0xc(%rdx,%rax), %ymm30
vfmadd231ps %ymm30, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm30) + ymm0
vmulps %ymm19, %ymm28, %ymm1
vmovaps %ymm22, 0x560(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x580(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vfmadd231ps %ymm30, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm30) + ymm1
vmulps %ymm29, %ymm29, %ymm21
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %ymm30, 0x520(%rsp)
vinsertps $0x30, %xmm30, %xmm10, %xmm20 # xmm20 = xmm10[0,1,2],xmm30[0]
vbroadcastss 0x29cd33(%rip), %xmm21 # 0x1f20ec4
vandps %xmm21, %xmm20, %xmm20
vmovaps %xmm8, 0xf0(%rsp)
vblendps $0x8, %xmm8, %xmm12, %xmm8 # xmm8 = xmm12[0,1,2],xmm8[3]
vandps %xmm21, %xmm8, %xmm8
vmaxps %xmm8, %xmm20, %xmm8
vmovaps %xmm9, 0xe0(%rsp)
vblendps $0x8, %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[0,1,2],xmm9[3]
vandps %xmm21, %xmm9, %xmm9
vmovaps %ymm28, 0x540(%rsp)
vinsertps $0x30, %xmm28, %xmm15, %xmm20 # xmm20 = xmm15[0,1,2],xmm28[0]
vandps %xmm21, %xmm20, %xmm20
vmaxps %xmm20, %xmm9, %xmm9
vmaxps %xmm9, %xmm8, %xmm8
vmovshdup %xmm8, %xmm9 # xmm9 = xmm8[1,1,3,3]
vmaxss %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[1,0]
vmaxss %xmm9, %xmm8, %xmm8
vcvtsi2ss %r10d, %xmm23, %xmm9
vmovaps %xmm9, 0x2e0(%rsp)
vbroadcastss %xmm9, %ymm9
vcmpgtps 0x29cd25(%rip), %ymm9, %k1 {%k1} # 0x1f20f40
vmulss 0x26cdbf(%rip), %xmm8, %xmm22 # 0x1ef0fe4
vbroadcastss 0x29ccae(%rip), %ymm8 # 0x1f20edc
vpermps %ymm10, %ymm8, %ymm9
vmovaps %ymm9, 0x4c0(%rsp)
vpermps %ymm12, %ymm8, %ymm9
vmovaps %ymm9, 0x4a0(%rsp)
vpermps %ymm13, %ymm8, %ymm20
vpermps %ymm15, %ymm8, %ymm21
kortestb %k1, %k1
vmovss 0xc(%r15), %xmm9
vmovaps %ymm20, 0x500(%rsp)
vmovaps %ymm21, 0x4e0(%rsp)
vmovaps %xmm22, 0x440(%rsp)
je 0x1c849ee
vmovaps %xmm9, 0x220(%rsp)
vmulps %ymm19, %ymm21, %ymm8
vfmadd213ps %ymm8, %ymm20, %ymm18 # ymm18 = (ymm20 * ymm18) + ymm8
vmovaps 0x4a0(%rsp), %ymm23
vfmadd213ps %ymm18, %ymm23, %ymm17 # ymm17 = (ymm23 * ymm17) + ymm18
vmovaps 0x4c0(%rsp), %ymm19
vfmadd213ps %ymm17, %ymm19, %ymm2 # ymm2 = (ymm19 * ymm2) + ymm17
vmulps %ymm16, %ymm21, %ymm8
vfmadd213ps %ymm8, %ymm20, %ymm14 # ymm14 = (ymm20 * ymm14) + ymm8
vfmadd213ps %ymm14, %ymm23, %ymm11 # ymm11 = (ymm23 * ymm11) + ymm14
vmovups 0x1210(%r8,%r14), %ymm8
vmovups 0x1694(%r8,%r14), %ymm9
vmovups 0x1b18(%r8,%r14), %ymm12
vmovups 0x1f9c(%r8,%r14), %ymm13
vfmadd213ps %ymm11, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm3) + ymm11
vmulps %ymm13, %ymm26, %ymm11
vmulps %ymm13, %ymm27, %ymm10
vmulps %ymm13, %ymm21, %ymm13
vfmadd231ps %ymm24, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm25) + ymm10
vfmadd231ps %ymm12, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm12) + ymm13
vmovaps 0x280(%rsp), %ymm12
vfmadd231ps %ymm12, %ymm9, %ymm11 # ymm11 = (ymm9 * ymm12) + ymm11
vmovaps %ymm12, %ymm16
vmovaps 0x260(%rsp), %ymm30
vfmadd231ps %ymm30, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm30) + ymm10
vfmadd231ps %ymm9, %ymm23, %ymm13 # ymm13 = (ymm23 * ymm9) + ymm13
vmovaps 0x2c0(%rsp), %ymm28
vfmadd231ps %ymm28, %ymm8, %ymm11 # ymm11 = (ymm8 * ymm28) + ymm11
vmovaps 0x2a0(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm18) + ymm10
vmovups 0x1210(%r12,%r14), %ymm9
vmovups 0x1b18(%r12,%r14), %ymm14
vmovups 0x1f9c(%r12,%r14), %ymm15
vfmadd231ps %ymm8, %ymm19, %ymm13 # ymm13 = (ymm19 * ymm8) + ymm13
vmovaps %ymm26, 0x160(%rsp)
vmulps %ymm15, %ymm26, %ymm8
vmovaps %ymm27, 0x140(%rsp)
vmulps %ymm15, %ymm27, %ymm12
vmulps %ymm15, %ymm21, %ymm15
vmovaps %ymm24, 0x1a0(%rsp)
vfmadd231ps %ymm24, %ymm14, %ymm8 # ymm8 = (ymm14 * ymm24) + ymm8
vmovaps %ymm25, 0x180(%rsp)
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm20, %ymm15 # ymm15 = (ymm20 * ymm14) + ymm15
vmovups 0x1694(%r12,%r14), %ymm14
vfmadd231ps %ymm16, %ymm14, %ymm8 # ymm8 = (ymm14 * ymm16) + ymm8
vmovaps %ymm16, %ymm24
vfmadd231ps %ymm30, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm30) + ymm12
vfmadd231ps %ymm14, %ymm23, %ymm15 # ymm15 = (ymm23 * ymm14) + ymm15
vfmadd231ps %ymm28, %ymm9, %ymm8 # ymm8 = (ymm9 * ymm28) + ymm8
vfmadd231ps %ymm18, %ymm9, %ymm12 # ymm12 = (ymm9 * ymm18) + ymm12
vfmadd231ps %ymm9, %ymm19, %ymm15 # ymm15 = (ymm19 * ymm9) + ymm15
vbroadcastss 0x29cac7(%rip), %ymm16 # 0x1f20ec4
vandps %ymm16, %ymm11, %ymm9
vandps %ymm16, %ymm10, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vandps %ymm16, %ymm13, %ymm13
vmaxps %ymm13, %ymm9, %ymm9
vbroadcastss %xmm22, %ymm13
vcmpltps %ymm13, %ymm9, %k2
vmovaps %ymm31, %ymm11 {%k2}
vmovaps %ymm29, %ymm10 {%k2}
vandps %ymm16, %ymm8, %ymm9
vandps %ymm16, %ymm12, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vandps %ymm16, %ymm15, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vcmpltps %ymm13, %ymm9, %k2
vmovaps %ymm31, %ymm8 {%k2}
vmovaps %ymm29, %ymm12 {%k2}
vbroadcastss 0x29ca56(%rip), %ymm13 # 0x1f20ec0
vxorps %ymm13, %ymm11, %ymm9
vxorps %ymm13, %ymm8, %ymm13
vxorps %xmm19, %xmm19, %xmm19
vfmadd213ps %ymm19, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm19
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm14
vbroadcastss 0x268287(%rip), %ymm16 # 0x1eec71c
vmulps %ymm16, %ymm11, %ymm11
vmulps %ymm11, %ymm14, %ymm11
vmulps %ymm14, %ymm14, %ymm15
vmulps %ymm11, %ymm15, %ymm15
vbroadcastss 0x268264(%rip), %ymm17 # 0x1eec718
vfmadd231ps %ymm14, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm14) + ymm15
vmulps %ymm15, %ymm10, %ymm11
vmulps %ymm9, %ymm15, %ymm10
vmulps %ymm19, %ymm15, %ymm14
vfmadd213ps %ymm19, %ymm8, %ymm8 # ymm8 = (ymm8 * ymm8) + ymm19
vfmadd231ps %ymm12, %ymm12, %ymm8 # ymm8 = (ymm12 * ymm12) + ymm8
vrsqrt14ps %ymm8, %ymm9
vmulps %ymm16, %ymm8, %ymm8
vmulps %ymm8, %ymm9, %ymm8
vmulps %ymm9, %ymm9, %ymm15
vmulps %ymm8, %ymm15, %ymm15
vfmadd231ps %ymm9, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm9) + ymm15
vmulps %ymm15, %ymm12, %ymm8
vmulps %ymm13, %ymm15, %ymm9
vmulps %ymm19, %ymm15, %ymm12
vmovaps %ymm11, %ymm13
vfmadd213ps %ymm5, %ymm0, %ymm13 # ymm13 = (ymm0 * ymm13) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm14, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm14, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm14) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm14
vfmsub231ps %ymm12, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm12) - ymm14
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm12) + ymm6
vfmadd231ps %ymm14, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm14) + ymm6
vcmpleps %ymm19, %ymm6, %k2
vmovaps %ymm13, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm19, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm28, %ymm20
vmovaps %ymm18, %ymm21
vmovaps %ymm24, %ymm22
je 0x1c85784
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm19, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm19) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x268068(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x220(%rsp), %xmm9
vbroadcastss %xmm9, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x20(%r15){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1a0(%rsp), %ymm24
vmovaps 0x180(%rsp), %ymm25
vmovaps 0x160(%rsp), %ymm26
vmovaps 0x140(%rsp), %ymm27
je 0x1c84a6a
vcmpneqps %ymm19, %ymm6, %k1
ktestb %k1, %k0
je 0x1c84a6a
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x267fdc(%rip), %ymm6 # 0x1eec714
vsubps %ymm3, %ymm6, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm6, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x460(%rsp)
movzbl %al, %r12d
vmovaps %ymm2, %ymm3
testw %r12w, %r12w
je 0x1c849e5
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r11){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r12b
je 0x1c849e5
vbroadcastss 0x26c235(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x460(%rsp), %ymm1
vfmadd132ps 0x26c84e(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x300(%rsp)
vmovaps %ymm1, 0x460(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
movl $0x0, 0x360(%rsp)
movl %esi, 0x364(%rsp)
vmovaps 0x80(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0xf0(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0xe0(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
movb %r12b, 0x3b0(%rsp)
movl 0x24(%r15), %eax
testl %eax, 0x34(%rbx)
je 0x1c849e5
vaddps 0x29c6fb(%rip), %ymm5, %ymm0 # 0x1f20f40
vmovss 0x267ec7(%rip), %xmm1 # 0x1eec714
vdivss 0x2e0(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm3, 0x400(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x26718f(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %edi
movq 0x18(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c857c7
cmpq $0x0, 0x40(%rbx)
jne 0x1c857c7
vmovss 0x3c0(%rsp,%rdi,4), %xmm0
vmovss 0x3e0(%rsp,%rdi,4), %xmm1
vmovss 0x267e0c(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmovaps %ymm3, %ymm8
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x26c6d0(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x26c0b3(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmovaps %ymm5, %ymm7
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x26c6be(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x26c6aa(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0xd0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0xe0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vmovaps %ymm7, %ymm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xf0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vmovss 0x400(%rsp,%rdi,4), %xmm4
vmovss %xmm4, 0x20(%r15)
vfmadd132ps 0x80(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovaps %ymm8, %ymm3
vmovlps %xmm2, 0x30(%r15)
vextractps $0x2, %xmm2, 0x38(%r15)
vmovss %xmm0, 0x3c(%r15)
vmovss %xmm1, 0x40(%r15)
movq 0x48(%rsp), %rax
movl %eax, 0x44(%r15)
movq 0x40(%rsp), %rax
movl %eax, 0x48(%r15)
movq 0x18(%rsp), %rcx
movq 0x8(%rcx), %rax
movl (%rax), %eax
movl %eax, 0x4c(%r15)
movq 0x8(%rcx), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%r15)
leaq 0x4a4d18(%rip), %r12 # 0x2129704
jmp 0x1c84a26
vxorps %xmm19, %xmm19, %xmm19
vmovaps 0x240(%rsp), %ymm5
vmovaps 0x1c0(%rsp), %ymm3
vmovaps 0x2c0(%rsp), %ymm20
vmovaps 0x2a0(%rsp), %ymm21
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x260(%rsp), %ymm30
vmovaps %ymm3, 0x1c0(%rsp)
vmovaps %ymm5, 0x240(%rsp)
cmpl $0x9, %esi
jge 0x1c84a84
leaq 0xff(%r13), %rax
vmovaps 0x5a0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovb %k0, %ecx
andl %eax, %r13d
andl %ecx, %r13d
jne 0x1c83e6b
jmp 0x1c85b8d
xorl %r12d, %r12d
vmovaps 0x240(%rsp), %ymm5
vmovaps 0x1c0(%rsp), %ymm3
jmp 0x1c8475d
vpbroadcastd %esi, %ymm31
vbroadcastss 0x440(%rsp), %ymm0
vmovaps %ymm0, 0x220(%rsp)
vbroadcastss %xmm9, %ymm0
vmovaps %ymm0, 0x440(%rsp)
vmovss 0x267c61(%rip), %xmm0 # 0x1eec714
vdivss 0x2e0(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x2e0(%rsp)
movl $0x8, %ebx
vpbroadcastd %ebx, %ymm0
vpor 0x2d5e43(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpgtd %ymm0, %ymm31, %k1
leaq (%r14,%r8), %rcx
vmovups (%rcx,%rbx,4), %ymm3
vmovups 0x484(%rcx,%rbx,4), %ymm10
vmovups 0x908(%rcx,%rbx,4), %ymm11
vmovups 0xd8c(%rcx,%rbx,4), %ymm12
vmulps %ymm12, %ymm26, %ymm5
vmulps %ymm12, %ymm27, %ymm4
vmovaps 0x540(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vfmadd231ps %ymm24, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm24) + ymm5
vfmadd231ps %ymm25, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm25) + ymm4
vmovaps 0x560(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm22) + ymm5
vfmadd231ps %ymm30, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm30) + ymm4
vmovaps 0x580(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm8) + ymm0
vfmadd231ps %ymm20, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm20) + ymm5
vfmadd231ps %ymm21, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm21) + ymm4
leaq (%r14,%r12), %rax
vmovups (%rax,%rbx,4), %ymm2
vmovups 0x484(%rax,%rbx,4), %ymm13
vmovaps 0x520(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm16) + ymm0
vmovups 0x908(%rax,%rbx,4), %ymm14
vmovups 0xd8c(%rax,%rbx,4), %ymm15
vmulps %ymm15, %ymm26, %ymm7
vmulps %ymm15, %ymm27, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm24, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm24) + ymm7
vfmadd231ps %ymm25, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm22) + ymm7
vfmadd231ps %ymm30, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm30) + ymm6
vfmadd231ps %ymm8, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm8) + ymm1
vfmadd231ps %ymm20, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm20) + ymm7
vfmadd231ps %ymm21, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm21) + ymm6
vfmadd231ps %ymm16, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm16) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm30, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c8533d
vmovaps 0x4e0(%rsp), %ymm29
vmulps %ymm15, %ymm29, %ymm15
vmovaps 0x500(%rsp), %ymm18
vfmadd213ps %ymm15, %ymm18, %ymm14 # ymm14 = (ymm18 * ymm14) + ymm15
vmovaps 0x4a0(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x4c0(%rsp), %ymm28
vfmadd213ps %ymm13, %ymm28, %ymm2 # ymm2 = (ymm28 * ymm2) + ymm13
vmulps %ymm12, %ymm29, %ymm12
vfmadd213ps %ymm12, %ymm18, %ymm11 # ymm11 = (ymm18 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm3) + ymm10
vmovups 0x1210(%rcx,%rbx,4), %ymm12
vmovups 0x1694(%rcx,%rbx,4), %ymm13
vmovups 0x1b18(%rcx,%rbx,4), %ymm14
vmovups 0x1f9c(%rcx,%rbx,4), %ymm15
vmulps %ymm15, %ymm26, %ymm11
vmulps %ymm15, %ymm27, %ymm10
vmulps %ymm15, %ymm29, %ymm15
vfmadd231ps %ymm24, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm25) + ymm10
vfmadd231ps %ymm14, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm14) + ymm15
vfmadd231ps %ymm22, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm22) + ymm11
vfmadd231ps %ymm23, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm23) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm20, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm20) + ymm11
vfmadd231ps %ymm21, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm21) + ymm10
vfmadd231ps %ymm12, %ymm28, %ymm15 # ymm15 = (ymm28 * ymm12) + ymm15
vmovups 0x1210(%rax,%rbx,4), %ymm13
vmovups 0x1b18(%rax,%rbx,4), %ymm14
vmovups 0x1f9c(%rax,%rbx,4), %ymm16
vmulps %ymm16, %ymm26, %ymm17
vmulps %ymm16, %ymm27, %ymm12
vmulps %ymm16, %ymm29, %ymm16
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm14) + ymm16
vmovups 0x1694(%rax,%rbx,4), %ymm14
vfmadd231ps %ymm22, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm22) + ymm17
vfmadd231ps %ymm23, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm23) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm20, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm20) + ymm17
vfmadd231ps %ymm21, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm21) + ymm12
vfmadd231ps %ymm13, %ymm28, %ymm16 # ymm16 = (ymm28 * ymm13) + ymm16
vbroadcastss 0x29c165(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x220(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x29c0f1(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm19, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm19
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x267928(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x267906(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm19, %ymm14, %ymm13
vfmadd213ps %ymm19, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm19
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm19, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm13) + ymm6
vcmpleps %ymm19, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm19, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm23, %ymm30
je 0x1c85378
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm19, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm19) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x267715(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x440(%rsp), %ymm2, %k1
vcmpleps 0x20(%r15){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c85378
vcmpneqps %ymm19, %ymm6, %k1
ktestb %k1, %k0
je 0x1c85378
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2676b5(%rip), %ymm6 # 0x1eec714
vsubps %ymm3, %ymm6, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm6, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x420(%rsp)
movzbl %al, %edi
vmovaps %ymm2, %ymm3
testw %di, %di
je 0x1c85355
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r11){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %dil
je 0x1c85355
vbroadcastss 0x26b910(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x420(%rsp), %ymm1
vfmadd132ps 0x26bf29(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x300(%rsp)
vmovaps %ymm1, 0x420(%rsp)
vmovaps %ymm1, 0x320(%rsp)
vmovaps %ymm3, 0x340(%rsp)
movl %ebx, 0x360(%rsp)
movl %esi, 0x364(%rsp)
vmovaps 0x80(%rsp), %xmm0
vmovaps %xmm0, 0x370(%rsp)
vmovaps 0xf0(%rsp), %xmm0
vmovaps %xmm0, 0x380(%rsp)
vmovaps 0xe0(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
movb %dil, 0x3b0(%rsp)
movq 0x18(%rsp), %rax
movq (%rax), %rax
movq 0x1e8(%rax), %rax
movq 0x40(%rsp), %rcx
movq (%rax,%rcx,8), %r10
movl 0x24(%r15), %eax
testl %eax, 0x34(%r10)
je 0x1c85334
vmovaps %ymm5, 0x120(%rsp)
vaddps 0x29bdb8(%rip), %ymm5, %ymm0 # 0x1f20f40
vcvtsi2ss %ebx, %xmm18, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x2e0(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3c0(%rsp)
vmovaps 0x420(%rsp), %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps %ymm3, 0x400(%rsp)
kmovd %edi, %k1
vbroadcastss 0x26684f(%rip), %ymm0 # 0x1eeba20
vmovaps %ymm3, 0x100(%rsp)
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %dil, %al
movzbl %al, %eax
movzbl %dil, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r12d
movq 0x18(%rsp), %rax
movq 0x10(%rax), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c85391
cmpq $0x0, 0x40(%r10)
jne 0x1c85391
vmovss 0x3c0(%rsp,%r12,4), %xmm0
vmovss 0x3e0(%rsp,%r12,4), %xmm1
vmovss 0x2674c0(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x26bd88(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x26b76b(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x26bd7a(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x26bd66(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0xd0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0xe0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xf0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vmovss 0x400(%rsp,%r12,4), %xmm4
vmovss %xmm4, 0x20(%r15)
vfmadd132ps 0x80(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovlps %xmm2, 0x30(%r15)
vextractps $0x2, %xmm2, 0x38(%r15)
vmovss %xmm0, 0x3c(%r15)
vmovss %xmm1, 0x40(%r15)
movq 0x48(%rsp), %rax
movl %eax, 0x44(%r15)
movq 0x40(%rsp), %rax
movl %eax, 0x48(%r15)
movq 0x18(%rsp), %rcx
movq 0x8(%rcx), %rax
movl (%rax), %eax
movl %eax, 0x4c(%r15)
movq 0x8(%rcx), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%r15)
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
leaq 0x4a43c9(%rip), %r12 # 0x2129704
jmp 0x1c85355
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
vmovaps %ymm23, %ymm30
vmovaps %ymm3, 0x100(%rsp)
vmovaps %ymm5, 0x120(%rsp)
addq $0x8, %rbx
cmpl %ebx, %esi
jg 0x1c84acf
jmp 0x1c84a3d
xorl %edi, %edi
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
jmp 0x1c85083
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r10, 0xc8(%rsp)
movq %r9, 0x30(%rsp)
movq %r11, 0x28(%rsp)
movl %esi, 0x14(%rsp)
vmovaps %ymm24, 0x1a0(%rsp)
vmovaps %ymm25, 0x180(%rsp)
vmovaps %ymm26, 0x160(%rsp)
vmovaps %ymm27, 0x140(%rsp)
vmovdqa64 %ymm31, 0x480(%rsp)
vmovss 0x3c0(%rsp,%r12,4), %xmm0
vmovss 0x3e0(%rsp,%r12,4), %xmm1
vmovss 0x20(%r15), %xmm2
vmovss %xmm2, 0x20(%rsp)
vmovss 0x400(%rsp,%r12,4), %xmm2
vmovss %xmm2, 0x20(%r15)
movq 0x18(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x2672ce(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x26bb96(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x26b579(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x26bb88(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x26bb74(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x200(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd132ps 0x80(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovlps %xmm2, 0x90(%rsp)
vextractps $0x2, %xmm2, 0x98(%rsp)
vmovss %xmm0, 0x9c(%rsp)
vmovss %xmm1, 0xa0(%rsp)
movq 0x48(%rsp), %rcx
movl %ecx, 0xa4(%rsp)
movq 0x40(%rsp), %rcx
movl %ecx, 0xa8(%rsp)
movl (%rax), %ecx
movl %ecx, 0xac(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0xb0(%rsp)
movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF
leaq 0x24(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x18(%r10), %rcx
movq %rcx, 0x58(%rsp)
movq %rax, 0x60(%rsp)
movq %r15, 0x68(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0x70(%rsp)
movl $0x1, 0x78(%rsp)
movq 0x40(%r10), %rax
testq %rax, %rax
movl %edi, 0x38(%rsp)
je 0x1c855e2
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xc8(%rsp), %r10
movl 0x38(%rsp), %edi
vmovdqa64 0x480(%rsp), %ymm31
vmovaps 0x140(%rsp), %ymm27
vmovaps 0x160(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm24
vmovaps 0x260(%rsp), %ymm30
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x2a0(%rsp), %ymm21
vmovaps 0x2c0(%rsp), %ymm20
movl 0x14(%rsp), %esi
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a1d1a(%rip), %r8 # 0x21272e4
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r9
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c856dc
movq 0x18(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c85687
testb $0x2, (%rcx)
jne 0x1c85604
testb $0x40, 0x3e(%r10)
je 0x1c8567d
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xc8(%rsp), %r10
movl 0x38(%rsp), %edi
vmovdqa64 0x480(%rsp), %ymm31
vmovaps 0x140(%rsp), %ymm27
vmovaps 0x160(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm24
vmovaps 0x260(%rsp), %ymm30
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x2a0(%rsp), %ymm21
vmovaps 0x2c0(%rsp), %ymm20
movl 0x14(%rsp), %esi
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a1c71(%rip), %r8 # 0x21272e4
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r9
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c856dc
movq 0x68(%rsp), %rax
movq 0x70(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c856e8
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%r15)
vmovaps 0x100(%rsp), %ymm3
movl $0x1, %eax
shlxl %r12d, %eax, %eax
kmovd %eax, %k0
movzbl %dil, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x20(%r15){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %edi
ktestb %k1, %k0
vmovaps 0x120(%rsp), %ymm5
je 0x1c85776
kmovd %edi, %k1
vbroadcastss 0x2662e9(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %dil, %al
movzbl %al, %eax
movzbl %dil, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r12d
testb %dil, %dil
jne 0x1c85405
jmp 0x1c85334
xorl %r12d, %r12d
vmovaps 0x240(%rsp), %ymm5
vmovaps 0x1c0(%rsp), %ymm3
vmovaps 0x1a0(%rsp), %ymm24
vmovaps 0x180(%rsp), %ymm25
vmovaps 0x160(%rsp), %ymm26
vmovaps 0x140(%rsp), %ymm27
vmovaps 0x220(%rsp), %xmm9
jmp 0x1c8475d
vmovaps 0x380(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x390(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
movq %r9, 0x30(%rsp)
movq %r11, 0x28(%rsp)
vmovaps %ymm5, 0x240(%rsp)
vmovaps %ymm3, 0x1c0(%rsp)
movl %esi, 0x14(%rsp)
vmovss 0x3c0(%rsp,%rdi,4), %xmm0
vmovss 0x3e0(%rsp,%rdi,4), %xmm1
vmovss 0x20(%r15), %xmm2
vmovss %xmm2, 0x20(%rsp)
vmovss 0x400(%rsp,%rdi,4), %xmm2
vmovss %xmm2, 0x20(%r15)
movq 0x18(%rsp), %rax
movq 0x8(%rax), %rax
vmovss 0x266eb9(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x26b781(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x26b164(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x26b773(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x26b75f(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x1f0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x200(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd132ps 0x80(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovlps %xmm2, 0x90(%rsp)
vextractps $0x2, %xmm2, 0x98(%rsp)
vmovss %xmm0, 0x9c(%rsp)
vmovss %xmm1, 0xa0(%rsp)
movq 0x48(%rsp), %rcx
movl %ecx, 0xa4(%rsp)
movq 0x40(%rsp), %rcx
movl %ecx, 0xa8(%rsp)
movl (%rax), %ecx
movl %ecx, 0xac(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0xb0(%rsp)
movl $0xffffffff, 0x24(%rsp) # imm = 0xFFFFFFFF
leaq 0x24(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x58(%rsp)
movq %rax, 0x60(%rsp)
movq %r15, 0x68(%rsp)
leaq 0x90(%rsp), %rax
movq %rax, 0x70(%rsp)
movl $0x1, 0x78(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
movq %rdi, 0x38(%rsp)
je 0x1c859f2
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x38(%rsp), %rdi
vmovaps 0x220(%rsp), %xmm9
vmovaps 0x140(%rsp), %ymm27
vmovaps 0x160(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm24
vmovaps 0x260(%rsp), %ymm30
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x2a0(%rsp), %ymm21
vmovaps 0x2c0(%rsp), %ymm20
movl 0x14(%rsp), %esi
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a190a(%rip), %r8 # 0x21272e4
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r9
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c85ae5
movq 0x18(%rsp), %rax
movq 0x10(%rax), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c85a90
testb $0x2, (%rcx)
jne 0x1c85a13
testb $0x40, 0x3e(%rbx)
je 0x1c85a86
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x38(%rsp), %rdi
vmovaps 0x220(%rsp), %xmm9
vmovaps 0x140(%rsp), %ymm27
vmovaps 0x160(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm24
vmovaps 0x260(%rsp), %ymm30
vmovaps 0x280(%rsp), %ymm22
vmovaps 0x2a0(%rsp), %ymm21
vmovaps 0x2c0(%rsp), %ymm20
movl 0x14(%rsp), %esi
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a1868(%rip), %r8 # 0x21272e4
movq 0x28(%rsp), %r11
movq 0x30(%rsp), %r9
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c85ae5
movq 0x68(%rsp), %rax
movq 0x70(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c85af1
vmovss 0x20(%rsp), %xmm0
vmovss %xmm0, 0x20(%r15)
vmovaps 0x1c0(%rsp), %ymm3
movl $0x1, %eax
shlxl %edi, %eax, %eax
kmovd %eax, %k0
movzbl %r12b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x20(%r15){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r12d
ktestb %k1, %k0
vmovaps 0x240(%rsp), %ymm5
je 0x1c85b7f
kmovd %r12d, %k1
vbroadcastss 0x265edf(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %edi
testb %r12b, %r12b
jne 0x1c8581d
jmp 0x1c849e5
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersector1<8>::occluded_h<embree::avx512::RibbonCurve1Intersector1<embree::HermiteCurveT, 8>, embree::avx512::Occluded1EpilogMU<8, true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Occlusion (shadow-ray) query of one ray against a packed CurveNi<8> curve
// primitive block. Returns true as soon as ANY curve in the block is hit
// (occlusion needs no closest hit), false if every candidate lane misses.
static __forceinline bool occluded_h(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
// Per-lane entry distances of the ray against the block's curve bounds;
// filled by intersect() below alongside the validity mask.
vfloat<M> tNear;
vbool<M> valid = intersect(ray,prim,tNear);
const size_t N = prim.N;
// Scalar bitmask of lanes whose bounds test passed; iterated destructively.
size_t mask = movemask(valid);
while (mask)
{
// bscf: extracts one set-bit index from mask and clears it — presumably
// bit-scan-forward + clear, so lanes are visited in ascending order.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
// geomID is shared by the whole block; primID is per-lane.
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the Hermite control data (positions + tangents) for this curve.
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
// Exact curve test; the Epilog handles filter callbacks. First hit wins.
if (Intersector().intersect(pre,ray,context,geom,primID,p0,t0,p1,t1,Epilog(ray,context,geomID,primID)))
return true;
// Re-filter remaining lanes against ray.tfar — NOTE(review): this re-read
// of tfar matters if the epilog/intersector shortened it; keep the order.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x600, %rsp # imm = 0x600
movq %rcx, %r8
movq %rsi, %r15
movq %rdi, %r10
movzbl 0x1(%rcx), %eax
leaq (%rax,%rax,4), %rcx
leaq (%rcx,%rcx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovaps (%r15), %xmm1
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps 0x10(%r15), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%r8,%rax,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%r8,%rcx), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rax,%rax,2), %r9
vpmovsxbd 0x6(%r8,%r9,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rax,%rcx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%r9,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm0
addq %rax, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %ecx
vpmovsxbd 0x6(%r8,%rcx), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x28ca88(%rip), %ymm16 # 0x1f12704
vpermps %ymm2, %ymm16, %ymm15
vbroadcastss 0x29b250(%rip), %ymm17 # 0x1f20edc
vpermps %ymm2, %ymm17, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vpermps %ymm3, %ymm17, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x29b1c4(%rip), %ymm8 # 0x1f20ec4
vandps %ymm4, %ymm8, %ymm5
vbroadcastss 0x26b2db(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm4 {%k1}
vandps %ymm1, %ymm8, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm0, %ymm8, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x2669c9(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rcx
shlq $0x3, %r9
subq %rax, %r9
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%r8,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rcx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r8,%r9), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%r15){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x29a0e0(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%r15){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x29a0bc(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2d4ab8(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x5c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r11b
je 0x1c877f0
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r14d
tzcntq %r14, %rax
movl 0x2(%r8), %ecx
movl 0x6(%r8,%rax,4), %esi
movq (%rdx), %rax
movq 0x1e8(%rax), %rax
movq %rcx, 0x38(%rsp)
movq (%rax,%rcx,8), %rbx
movq %rsi, 0xb8(%rsp)
movq %rsi, %rax
imulq 0x68(%rbx), %rax
movq 0x58(%rbx), %rcx
movl (%rcx,%rax), %r9d
movq 0xa0(%rbx), %rcx
movq %rcx, %rax
imulq %r9, %rax
leaq 0x1(%r9), %rsi
imulq %rsi, %rcx
movq 0x110(%rbx), %rdi
imulq %rdi, %r9
imulq %rsi, %rdi
movq 0x100(%rbx), %rsi
vmovaps (%rsi,%r9), %xmm8
leaq 0x4a3800(%rip), %r9 # 0x2129704
movl 0x248(%rbx), %r12d
vmovaps (%rsi,%rdi), %xmm9
movslq %r12d, %rdi
movq %rdi, %rsi
shlq $0x6, %rsi
leaq (%rsi,%rdi,4), %r13
leaq 0x4a13bf(%rip), %rdi # 0x21272e4
movq 0x90(%rbx), %rsi
vmovaps (%rsi,%rax), %xmm1
vmovaps (%rsi,%rcx), %xmm7
vbroadcastss 0x2d752d(%rip), %xmm0 # 0x1f5d46c
vfnmadd132ps %xmm0, %xmm1, %xmm8 # xmm8 = -(xmm8 * xmm0) + xmm1
vfmadd132ps %xmm0, %xmm7, %xmm9 # xmm9 = (xmm9 * xmm0) + xmm7
vmovaps (%r15), %xmm0
vmovaps %xmm1, 0xd0(%rsp)
vsubps %xmm0, %xmm1, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps 0x10(%r10), %xmm4
vmovaps 0x20(%r10), %xmm5
vmovaps 0x30(%r10), %xmm6
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm10
vfmadd231ps %xmm3, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm3) + xmm10
vfmadd231ps %xmm2, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm2) + xmm10
vsubps %xmm0, %xmm8, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm12
vfmadd231ps %xmm3, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm3) + xmm12
vfmadd231ps %xmm2, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm2) + xmm12
vsubps %xmm0, %xmm9, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm6, %xmm1, %xmm13
vfmadd231ps %xmm3, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm3) + xmm13
vfmadd231ps %xmm2, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm2) + xmm13
vmovaps %xmm7, 0x290(%rsp)
vsubps %xmm0, %xmm7, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmulps %xmm6, %xmm0, %xmm15
vfmadd231ps %xmm2, %xmm5, %xmm15 # xmm15 = (xmm5 * xmm2) + xmm15
vfmadd231ps %xmm1, %xmm4, %xmm15 # xmm15 = (xmm4 * xmm1) + xmm15
vmovups (%rdi,%r13), %ymm3
vbroadcastss %xmm10, %ymm1
vmovups 0x484(%rdi,%r13), %ymm11
vbroadcastss 0x28c6f0(%rip), %ymm0 # 0x1f12704
vpermps %ymm10, %ymm0, %ymm20
vbroadcastss %xmm12, %ymm21
vpermps %ymm12, %ymm0, %ymm22
vmovups 0x908(%rdi,%r13), %ymm14
vbroadcastss %xmm13, %ymm24
vpermps %ymm13, %ymm0, %ymm25
vmovups 0xd8c(%rdi,%r13), %ymm16
vbroadcastss %xmm15, %ymm26
vpermps %ymm15, %ymm0, %ymm27
vmulps %ymm16, %ymm26, %ymm5
vmulps %ymm16, %ymm27, %ymm4
vfmadd231ps %ymm24, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm24) + ymm5
vfmadd231ps %ymm25, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm25) + ymm4
vfmadd231ps %ymm21, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm21) + ymm5
vfmadd231ps %ymm22, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm22) + ymm4
vfmadd231ps %ymm1, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm1) + ymm5
vmovups (%r9,%r13), %ymm2
vmovups 0x484(%r9,%r13), %ymm17
vmovups 0x908(%r9,%r13), %ymm18
vfmadd231ps %ymm20, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm20) + ymm4
vmovups 0xd8c(%r9,%r13), %ymm19
vmulps %ymm19, %ymm26, %ymm7
vmulps %ymm19, %ymm27, %ymm6
vfmadd231ps %ymm24, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm24) + ymm7
vfmadd231ps %ymm25, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm25) + ymm6
vmovaps %ymm21, 0x240(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm22, 0x220(%rsp)
vfmadd231ps %ymm22, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm22) + ymm6
vmovaps %ymm1, 0x260(%rsp)
vfmadd231ps %ymm1, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm1) + ymm7
vmovaps %ymm20, %ymm29
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm23
vmulps %ymm31, %ymm4, %ymm0
vmulps %ymm23, %ymm5, %ymm1
vsubps %ymm1, %ymm0, %ymm20
vshufps $0xff, %xmm8, %xmm8, %xmm0 # xmm0 = xmm8[3,3,3,3]
vbroadcastsd %xmm0, %ymm21
vshufps $0xff, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vbroadcastss 0xc(%rsi,%rcx), %ymm30
vmulps %ymm16, %ymm30, %ymm0
vfmadd231ps %ymm22, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm21) + ymm0
vbroadcastss 0xc(%rsi,%rax), %ymm28
vfmadd231ps %ymm28, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm28) + ymm0
vmulps %ymm19, %ymm30, %ymm1
vmovaps %ymm22, 0x580(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x5a0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vfmadd231ps %ymm28, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm28) + ymm1
vmulps %ymm23, %ymm23, %ymm21
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %ymm28, 0x540(%rsp)
vinsertps $0x30, %xmm28, %xmm10, %xmm20 # xmm20 = xmm10[0,1,2],xmm28[0]
vbroadcastss 0x29ad03(%rip), %xmm21 # 0x1f20ec4
vandps %xmm21, %xmm20, %xmm20
vmovaps %xmm8, 0x2b0(%rsp)
vblendps $0x8, %xmm8, %xmm12, %xmm8 # xmm8 = xmm12[0,1,2],xmm8[3]
vandps %xmm21, %xmm8, %xmm8
vmaxps %xmm8, %xmm20, %xmm8
vmovaps %xmm9, 0x2a0(%rsp)
vblendps $0x8, %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[0,1,2],xmm9[3]
vandps %xmm21, %xmm9, %xmm9
vmovaps %ymm30, 0x560(%rsp)
vinsertps $0x30, %xmm30, %xmm15, %xmm20 # xmm20 = xmm15[0,1,2],xmm30[0]
vandps %xmm21, %xmm20, %xmm20
vmaxps %xmm20, %xmm9, %xmm9
vmaxps %xmm9, %xmm8, %xmm8
vmovshdup %xmm8, %xmm9 # xmm9 = xmm8[1,1,3,3]
vmaxss %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[1,0]
vmaxss %xmm9, %xmm8, %xmm8
vcvtsi2ss %r12d, %xmm10, %xmm9
vmovaps %xmm9, 0x40(%rsp)
vbroadcastss %xmm9, %ymm9
vcmpgtps 0x29acf9(%rip), %ymm9, %k1 {%k1} # 0x1f20f40
vmulss 0x26ad93(%rip), %xmm8, %xmm22 # 0x1ef0fe4
vbroadcastss 0x29ac82(%rip), %ymm8 # 0x1f20edc
vpermps %ymm10, %ymm8, %ymm28
vpermps %ymm12, %ymm8, %ymm30
vpermps %ymm13, %ymm8, %ymm20
vpermps %ymm15, %ymm8, %ymm21
kortestb %k1, %k1
vmovss 0xc(%r15), %xmm8
vmovaps %ymm20, 0x520(%rsp)
vmovaps %ymm21, 0x500(%rsp)
vmovaps %xmm22, 0x480(%rsp)
je 0x1c86c2f
vmovaps %xmm8, 0x140(%rsp)
vmulps %ymm19, %ymm21, %ymm8
vfmadd213ps %ymm8, %ymm20, %ymm18 # ymm18 = (ymm20 * ymm18) + ymm8
vfmadd213ps %ymm18, %ymm30, %ymm17 # ymm17 = (ymm30 * ymm17) + ymm18
vfmadd213ps %ymm17, %ymm28, %ymm2 # ymm2 = (ymm28 * ymm2) + ymm17
vmulps %ymm16, %ymm21, %ymm8
vfmadd213ps %ymm8, %ymm20, %ymm14 # ymm14 = (ymm20 * ymm14) + ymm8
vfmadd213ps %ymm14, %ymm30, %ymm11 # ymm11 = (ymm30 * ymm11) + ymm14
vmovups 0x1210(%rdi,%r13), %ymm8
vmovups 0x1694(%rdi,%r13), %ymm9
vmovups 0x1b18(%rdi,%r13), %ymm12
vmovups 0x1f9c(%rdi,%r13), %ymm13
vfmadd213ps %ymm11, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm3) + ymm11
vmulps %ymm13, %ymm26, %ymm11
vmulps %ymm13, %ymm27, %ymm10
vmulps %ymm13, %ymm21, %ymm13
vfmadd231ps %ymm24, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm25) + ymm10
vfmadd231ps %ymm12, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm12) + ymm13
vmovaps %ymm28, %ymm16
vmovaps 0x240(%rsp), %ymm28
vfmadd231ps %ymm28, %ymm9, %ymm11 # ymm11 = (ymm9 * ymm28) + ymm11
vmovaps %ymm30, %ymm17
vmovaps 0x220(%rsp), %ymm30
vfmadd231ps %ymm30, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm30) + ymm10
vfmadd231ps %ymm9, %ymm17, %ymm13 # ymm13 = (ymm17 * ymm9) + ymm13
vmovaps 0x260(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm8, %ymm11 # ymm11 = (ymm8 * ymm18) + ymm11
vfmadd231ps %ymm29, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm29) + ymm10
vmovups 0x1210(%r9,%r13), %ymm9
vmovups 0x1b18(%r9,%r13), %ymm14
vmovups 0x1f9c(%r9,%r13), %ymm15
vfmadd231ps %ymm8, %ymm16, %ymm13 # ymm13 = (ymm16 * ymm8) + ymm13
vmovaps %ymm26, 0x180(%rsp)
vmulps %ymm15, %ymm26, %ymm8
vmovaps %ymm27, 0x160(%rsp)
vmulps %ymm15, %ymm27, %ymm12
vmulps %ymm15, %ymm21, %ymm15
vmovaps %ymm24, 0x1c0(%rsp)
vfmadd231ps %ymm24, %ymm14, %ymm8 # ymm8 = (ymm14 * ymm24) + ymm8
vmovaps %ymm25, 0x1a0(%rsp)
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm20, %ymm15 # ymm15 = (ymm20 * ymm14) + ymm15
vmovups 0x1694(%r9,%r13), %ymm14
vfmadd231ps %ymm28, %ymm14, %ymm8 # ymm8 = (ymm14 * ymm28) + ymm8
vfmadd231ps %ymm30, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm30) + ymm12
vmovaps %ymm17, %ymm20
vfmadd231ps %ymm14, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm14) + ymm15
vfmadd231ps %ymm18, %ymm9, %ymm8 # ymm8 = (ymm9 * ymm18) + ymm8
vfmadd231ps %ymm29, %ymm9, %ymm12 # ymm12 = (ymm9 * ymm29) + ymm12
vmovaps %ymm16, %ymm21
vfmadd231ps %ymm9, %ymm16, %ymm15 # ymm15 = (ymm16 * ymm9) + ymm15
vbroadcastss 0x29aab7(%rip), %ymm16 # 0x1f20ec4
vandps %ymm16, %ymm11, %ymm9
vandps %ymm16, %ymm10, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vandps %ymm16, %ymm13, %ymm13
vmaxps %ymm13, %ymm9, %ymm9
vbroadcastss %xmm22, %ymm13
vcmpltps %ymm13, %ymm9, %k2
vmovaps %ymm31, %ymm11 {%k2}
vmovaps %ymm23, %ymm10 {%k2}
vandps %ymm16, %ymm8, %ymm9
vandps %ymm16, %ymm12, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vandps %ymm16, %ymm15, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vcmpltps %ymm13, %ymm9, %k2
vmovaps %ymm31, %ymm8 {%k2}
vmovaps %ymm23, %ymm12 {%k2}
vbroadcastss 0x29aa46(%rip), %ymm13 # 0x1f20ec0
vxorps %ymm13, %ymm11, %ymm9
vxorps %ymm13, %ymm8, %ymm13
vxorps %xmm19, %xmm19, %xmm19
vfmadd213ps %ymm19, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm19
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm14
vbroadcastss 0x266277(%rip), %ymm16 # 0x1eec71c
vmulps %ymm16, %ymm11, %ymm11
vmulps %ymm11, %ymm14, %ymm11
vmulps %ymm14, %ymm14, %ymm15
vmulps %ymm11, %ymm15, %ymm15
vbroadcastss 0x266254(%rip), %ymm17 # 0x1eec718
vfmadd231ps %ymm14, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm14) + ymm15
vmulps %ymm15, %ymm10, %ymm11
vmulps %ymm9, %ymm15, %ymm10
vmulps %ymm19, %ymm15, %ymm14
vfmadd213ps %ymm19, %ymm8, %ymm8 # ymm8 = (ymm8 * ymm8) + ymm19
vfmadd231ps %ymm12, %ymm12, %ymm8 # ymm8 = (ymm12 * ymm12) + ymm8
vrsqrt14ps %ymm8, %ymm9
vmulps %ymm16, %ymm8, %ymm8
vmulps %ymm8, %ymm9, %ymm8
vmulps %ymm9, %ymm9, %ymm15
vmulps %ymm8, %ymm15, %ymm15
vfmadd231ps %ymm9, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm9) + ymm15
vmulps %ymm15, %ymm12, %ymm8
vmulps %ymm13, %ymm15, %ymm9
vmulps %ymm19, %ymm15, %ymm12
vmovaps %ymm11, %ymm13
vfmadd213ps %ymm5, %ymm0, %ymm13 # ymm13 = (ymm0 * ymm13) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm14, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm14, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm14) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm14
vfmsub231ps %ymm12, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm12) - ymm14
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm12) + ymm6
vfmadd231ps %ymm14, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm14) + ymm6
vcmpleps %ymm19, %ymm6, %k2
vmovaps %ymm13, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm19, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm18, %ymm23
vmovaps %ymm29, %ymm31
vmovaps %ymm28, %ymm22
vmovaps %ymm30, %ymm18
je 0x1c877a2
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm19, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm19) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x266052(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x140(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x20(%r15){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1c0(%rsp), %ymm24
vmovaps 0x1a0(%rsp), %ymm25
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x160(%rsp), %ymm27
vmovaps %ymm21, %ymm28
vmovaps %ymm20, %ymm30
je 0x1c86cba
vcmpneqps %ymm19, %ymm6, %k1
ktestb %k1, %k0
je 0x1c86cba
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x265fba(%rip), %ymm6 # 0x1eec714
vsubps %ymm3, %ymm6, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm6, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x4a0(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c86c2b
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r10){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c86c2b
vbroadcastss 0x26a21a(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x4a0(%rsp), %ymm1
vfmadd132ps 0x26a833(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x320(%rsp)
vmovaps %ymm1, 0x4a0(%rsp)
vmovaps %ymm1, 0x340(%rsp)
vmovaps %ymm2, 0x360(%rsp)
movl $0x0, 0x380(%rsp)
movl %r12d, 0x384(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovaps 0x2b0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps 0x2a0(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovaps 0x290(%rsp), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
movb %al, 0x3d0(%rsp)
movl 0x24(%r15), %ecx
testl %ecx, 0x34(%rbx)
je 0x1c86c2b
movq 0x10(%rdx), %rcx
cmpq $0x0, 0x10(%rcx)
jne 0x1c86871
movb $0x1, %sil
cmpq $0x0, 0x48(%rbx)
je 0x1c86c67
vmovaps %ymm5, 0x200(%rsp)
vaddps 0x29a6be(%rip), %ymm5, %ymm0 # 0x1f20f40
vmovss 0x265e8a(%rip), %xmm1 # 0x1eec714
vdivss 0x40(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x4a0(%rsp), %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps %ymm2, 0x1e0(%rsp)
vmovaps %ymm2, 0x420(%rsp)
movzbl %al, %ecx
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0xf0(%rsp)
tzcntq %rcx, %rax
vmovaps 0x3b0(%rsp), %xmm0
vmovaps %xmm0, 0xc0(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
movb $0x1, %sil
movq %r8, 0x20(%rsp)
movq %rdx, 0x10(%rsp)
movq %r10, 0x18(%rsp)
movb %r11b, 0x7(%rsp)
vmovaps %ymm31, 0x300(%rsp)
vmovaps %ymm28, 0x2e0(%rsp)
vmovaps %ymm30, 0x2c0(%rsp)
movq %rcx, 0x440(%rsp)
vmovss 0x3e0(%rsp,%rax,4), %xmm0
vmovss 0x400(%rsp,%rax,4), %xmm1
vmovss 0x20(%r15), %xmm2
vmovss %xmm2, 0x30(%rsp)
movq %rax, 0x28(%rsp)
vmovss 0x420(%rsp,%rax,4), %xmm2
vmovss %xmm2, 0x20(%r15)
movq 0x8(%rdx), %rax
vmovss 0x265d9b(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x26a663(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x26a046(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x26a655(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x26a641(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0xe0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0xc0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xf0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd132ps 0xd0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovlps %xmm2, 0x80(%rsp)
vextractps $0x2, %xmm2, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovss %xmm1, 0x90(%rsp)
movq 0xb8(%rsp), %rcx
movl %ecx, 0x94(%rsp)
movq 0x38(%rsp), %rcx
movl %ecx, 0x98(%rsp)
movl (%rax), %ecx
movl %ecx, 0x9c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0xa0(%rsp)
movl $0xffffffff, 0xc(%rsp) # imm = 0xFFFFFFFF
leaq 0xc(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x58(%rsp)
movq %rax, 0x60(%rsp)
movq %r15, 0x68(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0x70(%rsp)
movl $0x1, 0x78(%rsp)
movq 0x48(%rbx), %rax
testq %rax, %rax
movl %esi, 0x8(%rsp)
je 0x1c86b2e
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x140(%rsp), %xmm8
movl 0x8(%rsp), %esi
vmovaps 0x2c0(%rsp), %ymm30
vmovaps 0x2e0(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm27
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x1a0(%rsp), %ymm25
vmovaps 0x1c0(%rsp), %ymm24
vmovaps 0x220(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
vmovaps 0x300(%rsp), %ymm31
vmovaps 0x260(%rsp), %ymm23
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a2bff(%rip), %r9 # 0x2129704
leaq 0x4a07d8(%rip), %rdi # 0x21272e4
movb 0x7(%rsp), %r11b
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %rdx
movq 0x20(%rsp), %r8
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c86be7
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c86c13
testb $0x2, (%rcx)
jne 0x1c86b4e
testb $0x40, 0x3e(%rbx)
je 0x1c86bdd
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
vmovaps 0x140(%rsp), %xmm8
movl 0x8(%rsp), %esi
vmovaps 0x2c0(%rsp), %ymm30
vmovaps 0x2e0(%rsp), %ymm28
vmovaps 0x160(%rsp), %ymm27
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x1a0(%rsp), %ymm25
vmovaps 0x1c0(%rsp), %ymm24
vmovaps 0x220(%rsp), %ymm18
vmovaps 0x240(%rsp), %ymm22
vmovaps 0x300(%rsp), %ymm31
vmovaps 0x260(%rsp), %ymm23
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a2b42(%rip), %r9 # 0x2129704
leaq 0x4a071b(%rip), %rdi # 0x21272e4
movb 0x7(%rsp), %r11b
movq 0x18(%rsp), %r10
movq 0x10(%rsp), %rdx
movq 0x20(%rsp), %r8
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
jne 0x1c86c13
vmovss 0x30(%rsp), %xmm0
vmovss %xmm0, 0x20(%r15)
movq 0x28(%rsp), %rax
movq 0x440(%rsp), %rcx
btcq %rax, %rcx
tzcntq %rcx, %rax
setae %sil
jae 0x1c86933
andb $0x1, %sil
vmovaps 0x200(%rsp), %ymm5
vmovaps 0x1e0(%rsp), %ymm2
jmp 0x1c86c67
xorl %esi, %esi
jmp 0x1c86c67
xorl %esi, %esi
vxorps %xmm19, %xmm19, %xmm19
vmovaps 0x200(%rsp), %ymm5
vmovaps 0x1e0(%rsp), %ymm2
vmovaps 0x260(%rsp), %ymm23
vmovaps %ymm29, %ymm31
vmovaps 0x240(%rsp), %ymm22
vmovaps 0x220(%rsp), %ymm18
vmovaps %ymm2, 0x1e0(%rsp)
vmovaps %ymm5, 0x200(%rsp)
cmpl $0x9, %r12d
jge 0x1c86cd3
testb $0x1, %sil
jne 0x1c877f0
leaq 0xff(%r14), %rax
vmovaps 0x5c0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovb %k0, %ecx
andl %eax, %r14d
andl %ecx, %r14d
setne %r11b
jne 0x1c85e92
jmp 0x1c877f0
xorl %eax, %eax
vmovaps 0x200(%rsp), %ymm5
vmovaps 0x1e0(%rsp), %ymm2
jmp 0x1c8677a
vpbroadcastd %r12d, %ymm21
vbroadcastss 0x480(%rsp), %ymm0
vmovaps %ymm0, 0x140(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x480(%rsp)
vmovss 0x265a12(%rip), %xmm0 # 0x1eec714
vdivss 0x40(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x440(%rsp)
movl $0x8, %ebx
vpbroadcastd %ebx, %ymm0
vpor 0x2d3bf7(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpgtd %ymm0, %ymm21, %k1
leaq (%rdi,%r13), %rcx
vmovups (%rcx,%rbx,4), %ymm3
vmovups 0x484(%rcx,%rbx,4), %ymm10
vmovups 0x908(%rcx,%rbx,4), %ymm11
vmovups 0xd8c(%rcx,%rbx,4), %ymm12
vmulps %ymm12, %ymm26, %ymm5
vmulps %ymm12, %ymm27, %ymm4
vmovaps 0x560(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vfmadd231ps %ymm24, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm24) + ymm5
vfmadd231ps %ymm25, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm25) + ymm4
vmovaps 0x580(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm22) + ymm5
vfmadd231ps %ymm18, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm18) + ymm4
vmovaps 0x5a0(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm8) + ymm0
vfmadd231ps %ymm23, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm23) + ymm5
vfmadd231ps %ymm31, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm31) + ymm4
leaq (%r13,%r9), %rax
vmovups (%rax,%rbx,4), %ymm2
vmovups 0x484(%rax,%rbx,4), %ymm13
vmovaps 0x540(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm16) + ymm0
vmovups 0x908(%rax,%rbx,4), %ymm14
vmovups 0xd8c(%rax,%rbx,4), %ymm15
vmulps %ymm15, %ymm26, %ymm7
vmulps %ymm15, %ymm27, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm24, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm24) + ymm7
vfmadd231ps %ymm25, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm22) + ymm7
vfmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm18) + ymm6
vfmadd231ps %ymm8, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm8) + ymm1
vfmadd231ps %ymm23, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm23) + ymm7
vfmadd231ps %ymm31, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm31) + ymm6
vfmadd231ps %ymm16, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm16) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm18, %ymm20
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c87742
vmovaps 0x500(%rsp), %ymm29
vmulps %ymm15, %ymm29, %ymm15
vmovaps 0x520(%rsp), %ymm18
vfmadd213ps %ymm15, %ymm18, %ymm14 # ymm14 = (ymm18 * ymm14) + ymm15
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vfmadd213ps %ymm13, %ymm28, %ymm2 # ymm2 = (ymm28 * ymm2) + ymm13
vmulps %ymm12, %ymm29, %ymm12
vfmadd213ps %ymm12, %ymm18, %ymm11 # ymm11 = (ymm18 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm3) + ymm10
vmovups 0x1210(%rcx,%rbx,4), %ymm12
vmovups 0x1694(%rcx,%rbx,4), %ymm13
vmovups 0x1b18(%rcx,%rbx,4), %ymm14
vmovups 0x1f9c(%rcx,%rbx,4), %ymm15
vmulps %ymm15, %ymm26, %ymm11
vmulps %ymm15, %ymm27, %ymm10
vmulps %ymm15, %ymm29, %ymm15
vfmadd231ps %ymm24, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm25) + ymm10
vfmadd231ps %ymm14, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm14) + ymm15
vfmadd231ps %ymm22, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm22) + ymm11
vfmadd231ps %ymm20, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm20) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm23, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm23) + ymm11
vfmadd231ps %ymm31, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm31) + ymm10
vfmadd231ps %ymm12, %ymm28, %ymm15 # ymm15 = (ymm28 * ymm12) + ymm15
vmovups 0x1210(%rax,%rbx,4), %ymm13
vmovups 0x1b18(%rax,%rbx,4), %ymm14
vmovups 0x1f9c(%rax,%rbx,4), %ymm16
vmulps %ymm16, %ymm26, %ymm17
vmulps %ymm16, %ymm27, %ymm12
vmulps %ymm16, %ymm29, %ymm16
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm14) + ymm16
vmovups 0x1694(%rax,%rbx,4), %ymm14
vfmadd231ps %ymm22, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm22) + ymm17
vfmadd231ps %ymm20, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm20) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm23, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm23) + ymm17
vfmadd231ps %ymm31, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm31) + ymm12
vfmadd231ps %ymm13, %ymm28, %ymm16 # ymm16 = (ymm28 * ymm13) + ymm16
vbroadcastss 0x299f28(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x140(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x299eb4(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm19, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm19
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x2656eb(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x2656c9(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm19, %ymm14, %ymm13
vfmadd213ps %ymm19, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm19
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm19, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm13) + ymm6
vcmpleps %ymm19, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm19, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm20, %ymm18
je 0x1c87789
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm19, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm19) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2654d8(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x480(%rsp), %ymm2, %k1
vcmpleps 0x20(%r15){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c87789
vcmpneqps %ymm19, %ymm6, %k1
ktestb %k1, %k0
je 0x1c87789
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x265478(%rip), %ymm6 # 0x1eec714
vsubps %ymm3, %ymm6, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm6, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x460(%rsp)
movzbl %al, %eax
testw %ax, %ax
je 0x1c87765
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
vmulps (%r10){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm2, %k0
kmovd %k0, %ecx
andb %cl, %al
je 0x1c87765
movl %esi, 0x8(%rsp)
vbroadcastss 0x2696d4(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x460(%rsp), %ymm1
vfmadd132ps 0x269ced(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x320(%rsp)
vmovaps %ymm1, 0x460(%rsp)
vmovaps %ymm1, 0x340(%rsp)
vmovaps %ymm2, 0x360(%rsp)
movl %ebx, 0x380(%rsp)
movl %r12d, 0x384(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x390(%rsp)
vmovaps 0x2b0(%rsp), %xmm0
vmovaps %xmm0, 0x3a0(%rsp)
vmovaps 0x2a0(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovaps 0x290(%rsp), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
movb %al, 0x3d0(%rsp)
movq (%rdx), %rcx
movq 0x1e8(%rcx), %rcx
movq 0x38(%rsp), %rsi
movq (%rcx,%rsi,8), %rsi
movl 0x24(%r15), %ecx
movq %rsi, 0x40(%rsp)
testl %ecx, 0x34(%rsi)
je 0x1c8775c
movq 0x10(%rdx), %rcx
cmpq $0x0, 0x10(%rcx)
movl 0x8(%rsp), %esi
jne 0x1c873d3
movq 0x40(%rsp), %rcx
cmpq $0x0, 0x48(%rcx)
movb $0x1, %cl
je 0x1c87762
vmovdqa64 %ymm21, 0x4e0(%rsp)
vmovaps %ymm30, 0x2c0(%rsp)
vmovaps %ymm28, 0x2e0(%rsp)
vmovaps %ymm27, 0x160(%rsp)
vmovaps %ymm26, 0x180(%rsp)
vmovaps %ymm25, 0x1a0(%rsp)
vmovaps %ymm24, 0x1c0(%rsp)
vmovaps %ymm31, 0x300(%rsp)
movb %r11b, 0x7(%rsp)
movq %r10, 0x18(%rsp)
movq %r8, 0x20(%rsp)
vmovaps %ymm5, 0x120(%rsp)
vaddps 0x299b0d(%rip), %ymm5, %ymm0 # 0x1f20f40
vcvtsi2ss %ebx, %xmm19, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x440(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x3e0(%rsp)
vmovaps 0x460(%rsp), %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps %ymm2, 0x100(%rsp)
vmovaps %ymm2, 0x420(%rsp)
movzbl %al, %edi
vmovaps 0x3a0(%rsp), %xmm0
vmovaps %xmm0, 0xe0(%rsp)
tzcntq %rdi, %rcx
vmovaps 0x3b0(%rsp), %xmm0
vmovaps %xmm0, 0x4d0(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x4c0(%rsp)
movb $0x1, %al
movl %eax, 0xc0(%rsp)
movq %rdx, 0x10(%rsp)
vmovss 0x3e0(%rsp,%rcx,4), %xmm0
vmovss 0x400(%rsp,%rcx,4), %xmm1
vmovss 0x20(%r15), %xmm2
vmovss %xmm2, 0xf0(%rsp)
movq %rcx, 0x30(%rsp)
vmovss 0x420(%rsp,%rcx,4), %xmm2
vmovss %xmm2, 0x20(%r15)
movq 0x8(%rdx), %rax
vmovss 0x26520f(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x269ad7(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2694ba(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x269ac9(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x269ab5(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x4c0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x4d0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xe0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd132ps 0xd0(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovlps %xmm2, 0x80(%rsp)
vextractps $0x2, %xmm2, 0x88(%rsp)
vmovss %xmm0, 0x8c(%rsp)
vmovss %xmm1, 0x90(%rsp)
movq 0xb8(%rsp), %rcx
movl %ecx, 0x94(%rsp)
movq 0x38(%rsp), %rcx
movl %ecx, 0x98(%rsp)
movl (%rax), %ecx
movl %ecx, 0x9c(%rsp)
movl 0x4(%rax), %ecx
movl %ecx, 0xa0(%rsp)
movl $0xffffffff, 0xc(%rsp) # imm = 0xFFFFFFFF
leaq 0xc(%rsp), %rcx
movq %rcx, 0x50(%rsp)
movq 0x40(%rsp), %rsi
movq 0x18(%rsi), %rcx
movq %rcx, 0x58(%rsp)
movq %rax, 0x60(%rsp)
movq %r15, 0x68(%rsp)
leaq 0x80(%rsp), %rax
movq %rax, 0x70(%rsp)
movl $0x1, 0x78(%rsp)
movq 0x48(%rsi), %rax
testq %rax, %rax
movq %rdi, 0x28(%rsp)
je 0x1c8763d
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x28(%rsp), %rdi
movq 0x10(%rsp), %rdx
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c87678
movq 0x10(%rdx), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c876a5
testb $0x2, (%rcx)
jne 0x1c8765a
movq 0x40(%rsp), %rcx
testb $0x40, 0x3e(%rcx)
je 0x1c87664
leaq 0x50(%rsp), %rdi
vzeroupper
callq *%rax
movq 0x50(%rsp), %rax
cmpl $0x0, (%rax)
movq 0x10(%rsp), %rdx
movq 0x28(%rsp), %rdi
jne 0x1c876a5
vmovss 0xf0(%rsp), %xmm0
vmovss %xmm0, 0x20(%r15)
movq 0x30(%rsp), %rax
btcq %rax, %rdi
tzcntq %rdi, %rcx
setae %al
movl %eax, 0xc0(%rsp)
jae 0x1c874c4
movl 0xc0(%rsp), %ecx
andb $0x1, %cl
movq 0x20(%rsp), %r8
movq 0x18(%rsp), %r10
movb 0x7(%rsp), %r11b
leaq 0x49fc1f(%rip), %rdi # 0x21272e4
leaq 0x4a2038(%rip), %r9 # 0x2129704
vxorps %xmm19, %xmm19, %xmm19
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm2
vmovaps 0x260(%rsp), %ymm23
vmovaps 0x300(%rsp), %ymm31
vmovaps 0x240(%rsp), %ymm22
vmovaps 0x220(%rsp), %ymm18
vmovaps 0x1c0(%rsp), %ymm24
vmovaps 0x1a0(%rsp), %ymm25
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x160(%rsp), %ymm27
vmovaps 0x2e0(%rsp), %ymm28
vmovaps 0x2c0(%rsp), %ymm30
movl 0x8(%rsp), %esi
vmovdqa64 0x4e0(%rsp), %ymm21
jmp 0x1c87762
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm2
vmovaps %ymm20, %ymm18
jmp 0x1c87765
xorl %ecx, %ecx
movl 0x8(%rsp), %esi
orb %cl, %sil
vmovaps %ymm2, 0x100(%rsp)
vmovaps %ymm5, 0x120(%rsp)
addq $0x8, %rbx
cmpl %ebx, %r12d
jg 0x1c86d1b
jmp 0x1c86c7f
xorl %eax, %eax
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm2
jmp 0x1c872bc
xorl %eax, %eax
vmovaps 0x200(%rsp), %ymm5
vmovaps 0x1e0(%rsp), %ymm2
vmovaps 0x1c0(%rsp), %ymm24
vmovaps 0x1a0(%rsp), %ymm25
vmovaps 0x180(%rsp), %ymm26
vmovaps 0x160(%rsp), %ymm27
vmovaps 0x140(%rsp), %xmm8
vmovaps %ymm21, %ymm28
vmovaps %ymm20, %ymm30
jmp 0x1c8677a
andb $0x1, %r11b
movl %r11d, %eax
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 4>::intersect_h<embree::avx512::RibbonCurve1IntersectorK<embree::HermiteCurveT, 4, 8>, embree::avx512::Intersect1KEpilogMU<8, 4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
static __forceinline void intersect_h(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
Vec3ff p0,t0,p1,t1; geom->gather_hermite(p0,t0,p1,t1,geom->curve(primID));
Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,Epilog(ray,k,context,geomID,primID));
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x6c0, %rsp # imm = 0x6C0
movq %rcx, %r10
movq %rdx, %r15
movq %rdi, 0x58(%rsp)
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rdx
vbroadcastss 0x12(%r8,%rdx), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x40(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x10(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rdx), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r8,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
leal (,%rdx,4), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %r9
leal (%r9,%r9), %edi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %rdi
vpmovsxbd 0x6(%r8,%rdi), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x28adf6(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x2995c4(%rip), %ymm16 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm16, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm16, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x299539(%rip), %ymm7 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm7, %ymm2, %ymm5
vbroadcastss 0x26964b(%rip), %ymm6 # 0x1ef0fe8
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm2 {%k1}
vandps %ymm7, %ymm1, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm1 {%k1}
vandps %ymm7, %ymm0, %ymm5
vcmpltps %ymm6, %ymm5, %k1
vmovaps %ymm6, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x264d39(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %rdi
subq %rcx, %rdi
vpmovsxwd 0x6(%r8,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%r9), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rdi
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rdi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x30(%rsi,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x298445(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x80(%rsi,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x298420(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2d2e22(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x680(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c89ac5
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
movq 0x58(%rsp), %rcx
addq %rcx, %rax
addq $0x10, %rax
movq %rax, 0x258(%rsp)
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x310(%rsp)
leaq 0x49f780(%rip), %r12 # 0x21272e4
tzcntq %r13, %rax
movl 0x2(%r8), %ecx
movl 0x6(%r8,%rax,4), %edx
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq %rcx, 0x30(%rsp)
movq (%rax,%rcx,8), %rbx
movq %rdx, 0x38(%rsp)
movq %rdx, %rax
imulq 0x68(%rbx), %rax
movq 0x58(%rbx), %rcx
movl (%rcx,%rax), %edx
movq 0xa0(%rbx), %rcx
movq %rcx, %rax
imulq %rdx, %rax
leaq 0x1(%rdx), %r11
movq 0x100(%rbx), %rdi
movq 0x110(%rbx), %r9
imulq %r9, %rdx
vmovaps (%rdi,%rdx), %xmm8
movq 0x90(%rbx), %rdx
vmovaps (%rdx,%rax), %xmm1
imulq %r11, %rcx
vmovaps (%rdx,%rcx), %xmm7
imulq %r11, %r9
movl 0x248(%rbx), %r11d
vmovss (%rsi,%r15,4), %xmm0
vinsertps $0x1c, 0x10(%rsi,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%rsi,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovaps (%rdi,%r9), %xmm9
movslq %r11d, %r9
movq %r9, %rdi
shlq $0x6, %rdi
leaq (%rdi,%r9,4), %r14
vbroadcastss 0x2d5857(%rip), %xmm2 # 0x1f5d46c
vfnmadd132ps %xmm2, %xmm1, %xmm8 # xmm8 = -(xmm8 * xmm2) + xmm1
vfmadd132ps %xmm2, %xmm7, %xmm9 # xmm9 = (xmm9 * xmm2) + xmm7
vmovaps %xmm1, 0x60(%rsp)
vsubps %xmm0, %xmm1, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
movq 0x258(%rsp), %rdi
vmovaps (%rdi), %xmm4
vmovaps 0x10(%rdi), %xmm5
vmovaps 0x20(%rdi), %xmm6
leaq 0x4a1ab4(%rip), %rdi # 0x2129704
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm10
vfmadd231ps %xmm3, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm3) + xmm10
vfmadd231ps %xmm2, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm2) + xmm10
vsubps %xmm0, %xmm8, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm12
vfmadd231ps %xmm3, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm3) + xmm12
vfmadd231ps %xmm2, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm2) + xmm12
vsubps %xmm0, %xmm9, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm6, %xmm13
vfmadd231ps %xmm3, %xmm5, %xmm13 # xmm13 = (xmm5 * xmm3) + xmm13
vfmadd231ps %xmm2, %xmm4, %xmm13 # xmm13 = (xmm4 * xmm2) + xmm13
vmovaps %xmm7, 0xe0(%rsp)
vsubps %xmm0, %xmm7, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmulps %xmm0, %xmm6, %xmm15
vfmadd231ps %xmm2, %xmm5, %xmm15 # xmm15 = (xmm5 * xmm2) + xmm15
vfmadd231ps %xmm1, %xmm4, %xmm15 # xmm15 = (xmm4 * xmm1) + xmm15
vmovups (%r12,%r14), %ymm3
vbroadcastss %xmm10, %ymm1
vmovups 0x484(%r12,%r14), %ymm11
vbroadcastss 0x28aa17(%rip), %ymm0 # 0x1f12704
vpermps %ymm10, %ymm0, %ymm20
vbroadcastss %xmm12, %ymm21
vpermps %ymm12, %ymm0, %ymm22
vmovups 0x908(%r12,%r14), %ymm14
vbroadcastss %xmm13, %ymm24
vpermps %ymm13, %ymm0, %ymm25
vmovups 0xd8c(%r12,%r14), %ymm16
vbroadcastss %xmm15, %ymm26
vpermps %ymm15, %ymm0, %ymm27
vmulps %ymm16, %ymm26, %ymm5
vmulps %ymm16, %ymm27, %ymm4
vfmadd231ps %ymm24, %ymm14, %ymm5 # ymm5 = (ymm14 * ymm24) + ymm5
vfmadd231ps %ymm25, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm25) + ymm4
vfmadd231ps %ymm21, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm21) + ymm5
vfmadd231ps %ymm22, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm22) + ymm4
vfmadd231ps %ymm1, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm1) + ymm5
vmovups (%rdi,%r14), %ymm2
vmovups 0x484(%rdi,%r14), %ymm17
vmovups 0x908(%rdi,%r14), %ymm18
vfmadd231ps %ymm20, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm20) + ymm4
vmovups 0xd8c(%rdi,%r14), %ymm19
vmulps %ymm19, %ymm26, %ymm7
vmulps %ymm19, %ymm27, %ymm6
vfmadd231ps %ymm24, %ymm18, %ymm7 # ymm7 = (ymm18 * ymm24) + ymm7
vfmadd231ps %ymm25, %ymm18, %ymm6 # ymm6 = (ymm18 * ymm25) + ymm6
vmovaps %ymm21, 0x2a0(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm7 # ymm7 = (ymm17 * ymm21) + ymm7
vmovaps %ymm22, 0x280(%rsp)
vfmadd231ps %ymm22, %ymm17, %ymm6 # ymm6 = (ymm17 * ymm22) + ymm6
vmovaps %ymm1, 0x2e0(%rsp)
vfmadd231ps %ymm1, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm1) + ymm7
vmovaps %ymm20, 0x2c0(%rsp)
vfmadd231ps %ymm20, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm20) + ymm6
vsubps %ymm5, %ymm7, %ymm31
vsubps %ymm4, %ymm6, %ymm29
vmulps %ymm31, %ymm4, %ymm0
vmulps %ymm29, %ymm5, %ymm1
vsubps %ymm1, %ymm0, %ymm20
vshufps $0xff, %xmm8, %xmm8, %xmm0 # xmm0 = xmm8[3,3,3,3]
vbroadcastsd %xmm0, %ymm21
vshufps $0xff, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[3,3,3,3]
vbroadcastsd %xmm0, %ymm22
vbroadcastss 0xc(%rdx,%rcx), %ymm28
vmulps %ymm16, %ymm28, %ymm0
vfmadd231ps %ymm22, %ymm14, %ymm0 # ymm0 = (ymm14 * ymm22) + ymm0
vfmadd231ps %ymm21, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm21) + ymm0
vbroadcastss 0xc(%rdx,%rax), %ymm30
vfmadd231ps %ymm30, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm30) + ymm0
vmulps %ymm19, %ymm28, %ymm1
vmovaps %ymm22, 0x640(%rsp)
vfmadd231ps %ymm22, %ymm18, %ymm1 # ymm1 = (ymm18 * ymm22) + ymm1
vmovaps %ymm21, 0x660(%rsp)
vfmadd231ps %ymm21, %ymm17, %ymm1 # ymm1 = (ymm17 * ymm21) + ymm1
vfmadd231ps %ymm30, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm30) + ymm1
vmulps %ymm29, %ymm29, %ymm21
vfmadd231ps %ymm31, %ymm31, %ymm21 # ymm21 = (ymm31 * ymm31) + ymm21
vmaxps %ymm1, %ymm0, %ymm22
vmulps %ymm22, %ymm22, %ymm22
vmulps %ymm21, %ymm22, %ymm21
vmulps %ymm20, %ymm20, %ymm20
vcmpleps %ymm21, %ymm20, %k1
vmovaps %ymm30, 0x600(%rsp)
vinsertps $0x30, %xmm30, %xmm10, %xmm20 # xmm20 = xmm10[0,1,2],xmm30[0]
vbroadcastss 0x299028(%rip), %xmm21 # 0x1f20ec4
vandps %xmm21, %xmm20, %xmm20
vmovaps %xmm8, 0xf0(%rsp)
vblendps $0x8, %xmm8, %xmm12, %xmm8 # xmm8 = xmm12[0,1,2],xmm8[3]
vandps %xmm21, %xmm8, %xmm8
vmaxps %xmm8, %xmm20, %xmm8
vmovaps %xmm9, 0xd0(%rsp)
vblendps $0x8, %xmm9, %xmm13, %xmm9 # xmm9 = xmm13[0,1,2],xmm9[3]
vandps %xmm21, %xmm9, %xmm9
vmovaps %ymm28, 0x620(%rsp)
vinsertps $0x30, %xmm28, %xmm15, %xmm20 # xmm20 = xmm15[0,1,2],xmm28[0]
vandps %xmm21, %xmm20, %xmm20
vmaxps %xmm20, %xmm9, %xmm9
vmaxps %xmm9, %xmm8, %xmm8
vmovshdup %xmm8, %xmm9 # xmm9 = xmm8[1,1,3,3]
vmaxss %xmm8, %xmm9, %xmm9
vshufpd $0x1, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[1,0]
vmaxss %xmm9, %xmm8, %xmm8
vcvtsi2ss %r11d, %xmm23, %xmm9
vmovaps %xmm9, 0x320(%rsp)
vbroadcastss %xmm9, %ymm9
vcmpgtps 0x29901a(%rip), %ymm9, %k1 {%k1} # 0x1f20f40
vmulss 0x2690b4(%rip), %xmm8, %xmm22 # 0x1ef0fe4
vbroadcastss 0x298fa3(%rip), %ymm8 # 0x1f20edc
vpermps %ymm10, %ymm8, %ymm9
vmovaps %ymm9, 0x5a0(%rsp)
vpermps %ymm12, %ymm8, %ymm9
vmovaps %ymm9, 0x580(%rsp)
vpermps %ymm13, %ymm8, %ymm20
vpermps %ymm15, %ymm8, %ymm21
kortestb %k1, %k1
vmovss 0x30(%rsi,%r15,4), %xmm8
vmovaps %ymm20, 0x5e0(%rsp)
vmovaps %ymm21, 0x5c0(%rsp)
vmovaps %xmm22, 0x520(%rsp)
je 0x1c88739
vmovaps %xmm8, 0x260(%rsp)
vmulps %ymm19, %ymm21, %ymm8
vfmadd213ps %ymm8, %ymm20, %ymm18 # ymm18 = (ymm20 * ymm18) + ymm8
vmovaps 0x580(%rsp), %ymm23
vfmadd213ps %ymm18, %ymm23, %ymm17 # ymm17 = (ymm23 * ymm17) + ymm18
vmovaps 0x5a0(%rsp), %ymm19
vfmadd213ps %ymm17, %ymm19, %ymm2 # ymm2 = (ymm19 * ymm2) + ymm17
vmulps %ymm16, %ymm21, %ymm8
vfmadd213ps %ymm8, %ymm20, %ymm14 # ymm14 = (ymm20 * ymm14) + ymm8
vfmadd213ps %ymm14, %ymm23, %ymm11 # ymm11 = (ymm23 * ymm11) + ymm14
vmovups 0x1210(%r12,%r14), %ymm8
vmovups 0x1694(%r12,%r14), %ymm9
vmovups 0x1b18(%r12,%r14), %ymm12
vmovups 0x1f9c(%r12,%r14), %ymm13
vfmadd213ps %ymm11, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm3) + ymm11
vmulps %ymm13, %ymm26, %ymm11
vmulps %ymm13, %ymm27, %ymm10
vmulps %ymm13, %ymm21, %ymm13
vfmadd231ps %ymm24, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm25) + ymm10
vfmadd231ps %ymm12, %ymm20, %ymm13 # ymm13 = (ymm20 * ymm12) + ymm13
vmovaps 0x2a0(%rsp), %ymm12
vfmadd231ps %ymm12, %ymm9, %ymm11 # ymm11 = (ymm9 * ymm12) + ymm11
vmovaps %ymm12, %ymm16
vmovaps 0x280(%rsp), %ymm30
vfmadd231ps %ymm30, %ymm9, %ymm10 # ymm10 = (ymm9 * ymm30) + ymm10
vfmadd231ps %ymm9, %ymm23, %ymm13 # ymm13 = (ymm23 * ymm9) + ymm13
vmovaps 0x2e0(%rsp), %ymm28
vfmadd231ps %ymm28, %ymm8, %ymm11 # ymm11 = (ymm8 * ymm28) + ymm11
vmovaps 0x2c0(%rsp), %ymm18
vfmadd231ps %ymm18, %ymm8, %ymm10 # ymm10 = (ymm8 * ymm18) + ymm10
vmovups 0x1210(%rdi,%r14), %ymm9
vmovups 0x1b18(%rdi,%r14), %ymm14
vmovups 0x1f9c(%rdi,%r14), %ymm15
vfmadd231ps %ymm8, %ymm19, %ymm13 # ymm13 = (ymm19 * ymm8) + ymm13
vmovaps %ymm26, 0x1a0(%rsp)
vmulps %ymm15, %ymm26, %ymm8
vmovaps %ymm27, 0x180(%rsp)
vmulps %ymm15, %ymm27, %ymm12
vmulps %ymm15, %ymm21, %ymm15
vmovaps %ymm24, 0x1e0(%rsp)
vfmadd231ps %ymm24, %ymm14, %ymm8 # ymm8 = (ymm14 * ymm24) + ymm8
vmovaps %ymm25, 0x1c0(%rsp)
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm20, %ymm15 # ymm15 = (ymm20 * ymm14) + ymm15
vmovups 0x1694(%rdi,%r14), %ymm14
vfmadd231ps %ymm16, %ymm14, %ymm8 # ymm8 = (ymm14 * ymm16) + ymm8
vmovaps %ymm16, %ymm24
vfmadd231ps %ymm30, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm30) + ymm12
vfmadd231ps %ymm14, %ymm23, %ymm15 # ymm15 = (ymm23 * ymm14) + ymm15
vfmadd231ps %ymm28, %ymm9, %ymm8 # ymm8 = (ymm9 * ymm28) + ymm8
vfmadd231ps %ymm18, %ymm9, %ymm12 # ymm12 = (ymm9 * ymm18) + ymm12
vfmadd231ps %ymm9, %ymm19, %ymm15 # ymm15 = (ymm19 * ymm9) + ymm15
vbroadcastss 0x298dbb(%rip), %ymm16 # 0x1f20ec4
vandps %ymm16, %ymm11, %ymm9
vandps %ymm16, %ymm10, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vandps %ymm16, %ymm13, %ymm13
vmaxps %ymm13, %ymm9, %ymm9
vbroadcastss %xmm22, %ymm13
vcmpltps %ymm13, %ymm9, %k2
vmovaps %ymm31, %ymm11 {%k2}
vmovaps %ymm29, %ymm10 {%k2}
vandps %ymm16, %ymm8, %ymm9
vandps %ymm16, %ymm12, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vandps %ymm16, %ymm15, %ymm14
vmaxps %ymm14, %ymm9, %ymm9
vcmpltps %ymm13, %ymm9, %k2
vmovaps %ymm31, %ymm8 {%k2}
vmovaps %ymm29, %ymm12 {%k2}
vbroadcastss 0x298d4a(%rip), %ymm13 # 0x1f20ec0
vxorps %ymm13, %ymm11, %ymm9
vxorps %ymm13, %ymm8, %ymm13
vxorps %xmm19, %xmm19, %xmm19
vfmadd213ps %ymm19, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm19
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm14
vbroadcastss 0x26457b(%rip), %ymm16 # 0x1eec71c
vmulps %ymm16, %ymm11, %ymm11
vmulps %ymm11, %ymm14, %ymm11
vmulps %ymm14, %ymm14, %ymm15
vmulps %ymm11, %ymm15, %ymm15
vbroadcastss 0x264558(%rip), %ymm17 # 0x1eec718
vfmadd231ps %ymm14, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm14) + ymm15
vmulps %ymm15, %ymm10, %ymm11
vmulps %ymm9, %ymm15, %ymm10
vmulps %ymm19, %ymm15, %ymm14
vfmadd213ps %ymm19, %ymm8, %ymm8 # ymm8 = (ymm8 * ymm8) + ymm19
vfmadd231ps %ymm12, %ymm12, %ymm8 # ymm8 = (ymm12 * ymm12) + ymm8
vrsqrt14ps %ymm8, %ymm9
vmulps %ymm16, %ymm8, %ymm8
vmulps %ymm8, %ymm9, %ymm8
vmulps %ymm9, %ymm9, %ymm15
vmulps %ymm8, %ymm15, %ymm15
vfmadd231ps %ymm9, %ymm17, %ymm15 # ymm15 = (ymm17 * ymm9) + ymm15
vmulps %ymm15, %ymm12, %ymm8
vmulps %ymm13, %ymm15, %ymm9
vmulps %ymm19, %ymm15, %ymm12
vmovaps %ymm11, %ymm13
vfmadd213ps %ymm5, %ymm0, %ymm13 # ymm13 = (ymm0 * ymm13) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm14, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm14, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm14) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm14
vfmsub231ps %ymm12, %ymm10, %ymm14 # ymm14 = (ymm10 * ymm12) - ymm14
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm12) + ymm6
vfmadd231ps %ymm14, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm14) + ymm6
vcmpleps %ymm19, %ymm6, %k2
vmovaps %ymm13, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm19, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm28, %ymm20
vmovaps %ymm18, %ymm21
vmovaps %ymm24, %ymm22
je 0x1c895e0
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm19, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm19) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x26435c(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vmovaps 0x260(%rsp), %xmm8
vbroadcastss %xmm8, %ymm7
vcmpleps %ymm2, %ymm7, %k1
vcmpleps 0x80(%rsi,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
vmovaps 0x1e0(%rsp), %ymm24
vmovaps 0x1c0(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm27
je 0x1c887b7
vcmpneqps %ymm19, %ymm6, %k1
ktestb %k1, %k0
je 0x1c887b7
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x2642cf(%rip), %ymm6 # 0x1eec714
vsubps %ymm3, %ymm6, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm6, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x540(%rsp)
movzbl %al, %r12d
vmovaps %ymm2, %ymm3
testw %r12w, %r12w
je 0x1c88730
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0x58(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %r12b
je 0x1c88730
vbroadcastss 0x268522(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x540(%rsp), %ymm1
vfmadd132ps 0x268b3b(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x340(%rsp)
vmovaps %ymm1, 0x540(%rsp)
vmovaps %ymm1, 0x360(%rsp)
vmovaps %ymm3, 0x380(%rsp)
movl $0x0, 0x3a0(%rsp)
movl %r11d, 0x3a4(%rsp)
vmovaps 0x60(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovaps 0xf0(%rsp), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vmovdqa 0xe0(%rsp), %xmm0
vmovdqa %xmm0, 0x3e0(%rsp)
movb %r12b, 0x3f0(%rsp)
movl 0x90(%rsi,%r15,4), %eax
testl %eax, 0x34(%rbx)
je 0x1c88730
vaddps 0x2989e6(%rip), %ymm5, %ymm0 # 0x1f20f40
vmovss 0x2641b2(%rip), %xmm1 # 0x1eec714
vdivss 0x320(%rsp), %xmm1, %xmm1
vbroadcastss %xmm1, %ymm1
vmulps %ymm0, %ymm1, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x540(%rsp), %ymm0
vmovaps %ymm0, 0x420(%rsp)
vmovaps %ymm3, 0x440(%rsp)
kmovd %r12d, %k1
vbroadcastss 0x26347a(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %edx
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps %ymm5, 0x160(%rsp)
vmovaps %ymm3, 0x140(%rsp)
jne 0x1c89623
cmpq $0x0, 0x40(%rbx)
jne 0x1c89623
vmovss 0x400(%rsp,%rdx,4), %xmm0
vmovss 0x420(%rsp,%rdx,4), %xmm1
vmovss 0x2640ea(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x2689b2(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x268395(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x2689a4(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x268990(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0xe0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0xd0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vmovaps 0x160(%rsp), %ymm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xf0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd132ps 0x60(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovss 0x440(%rsp,%rdx,4), %xmm3
vmovss %xmm3, 0x80(%rsi,%r15,4)
vmovaps 0x140(%rsp), %ymm3
vmovss %xmm2, 0xc0(%rsi,%r15,4)
vextractps $0x1, %xmm2, 0xd0(%rsi,%r15,4)
vextractps $0x2, %xmm2, 0xe0(%rsi,%r15,4)
vmovss %xmm0, 0xf0(%rsi,%r15,4)
vmovss %xmm1, 0x100(%rsi,%r15,4)
movq 0x38(%rsp), %rax
movl %eax, 0x110(%rsi,%r15,4)
movq 0x30(%rsp), %rax
movl %eax, 0x120(%rsi,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%rsi,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rsi,%r15,4)
leaq 0x49ebad(%rip), %r12 # 0x21272e4
jmp 0x1c88771
vxorps %xmm19, %xmm19, %xmm19
vmovaps 0x160(%rsp), %ymm5
vmovaps 0x140(%rsp), %ymm3
vmovaps 0x2e0(%rsp), %ymm20
vmovaps 0x2c0(%rsp), %ymm21
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x280(%rsp), %ymm30
vmovaps %ymm3, 0x140(%rsp)
vmovaps %ymm5, 0x160(%rsp)
cmpl $0x9, %r11d
jge 0x1c887d1
leaq 0xff(%r13), %rax
vmovaps 0x680(%rsp), %ymm0
vcmpleps 0x80(%rsi,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %ecx
andl %eax, %r13d
andl %ecx, %r13d
jne 0x1c87b64
jmp 0x1c89ac5
xorl %r12d, %r12d
vmovaps 0x160(%rsp), %ymm5
vmovaps 0x140(%rsp), %ymm3
jmp 0x1c8846a
vpbroadcastd %r11d, %ymm31
vbroadcastss 0x520(%rsp), %ymm0
vmovaps %ymm0, 0x260(%rsp)
vbroadcastss %xmm8, %ymm0
vmovaps %ymm0, 0x520(%rsp)
vmovss 0x263f14(%rip), %xmm0 # 0x1eec714
vdivss 0x320(%rsp), %xmm0, %xmm0
vbroadcastss %xmm0, %ymm0
vmovaps %ymm0, 0x320(%rsp)
movq 0x30(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0xc0(%rsp)
movq 0x38(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x240(%rsp)
movl $0x8, %ebx
vpbroadcastd %ebx, %ymm0
vpor 0x2d20ce(%rip), %ymm0, %ymm0 # 0x1f5a920
vpcmpgtd %ymm0, %ymm31, %k1
leaq (%r14,%r12), %rcx
vmovups (%rcx,%rbx,4), %ymm3
vmovups 0x484(%rcx,%rbx,4), %ymm10
vmovups 0x908(%rcx,%rbx,4), %ymm11
vmovups 0xd8c(%rcx,%rbx,4), %ymm12
vmulps %ymm12, %ymm26, %ymm5
vmulps %ymm12, %ymm27, %ymm4
vmovaps 0x620(%rsp), %ymm1
vmulps %ymm1, %ymm12, %ymm0
vfmadd231ps %ymm24, %ymm11, %ymm5 # ymm5 = (ymm11 * ymm24) + ymm5
vfmadd231ps %ymm25, %ymm11, %ymm4 # ymm4 = (ymm11 * ymm25) + ymm4
vmovaps 0x640(%rsp), %ymm9
vfmadd231ps %ymm9, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm9) + ymm0
vfmadd231ps %ymm22, %ymm10, %ymm5 # ymm5 = (ymm10 * ymm22) + ymm5
vfmadd231ps %ymm30, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm30) + ymm4
vmovaps 0x660(%rsp), %ymm8
vfmadd231ps %ymm8, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm8) + ymm0
vfmadd231ps %ymm20, %ymm3, %ymm5 # ymm5 = (ymm3 * ymm20) + ymm5
vfmadd231ps %ymm21, %ymm3, %ymm4 # ymm4 = (ymm3 * ymm21) + ymm4
leaq (%r14,%rdi), %rax
vmovups (%rax,%rbx,4), %ymm2
vmovups 0x484(%rax,%rbx,4), %ymm13
vmovaps 0x600(%rsp), %ymm16
vfmadd231ps %ymm16, %ymm3, %ymm0 # ymm0 = (ymm3 * ymm16) + ymm0
vmovups 0x908(%rax,%rbx,4), %ymm14
vmovups 0xd8c(%rax,%rbx,4), %ymm15
vmulps %ymm15, %ymm26, %ymm7
vmulps %ymm15, %ymm27, %ymm6
vmulps %ymm1, %ymm15, %ymm1
vfmadd231ps %ymm24, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm24) + ymm7
vfmadd231ps %ymm25, %ymm14, %ymm6 # ymm6 = (ymm14 * ymm25) + ymm6
vfmadd231ps %ymm9, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm9) + ymm1
vfmadd231ps %ymm22, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm22) + ymm7
vfmadd231ps %ymm30, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm30) + ymm6
vfmadd231ps %ymm8, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm8) + ymm1
vfmadd231ps %ymm20, %ymm2, %ymm7 # ymm7 = (ymm2 * ymm20) + ymm7
vfmadd231ps %ymm21, %ymm2, %ymm6 # ymm6 = (ymm2 * ymm21) + ymm6
vfmadd231ps %ymm16, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm16) + ymm1
vmaxps %ymm1, %ymm0, %ymm16
vsubps %ymm5, %ymm7, %ymm9
vsubps %ymm4, %ymm6, %ymm8
vmulps %ymm9, %ymm4, %ymm17
vmovaps %ymm30, %ymm23
vmulps %ymm8, %ymm5, %ymm18
vsubps %ymm18, %ymm17, %ymm17
vmulps %ymm8, %ymm8, %ymm18
vfmadd231ps %ymm9, %ymm9, %ymm18 # ymm18 = (ymm9 * ymm9) + ymm18
vmulps %ymm16, %ymm16, %ymm16
vmulps %ymm18, %ymm16, %ymm16
vmulps %ymm17, %ymm17, %ymm17
vcmpleps %ymm16, %ymm17, %k1 {%k1}
kortestb %k1, %k1
je 0x1c890d4
vmovaps 0x5c0(%rsp), %ymm29
vmulps %ymm15, %ymm29, %ymm15
vmovaps 0x5e0(%rsp), %ymm18
vfmadd213ps %ymm15, %ymm18, %ymm14 # ymm14 = (ymm18 * ymm14) + ymm15
vmovaps 0x580(%rsp), %ymm30
vfmadd213ps %ymm14, %ymm30, %ymm13 # ymm13 = (ymm30 * ymm13) + ymm14
vmovaps 0x5a0(%rsp), %ymm28
vfmadd213ps %ymm13, %ymm28, %ymm2 # ymm2 = (ymm28 * ymm2) + ymm13
vmulps %ymm12, %ymm29, %ymm12
vfmadd213ps %ymm12, %ymm18, %ymm11 # ymm11 = (ymm18 * ymm11) + ymm12
vfmadd213ps %ymm11, %ymm30, %ymm10 # ymm10 = (ymm30 * ymm10) + ymm11
vfmadd213ps %ymm10, %ymm28, %ymm3 # ymm3 = (ymm28 * ymm3) + ymm10
vmovups 0x1210(%rcx,%rbx,4), %ymm12
vmovups 0x1694(%rcx,%rbx,4), %ymm13
vmovups 0x1b18(%rcx,%rbx,4), %ymm14
vmovups 0x1f9c(%rcx,%rbx,4), %ymm15
vmulps %ymm15, %ymm26, %ymm11
vmulps %ymm15, %ymm27, %ymm10
vmulps %ymm15, %ymm29, %ymm15
vfmadd231ps %ymm24, %ymm14, %ymm11 # ymm11 = (ymm14 * ymm24) + ymm11
vfmadd231ps %ymm25, %ymm14, %ymm10 # ymm10 = (ymm14 * ymm25) + ymm10
vfmadd231ps %ymm14, %ymm18, %ymm15 # ymm15 = (ymm18 * ymm14) + ymm15
vfmadd231ps %ymm22, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm22) + ymm11
vfmadd231ps %ymm23, %ymm13, %ymm10 # ymm10 = (ymm13 * ymm23) + ymm10
vfmadd231ps %ymm13, %ymm30, %ymm15 # ymm15 = (ymm30 * ymm13) + ymm15
vfmadd231ps %ymm20, %ymm12, %ymm11 # ymm11 = (ymm12 * ymm20) + ymm11
vfmadd231ps %ymm21, %ymm12, %ymm10 # ymm10 = (ymm12 * ymm21) + ymm10
vfmadd231ps %ymm12, %ymm28, %ymm15 # ymm15 = (ymm28 * ymm12) + ymm15
vmovups 0x1210(%rax,%rbx,4), %ymm13
vmovups 0x1b18(%rax,%rbx,4), %ymm14
vmovups 0x1f9c(%rax,%rbx,4), %ymm16
vmulps %ymm16, %ymm26, %ymm17
vmulps %ymm16, %ymm27, %ymm12
vmulps %ymm16, %ymm29, %ymm16
vfmadd231ps %ymm24, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm24) + ymm17
vfmadd231ps %ymm25, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm25) + ymm12
vfmadd231ps %ymm14, %ymm18, %ymm16 # ymm16 = (ymm18 * ymm14) + ymm16
vmovups 0x1694(%rax,%rbx,4), %ymm14
vfmadd231ps %ymm22, %ymm14, %ymm17 # ymm17 = (ymm14 * ymm22) + ymm17
vfmadd231ps %ymm23, %ymm14, %ymm12 # ymm12 = (ymm14 * ymm23) + ymm12
vfmadd231ps %ymm14, %ymm30, %ymm16 # ymm16 = (ymm30 * ymm14) + ymm16
vfmadd231ps %ymm20, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm20) + ymm17
vfmadd231ps %ymm21, %ymm13, %ymm12 # ymm12 = (ymm13 * ymm21) + ymm12
vfmadd231ps %ymm13, %ymm28, %ymm16 # ymm16 = (ymm28 * ymm13) + ymm16
vbroadcastss 0x2983f0(%rip), %ymm18 # 0x1f20ec4
vandps %ymm18, %ymm11, %ymm13
vandps %ymm18, %ymm10, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm15, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vmovaps 0x260(%rsp), %ymm15
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm11 {%k2}
vmovaps %ymm8, %ymm10 {%k2}
vandps %ymm18, %ymm17, %ymm13
vandps %ymm18, %ymm12, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vandps %ymm18, %ymm16, %ymm14
vmaxps %ymm14, %ymm13, %ymm13
vcmpltps %ymm15, %ymm13, %k2
vmovaps %ymm9, %ymm17 {%k2}
vmovaps %ymm8, %ymm12 {%k2}
vbroadcastss 0x29837c(%rip), %ymm9 # 0x1f20ec0
vxorps %ymm9, %ymm11, %ymm8
vxorps %ymm9, %ymm17, %ymm9
vfmadd213ps %ymm19, %ymm11, %ymm11 # ymm11 = (ymm11 * ymm11) + ymm19
vfmadd231ps %ymm10, %ymm10, %ymm11 # ymm11 = (ymm10 * ymm10) + ymm11
vrsqrt14ps %ymm11, %ymm13
vbroadcastss 0x263bb3(%rip), %ymm15 # 0x1eec71c
vmulps %ymm15, %ymm11, %ymm11
vmulps %ymm11, %ymm13, %ymm11
vmulps %ymm13, %ymm13, %ymm14
vmulps %ymm11, %ymm14, %ymm14
vbroadcastss 0x263b91(%rip), %ymm16 # 0x1eec718
vfmadd231ps %ymm13, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm13) + ymm14
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm8, %ymm14, %ymm10
vmulps %ymm19, %ymm14, %ymm13
vfmadd213ps %ymm19, %ymm17, %ymm17 # ymm17 = (ymm17 * ymm17) + ymm19
vfmadd231ps %ymm12, %ymm12, %ymm17 # ymm17 = (ymm12 * ymm12) + ymm17
vrsqrt14ps %ymm17, %ymm8
vmulps %ymm15, %ymm17, %ymm14
vmulps %ymm14, %ymm8, %ymm14
vmulps %ymm8, %ymm8, %ymm15
vmulps %ymm14, %ymm15, %ymm14
vfmadd231ps %ymm8, %ymm16, %ymm14 # ymm14 = (ymm16 * ymm8) + ymm14
vmulps %ymm14, %ymm12, %ymm8
vmulps %ymm9, %ymm14, %ymm9
vmulps %ymm19, %ymm14, %ymm12
vmovaps %ymm11, %ymm14
vfmadd213ps %ymm5, %ymm0, %ymm14 # ymm14 = (ymm0 * ymm14) + ymm5
vmovaps %ymm10, %ymm15
vfmadd213ps %ymm4, %ymm0, %ymm15 # ymm15 = (ymm0 * ymm15) + ymm4
vmovaps %ymm13, %ymm16
vfmadd213ps %ymm3, %ymm0, %ymm16 # ymm16 = (ymm0 * ymm16) + ymm3
vmovaps %ymm8, %ymm17
vfmadd213ps %ymm7, %ymm1, %ymm17 # ymm17 = (ymm1 * ymm17) + ymm7
vfnmadd213ps %ymm5, %ymm0, %ymm11 # ymm11 = -(ymm0 * ymm11) + ymm5
vmovaps %ymm9, %ymm5
vfmadd213ps %ymm6, %ymm1, %ymm5 # ymm5 = (ymm1 * ymm5) + ymm6
vfnmadd213ps %ymm4, %ymm0, %ymm10 # ymm10 = -(ymm0 * ymm10) + ymm4
vmovaps %ymm12, %ymm4
vfmadd213ps %ymm2, %ymm1, %ymm4 # ymm4 = (ymm1 * ymm4) + ymm2
vfnmadd231ps %ymm13, %ymm0, %ymm3 # ymm3 = -(ymm0 * ymm13) + ymm3
vfnmadd213ps %ymm7, %ymm1, %ymm8 # ymm8 = -(ymm1 * ymm8) + ymm7
vfnmadd213ps %ymm6, %ymm1, %ymm9 # ymm9 = -(ymm1 * ymm9) + ymm6
vfnmadd231ps %ymm12, %ymm1, %ymm2 # ymm2 = -(ymm1 * ymm12) + ymm2
vsubps %ymm11, %ymm17, %ymm6
vsubps %ymm10, %ymm5, %ymm7
vsubps %ymm3, %ymm4, %ymm12
vmulps %ymm3, %ymm7, %ymm13
vfmsub231ps %ymm12, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm12) - ymm13
vmulps %ymm11, %ymm12, %ymm12
vfmsub231ps %ymm6, %ymm3, %ymm12 # ymm12 = (ymm3 * ymm6) - ymm12
vmulps %ymm6, %ymm10, %ymm6
vfmsub231ps %ymm7, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm7) - ymm6
vfmadd231ps %ymm12, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm12) + ymm6
vfmadd231ps %ymm13, %ymm19, %ymm6 # ymm6 = (ymm19 * ymm13) + ymm6
vcmpleps %ymm19, %ymm6, %k2
vmovaps %ymm14, %ymm8 {%k2}
vmovaps %ymm15, %ymm9 {%k2}
vmovaps %ymm16, %ymm2 {%k2}
vblendmps %ymm17, %ymm11, %ymm13 {%k2}
vblendmps %ymm5, %ymm10, %ymm14 {%k2}
vblendmps %ymm4, %ymm3, %ymm15 {%k2}
vmovaps %ymm11, %ymm17 {%k2}
vmovaps %ymm10, %ymm5 {%k2}
vmovaps %ymm3, %ymm4 {%k2}
vsubps %ymm8, %ymm17, %ymm6
vsubps %ymm9, %ymm5, %ymm5
vsubps %ymm2, %ymm4, %ymm11
vsubps %ymm13, %ymm8, %ymm7
vsubps %ymm14, %ymm9, %ymm10
vsubps %ymm15, %ymm2, %ymm12
vmulps %ymm8, %ymm11, %ymm4
vfmsub231ps %ymm6, %ymm2, %ymm4 # ymm4 = (ymm2 * ymm6) - ymm4
vmulps %ymm6, %ymm9, %ymm3
vfmsub231ps %ymm5, %ymm8, %ymm3 # ymm3 = (ymm8 * ymm5) - ymm3
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm2, %ymm5, %ymm4
vfmsub231ps %ymm11, %ymm9, %ymm4 # ymm4 = (ymm9 * ymm11) - ymm4
vfmadd231ps %ymm4, %ymm19, %ymm3 # ymm3 = (ymm19 * ymm4) + ymm3
vmulps %ymm13, %ymm12, %ymm16
vfmsub231ps %ymm15, %ymm7, %ymm16 # ymm16 = (ymm7 * ymm15) - ymm16
vmulps %ymm15, %ymm10, %ymm15
vfmsub231ps %ymm12, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm12) - ymm15
vmulps %ymm7, %ymm14, %ymm4
vfmsub231ps %ymm13, %ymm10, %ymm4 # ymm4 = (ymm10 * ymm13) - ymm4
vfmadd231ps %ymm16, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm16) + ymm4
vfmadd231ps %ymm15, %ymm19, %ymm4 # ymm4 = (ymm19 * ymm15) + ymm4
vmaxps %ymm4, %ymm3, %ymm13
vcmpleps %ymm19, %ymm13, %k0 {%k1}
kortestb %k0, %k0
vmovaps %ymm23, %ymm30
je 0x1c89110
vmulps %ymm5, %ymm12, %ymm13
vfmsub231ps %ymm11, %ymm10, %ymm13 # ymm13 = (ymm10 * ymm11) - ymm13
vmulps %ymm7, %ymm11, %ymm11
vfmsub231ps %ymm12, %ymm6, %ymm11 # ymm11 = (ymm6 * ymm12) - ymm11
vmulps %ymm6, %ymm10, %ymm10
vfmsub231ps %ymm5, %ymm7, %ymm10 # ymm10 = (ymm7 * ymm5) - ymm10
vxorps %xmm6, %xmm6, %xmm6
vfmadd213ps %ymm10, %ymm11, %ymm6 # ymm6 = (ymm11 * ymm6) + ymm10
vfmadd231ps %ymm19, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm19) + ymm6
vrcp14ps %ymm6, %ymm7
vmovaps %ymm7, %ymm5
vfnmadd213ps 0x2639a0(%rip){1to8}, %ymm6, %ymm5 # ymm5 = -(ymm6 * ymm5) + mem
vfmadd132ps %ymm7, %ymm7, %ymm5 # ymm5 = (ymm5 * ymm7) + ymm7
vmulps %ymm2, %ymm10, %ymm2
vfmadd231ps %ymm11, %ymm9, %ymm2 # ymm2 = (ymm9 * ymm11) + ymm2
vfmadd231ps %ymm13, %ymm8, %ymm2 # ymm2 = (ymm8 * ymm13) + ymm2
vmulps %ymm5, %ymm2, %ymm2
vcmpgeps 0x520(%rsp), %ymm2, %k1
vcmpleps 0x80(%rsi,%r15,4){1to8}, %ymm2, %k1 {%k1}
kandb %k0, %k1, %k0
kortestb %k0, %k0
je 0x1c89110
vcmpneqps %ymm19, %ymm6, %k1
ktestb %k1, %k0
je 0x1c89110
kandb %k1, %k0, %k0
kmovd %k0, %eax
vmulps %ymm5, %ymm3, %ymm3
vmulps %ymm5, %ymm4, %ymm4
vbroadcastss 0x26393f(%rip), %ymm6 # 0x1eec714
vsubps %ymm3, %ymm6, %ymm5
vmovaps %ymm3, %ymm5 {%k2}
vsubps %ymm4, %ymm6, %ymm3
vmovaps %ymm4, %ymm3 {%k2}
vmovaps %ymm3, 0x500(%rsp)
movzbl %al, %edx
vmovaps %ymm2, %ymm3
testw %dx, %dx
je 0x1c890ec
vsubps %ymm0, %ymm1, %ymm1
vfmadd213ps %ymm0, %ymm5, %ymm1 # ymm1 = (ymm5 * ymm1) + ymm0
vaddps %ymm1, %ymm1, %ymm0
movq 0x58(%rsp), %rax
vmulps (%rax,%r15,4){1to8}, %ymm0, %ymm0
vcmpnleps %ymm0, %ymm3, %k0
kmovd %k0, %eax
andb %al, %dl
je 0x1c890ec
vbroadcastss 0x267b95(%rip), %ymm0 # 0x1ef09cc
vmovaps 0x500(%rsp), %ymm1
vfmadd132ps 0x2681ae(%rip){1to8}, %ymm0, %ymm1 # ymm1 = (ymm1 * mem) + ymm0
vmovaps %ymm5, 0x340(%rsp)
vmovaps %ymm1, 0x500(%rsp)
vmovaps %ymm1, 0x360(%rsp)
vmovaps %ymm3, 0x380(%rsp)
movl %ebx, 0x3a0(%rsp)
movl %r11d, 0x3a4(%rsp)
vmovaps 0x60(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovaps 0xf0(%rsp), %xmm0
vmovaps %xmm0, 0x3c0(%rsp)
vmovaps 0xd0(%rsp), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vmovdqa 0xe0(%rsp), %xmm0
vmovdqa %xmm0, 0x3e0(%rsp)
movb %dl, 0x3f0(%rsp)
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq 0x30(%rsp), %rcx
movq (%rax,%rcx,8), %r9
movl 0x90(%rsi,%r15,4), %eax
testl %eax, 0x34(%r9)
je 0x1c890cb
vmovaps %ymm5, 0x120(%rsp)
vaddps 0x298041(%rip), %ymm5, %ymm0 # 0x1f20f40
vcvtsi2ss %ebx, %xmm18, %xmm1
vbroadcastss %xmm1, %ymm1
vaddps %ymm0, %ymm1, %ymm0
vmulps 0x320(%rsp), %ymm0, %ymm0
vmovaps %ymm0, 0x400(%rsp)
vmovaps 0x500(%rsp), %ymm0
vmovaps %ymm0, 0x420(%rsp)
vmovaps %ymm3, 0x440(%rsp)
kmovd %edx, %k1
vbroadcastss 0x262ad8(%rip), %ymm0 # 0x1eeba20
vmovaps %ymm3, 0x100(%rsp)
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %dl, %al
movzbl %al, %eax
movzbl %dl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r12d
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c89129
cmpq $0x0, 0x40(%r9)
jne 0x1c89129
vmovss 0x400(%rsp,%r12,4), %xmm0
vmovss 0x420(%rsp,%r12,4), %xmm1
vmovss 0x263750(%rip), %xmm2 # 0x1eec714
vsubss %xmm0, %xmm2, %xmm2
vmulss %xmm2, %xmm2, %xmm3
vmulss %xmm2, %xmm0, %xmm2
vmulss 0x268018(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2679fb(%rip), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vmulss %xmm0, %xmm0, %xmm5
vfmsub132ss 0x26800a(%rip), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) - xmm5
vmovss 0x267ff6(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm2, %xmm2
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0xe0(%rsp), %xmm5, %xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0xd0(%rsp), %xmm5, %xmm2 # xmm2 = (xmm2 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0xf0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm3 * mem) + xmm2
vbroadcastss %xmm4, %xmm2
vfmadd132ps 0x60(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vmovss 0x440(%rsp,%r12,4), %xmm3
vmovss %xmm3, 0x80(%rsi,%r15,4)
vmovss %xmm2, 0xc0(%rsi,%r15,4)
vextractps $0x1, %xmm2, 0xd0(%rsi,%r15,4)
vextractps $0x2, %xmm2, 0xe0(%rsi,%r15,4)
vmovss %xmm0, 0xf0(%rsi,%r15,4)
vmovss %xmm1, 0x100(%rsi,%r15,4)
movq 0x38(%rsp), %rax
movl %eax, 0x110(%rsi,%r15,4)
movq 0x30(%rsp), %rax
movl %eax, 0x120(%rsi,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%rsi,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%rsi,%r15,4)
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
leaq 0x49e212(%rip), %r12 # 0x21272e4
jmp 0x1c890ec
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
vmovaps %ymm23, %ymm30
vmovaps %ymm3, 0x100(%rsp)
vmovaps %ymm5, 0x120(%rsp)
addq $0x8, %rbx
cmpl %ebx, %r11d
jg 0x1c88844
jmp 0x1c88789
xorl %edx, %edx
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
jmp 0x1c88df9
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps 0x3d0(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x3e0(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
movq %r9, 0xa8(%rsp)
movq %r8, 0x28(%rsp)
movq %r10, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
movl %r11d, 0x14(%rsp)
vmovaps %ymm24, 0x1e0(%rsp)
vmovaps %ymm25, 0x1c0(%rsp)
vmovaps %ymm26, 0x1a0(%rsp)
vmovaps %ymm27, 0x180(%rsp)
vmovdqa64 %ymm31, 0x560(%rsp)
vmovss 0x80(%rsi,%r15,4), %xmm0
vmovss %xmm0, 0x230(%rsp)
vmovss 0x440(%rsp,%r12,4), %xmm0
vbroadcastss 0x400(%rsp,%r12,4), %xmm1
vbroadcastss 0x420(%rsp,%r12,4), %xmm2
vmovss %xmm0, 0x80(%rsi,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x26352a(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x267df2(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2677d5(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x267de4(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x267dd0(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x200(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x210(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x220(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd132ps 0x60(%rsp), %xmm3, %xmm0 # xmm0 = (xmm0 * mem) + xmm3
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x470(%rsp)
vmovaps %xmm4, 0x480(%rsp)
vmovaps %xmm0, 0x490(%rsp)
vmovaps %xmm1, 0x4a0(%rsp)
vmovaps %xmm2, 0x4b0(%rsp)
vmovaps 0x240(%rsp), %xmm0
vmovaps %xmm0, 0x4c0(%rsp)
vmovdqa 0xc0(%rsp), %xmm0
vmovdqa %xmm0, 0x4d0(%rsp)
leaq 0x4e0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x4e0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x4f0(%rsp)
vmovaps 0x310(%rsp), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x40(%rsp), %rcx
movq %rcx, 0x78(%rsp)
movq 0x18(%r9), %rcx
movq %rcx, 0x80(%rsp)
movq %rax, 0x88(%rsp)
movq %rsi, 0x90(%rsp)
leaq 0x470(%rsp), %rax
movq %rax, 0x98(%rsp)
movl $0x4, 0xa0(%rsp)
movq 0x40(%r9), %rax
testq %rax, %rax
movl %edx, 0xb0(%rsp)
je 0x1c893d3
leaq 0x78(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xa8(%rsp), %r9
movl 0xb0(%rsp), %edx
vmovdqa64 0x560(%rsp), %ymm31
vmovaps 0x180(%rsp), %ymm27
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x1c0(%rsp), %ymm25
vmovaps 0x1e0(%rsp), %ymm24
vmovaps 0x280(%rsp), %ymm30
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x2c0(%rsp), %ymm21
vmovaps 0x2e0(%rsp), %ymm20
movl 0x14(%rsp), %r11d
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a0340(%rip), %rdi # 0x2129704
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %r10
movq 0x28(%rsp), %r8
vmovdqa 0x40(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
vmovaps 0x120(%rsp), %ymm5
vmovaps 0x100(%rsp), %ymm3
je 0x1c89546
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c894b0
testb $0x2, (%rcx)
jne 0x1c8941c
testb $0x40, 0x3e(%r9)
je 0x1c894b0
leaq 0x78(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xa8(%rsp), %r9
movl 0xb0(%rsp), %edx
vmovdqa64 0x560(%rsp), %ymm31
vmovaps 0x180(%rsp), %ymm27
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x1c0(%rsp), %ymm25
vmovaps 0x1e0(%rsp), %ymm24
vmovaps 0x280(%rsp), %ymm30
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x2c0(%rsp), %ymm21
vmovaps 0x2e0(%rsp), %ymm20
movl 0x14(%rsp), %r11d
vmovaps 0x100(%rsp), %ymm3
vmovaps 0x120(%rsp), %ymm5
vxorps %xmm19, %xmm19, %xmm19
leaq 0x4a0263(%rip), %rdi # 0x2129704
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %r10
movq 0x28(%rsp), %r8
vmovdqa 0x40(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c89546
movq 0x90(%rsp), %rax
movq 0x98(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c89559
vmovd 0x230(%rsp), %xmm0
vmovd %xmm0, 0x80(%rsi,%r15,4)
movl $0x1, %eax
shlxl %r12d, %eax, %eax
kmovd %eax, %k0
movzbl %dl, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x80(%rsi,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %edx
ktestb %k1, %k0
je 0x1c895d3
kmovd %edx, %k1
vbroadcastss 0x26248a(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %dl, %al
movzbl %al, %eax
movzbl %dl, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %r12d
testb %dl, %dl
jne 0x1c891a3
jmp 0x1c890cb
xorl %r12d, %r12d
vmovaps 0x160(%rsp), %ymm5
vmovaps 0x140(%rsp), %ymm3
vmovaps 0x1e0(%rsp), %ymm24
vmovaps 0x1c0(%rsp), %ymm25
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x180(%rsp), %ymm27
vmovaps 0x260(%rsp), %xmm8
jmp 0x1c8846a
movq 0x30(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0xb0(%rsp)
movq 0x38(%rsp), %rax
vpbroadcastd %eax, %xmm0
vmovdqa %xmm0, 0x230(%rsp)
vmovaps 0x3c0(%rsp), %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps 0x3d0(%rsp), %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps 0x3e0(%rsp), %xmm0
vmovaps %xmm0, 0x200(%rsp)
movq %r8, 0x28(%rsp)
movq %r10, 0x20(%rsp)
movq %rsi, 0x18(%rsp)
movl %r11d, 0x14(%rsp)
vmovss 0x80(%rsi,%r15,4), %xmm0
vmovss %xmm0, 0x240(%rsp)
vmovss 0x440(%rsp,%rdx,4), %xmm0
vbroadcastss 0x400(%rsp,%rdx,4), %xmm1
vbroadcastss 0x420(%rsp,%rdx,4), %xmm2
vmovss %xmm0, 0x80(%rsi,%r15,4)
movq 0x8(%r10), %rax
vmovss 0x263039(%rip), %xmm0 # 0x1eec714
vsubss %xmm1, %xmm0, %xmm0
vmulss %xmm0, %xmm0, %xmm3
vmulss %xmm0, %xmm1, %xmm0
vmulss 0x267901(%rip), %xmm3, %xmm4 # 0x1ef0ff0
vfmadd231ss 0x2672e4(%rip), %xmm0, %xmm3 # xmm3 = (xmm0 * mem) + xmm3
vmulss %xmm1, %xmm1, %xmm5
vfmsub132ss 0x2678f3(%rip), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) - xmm5
vmovss 0x2678df(%rip), %xmm6 # 0x1ef0fec
vmulss %xmm6, %xmm3, %xmm3
vmulss %xmm6, %xmm0, %xmm0
vmulss %xmm6, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps 0x200(%rsp), %xmm5, %xmm5
vbroadcastss %xmm0, %xmm0
vfmadd132ps 0x210(%rsp), %xmm5, %xmm0 # xmm0 = (xmm0 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x220(%rsp), %xmm0, %xmm3 # xmm3 = (xmm3 * mem) + xmm0
vbroadcastss %xmm4, %xmm0
vfmadd132ps 0x60(%rsp), %xmm3, %xmm0 # xmm0 = (xmm0 * mem) + xmm3
vbroadcastss %xmm0, %xmm3
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps %xmm3, 0x470(%rsp)
vmovaps %xmm4, 0x480(%rsp)
vmovaps %xmm0, 0x490(%rsp)
vmovaps %xmm1, 0x4a0(%rsp)
vmovaps %xmm2, 0x4b0(%rsp)
vmovaps 0x230(%rsp), %xmm0
vmovaps %xmm0, 0x4c0(%rsp)
vmovdqa 0xb0(%rsp), %xmm0
vmovdqa %xmm0, 0x4d0(%rsp)
leaq 0x4e0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x4e0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x4f0(%rsp)
vmovaps 0x310(%rsp), %xmm0
vmovaps %xmm0, 0x40(%rsp)
leaq 0x40(%rsp), %rcx
movq %rcx, 0x78(%rsp)
movq 0x18(%rbx), %rcx
movq %rcx, 0x80(%rsp)
movq %rax, 0x88(%rsp)
movq %rsi, 0x90(%rsp)
leaq 0x470(%rsp), %rax
movq %rax, 0x98(%rsp)
movl $0x4, 0xa0(%rsp)
movq 0x40(%rbx), %rax
testq %rax, %rax
movq %rdx, 0xc0(%rsp)
je 0x1c898bb
leaq 0x78(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xc0(%rsp), %rdx
vmovaps 0x260(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm27
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x1c0(%rsp), %ymm25
vmovaps 0x1e0(%rsp), %ymm24
vmovaps 0x280(%rsp), %ymm30
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x2c0(%rsp), %ymm21
vmovaps 0x2e0(%rsp), %ymm20
movl 0x14(%rsp), %r11d
vxorps %xmm19, %xmm19, %xmm19
leaq 0x49fe58(%rip), %rdi # 0x2129704
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %r10
movq 0x28(%rsp), %r8
vmovdqa 0x40(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
vmovaps 0x160(%rsp), %ymm5
vmovaps 0x140(%rsp), %ymm3
je 0x1c89a27
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c89991
testb $0x2, (%rcx)
jne 0x1c89903
testb $0x40, 0x3e(%rbx)
je 0x1c89991
leaq 0x78(%rsp), %rdi
vzeroupper
callq *%rax
movq 0xc0(%rsp), %rdx
vmovaps 0x260(%rsp), %xmm8
vmovaps 0x180(%rsp), %ymm27
vmovaps 0x1a0(%rsp), %ymm26
vmovaps 0x1c0(%rsp), %ymm25
vmovaps 0x1e0(%rsp), %ymm24
vmovaps 0x280(%rsp), %ymm30
vmovaps 0x2a0(%rsp), %ymm22
vmovaps 0x2c0(%rsp), %ymm21
vmovaps 0x2e0(%rsp), %ymm20
movl 0x14(%rsp), %r11d
vmovaps 0x140(%rsp), %ymm3
vmovaps 0x160(%rsp), %ymm5
vxorps %xmm19, %xmm19, %xmm19
leaq 0x49fd82(%rip), %rdi # 0x2129704
movq 0x18(%rsp), %rsi
movq 0x20(%rsp), %r10
movq 0x28(%rsp), %r8
vmovdqa 0x40(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c89a27
movq 0x90(%rsp), %rax
movq 0x98(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c89a3a
vmovd 0x240(%rsp), %xmm0
vmovd %xmm0, 0x80(%rsi,%r15,4)
movl $0x1, %eax
shlxl %edx, %eax, %eax
kmovd %eax, %k0
movzbl %r12b, %eax
kmovd %eax, %k1
kandnb %k1, %k0, %k0
vcmpleps 0x80(%rsi,%r15,4){1to8}, %ymm3, %k1
kandb %k1, %k0, %k2
kmovd %k2, %r12d
ktestb %k1, %k0
je 0x1c89ab7
kmovd %r12d, %k1
vbroadcastss 0x261fa7(%rip), %ymm0 # 0x1eeba20
vblendmps %ymm3, %ymm0, %ymm0 {%k1}
vshufps $0xb1, %ymm0, %ymm0, %ymm1 # ymm1 = ymm0[1,0,3,2,5,4,7,6]
vminps %ymm1, %ymm0, %ymm1
vshufpd $0x5, %ymm1, %ymm1, %ymm2 # ymm2 = ymm1[1,0,3,2]
vminps %ymm2, %ymm1, %ymm1
vpermpd $0x4e, %ymm1, %ymm2 # ymm2 = ymm1[2,3,0,1]
vminps %ymm2, %ymm1, %ymm1
vcmpeqps %ymm1, %ymm0, %k0
kmovd %k0, %eax
andb %r12b, %al
movzbl %al, %eax
movzbl %r12b, %ecx
cmovnel %eax, %ecx
tzcntl %ecx, %edx
testb %r12b, %r12b
jne 0x1c89695
jmp 0x1c88730
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersector1<8>::intersect_hn<embree::avx512::OrientedCurve1Intersector1<embree::HermiteCurveT, 7, 8>, embree::avx512::Intersect1Epilog1<true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayHitK<1>&, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersects a single ray against a packed block of up to M=8 oriented Hermite
// curve segments (CurveNi<8>), dispatching each surviving lane to the scalar
// curve intersector.
//   pre     - per-ray precalculations (ray-space transform etc.)
//   ray     - the ray/hit record; ray.tfar is tightened as hits are committed
//   context - query context giving access to the scene
//   prim    - the 8-wide curve primitive block being tested
static __forceinline void intersect_hn(const Precalculations& pre, RayHit& ray, RayQueryContext* context, const Primitive& prim)
{
  vfloat<M> tNear;
  // Vectorized slab test over all M curve bounds at once; fills tNear with the
  // per-lane entry distances and returns the mask of lanes whose bounds the
  // ray enters.
  vbool<M> valid = intersect(ray,prim,tNear);
  const size_t N = prim.N;
  size_t mask = movemask(valid);
  // Process each surviving lane one at a time, nearest-first is NOT guaranteed;
  // correctness relies on re-filtering against ray.tfar below.
  while (mask)
  {
    // bscf extracts the index of one set bit and clears it from mask
    // (bit-scan-and-clear-first), so the loop visits each candidate lane once.
    const size_t i = bscf(mask);
    STAT3(normal.trav_prims,1,1,1);  // statistics counter (no-op unless stats enabled)
    // geomID is shared by the whole block; primID is per-lane.
    const unsigned int geomID = prim.geomID(N);
    const unsigned int primID = prim.primID(N)[i];
    const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
    // Gather the Hermite control data for this curve: end positions/tangents
    // (p0,t0,p1,t1) and normals with their derivatives (n0,dn0,n1,dn1).
    Vec3ff p0,t0,p1,t1; Vec3fa n0,dn0,n1,dn1; geom->gather_hermite(p0,t0,n0,dn0,p1,t1,n1,dn1,geom->curve(primID));
    // Run the precise curve intersector; the Epilog commits any hit into `ray`
    // (updating ray.tfar) and applies filter callbacks.
    Intersector().intersect(pre,ray,context,geom,primID,p0,t0,p1,t1,n0,dn0,n1,dn1,Epilog(ray,context,geomID,primID));
    // A committed hit may have shrunk ray.tfar: drop remaining lanes whose
    // entry distance now lies beyond the current closest hit.
    mask &= movemask(tNear <= vfloat<M>(ray.tfar));
  }
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x348, %rsp # imm = 0x348
movq %rcx, %r9
movzbl 0x1(%rcx), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %r8
vbroadcastss 0x12(%r9,%r8), %xmm0
movq %rdx, %r10
movq %rsi, %r15
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%r8), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps 0x10(%rsi), %xmm0, %xmm0
vpmovsxbd 0x6(%r9,%rcx,4), %ymm1
vpmovsxbd 0x6(%r9,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r9,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r9,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %r8
vpmovsxbd 0x6(%r9,%r8), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r9,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x27e7c8(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x28cf96(%rip), %ymm27 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm27, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm27, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x28cf0b(%rip), %ymm5 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm5, %ymm2, %ymm6
vbroadcastss 0x25d01d(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x25870b(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r8
subq %rcx, %r8
vpmovsxwd 0x6(%r9,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r9,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r9,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r9,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r9,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0xc(%r15){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x28be18(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x20(%r15){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x28bdf4(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2c67f6(%rip), %ymm7, %k0 # 0x1f5a920
vmovups %ymm3, 0x2c0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c95d48
movq %rdi, %rbp
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
vbroadcastss 0x25ce8f(%rip), %xmm31 # 0x1ef0fec
vbroadcastss 0x28cd5d(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x28cd4f(%rip), %xmm18 # 0x1f20ec0
leaq 0x10(%rsp), %r11
tzcntq %r12, %rax
movl 0x2(%r9), %r13d
movl 0x6(%r9,%rax,4), %ecx
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r13,8), %rax
movq %rcx, 0x58(%rsp)
imulq 0x68(%rax), %rcx
movq 0x58(%rax), %rdx
movq 0x90(%rax), %rsi
movl (%rdx,%rcx), %ecx
movq 0xa0(%rax), %r8
movq %r8, %rdx
imulq %rcx, %rdx
vmovaps (%rsi,%rdx), %xmm4
leaq 0x1(%rcx), %rdx
imulq %rdx, %r8
vmovaps (%rsi,%r8), %xmm3
movq 0xc8(%rax), %rsi
movq 0xd8(%rax), %rbx
movq %rbx, %r8
imulq %rcx, %r8
vmovups (%rsi,%r8), %xmm5
imulq %rdx, %rbx
vmovups (%rsi,%rbx), %xmm6
movq 0x100(%rax), %rsi
movq 0x110(%rax), %rbx
movq %rbx, %r8
imulq %rcx, %r8
vmovaps (%rsi,%r8), %xmm7
imulq %rdx, %rbx
vmovaps (%rsi,%rbx), %xmm8
movq 0x148(%rax), %rsi
imulq %rsi, %rcx
imulq %rdx, %rsi
movq 0x138(%rax), %rax
vmovups (%rax,%rcx), %xmm9
vbroadcastss 0x25dc84(%rip), %xmm22 # 0x1ef1ebc
vfmadd132ps %xmm22, %xmm4, %xmm7 # xmm7 = (xmm7 * xmm22) + xmm4
vfnmadd132ps %xmm22, %xmm3, %xmm8 # xmm8 = -(xmm8 * xmm22) + xmm3
vmovups (%rax,%rsi), %xmm10
vfmadd132ps %xmm22, %xmm5, %xmm9 # xmm9 = (xmm9 * xmm22) + xmm5
vfnmadd132ps %xmm22, %xmm6, %xmm10 # xmm10 = -(xmm10 * xmm22) + xmm6
vxorps %xmm19, %xmm19, %xmm19
vmulps %xmm19, %xmm3, %xmm0
vfmadd231ps %xmm19, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm19) + xmm0
vxorps %xmm1, %xmm1, %xmm1
vfmadd213ps %xmm0, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm1) + xmm0
vaddps %xmm1, %xmm4, %xmm1
vfmadd231ps %xmm31, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm31) + xmm0
vfnmadd231ps %xmm31, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm31) + xmm0
vmulps %xmm19, %xmm6, %xmm11
vfmadd231ps %xmm19, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm19) + xmm11
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm11, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm2) + xmm11
vaddps %xmm2, %xmm5, %xmm12
vfmadd231ps %xmm31, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm31) + xmm11
vfnmadd231ps %xmm31, %xmm5, %xmm11 # xmm11 = -(xmm5 * xmm31) + xmm11
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm3, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm3
vfmadd231ps %xmm19, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm19) + xmm2
vfmadd231ps %xmm19, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm19) + xmm2
vmulps %xmm31, %xmm3, %xmm3
vfnmadd231ps %xmm8, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm8) + xmm3
vfmadd231ps %xmm7, %xmm19, %xmm3 # xmm3 = (xmm19 * xmm7) + xmm3
vfnmadd231ps %xmm4, %xmm19, %xmm3 # xmm3 = -(xmm19 * xmm4) + xmm3
vxorps %xmm4, %xmm4, %xmm4
vfmadd213ps %xmm6, %xmm10, %xmm4 # xmm4 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm19, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm19) + xmm4
vfmadd231ps %xmm19, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm19) + xmm4
vmulps %xmm31, %xmm6, %xmm7
vfnmadd231ps %xmm10, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm10) + xmm7
vfmadd231ps %xmm9, %xmm19, %xmm7 # xmm7 = (xmm19 * xmm9) + xmm7
vfnmadd231ps %xmm5, %xmm19, %xmm7 # xmm7 = -(xmm19 * xmm5) + xmm7
vshufps $0xc9, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm0, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,2,0,3]
vmulps %xmm0, %xmm8, %xmm8
vfmsub231ps %xmm11, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm11) - xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,2,0,3]
vmulps %xmm3, %xmm9, %xmm9
vfmsub231ps %xmm4, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm4) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm4 # xmm4 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[1,2,0,3]
vmulps %xmm3, %xmm9, %xmm9
vfmsub231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm6, %xmm6, %xmm7
vmovss %xmm7, %xmm19, %xmm9 # xmm9 = xmm7[0],xmm19[1,2,3]
vrsqrt14ss %xmm9, %xmm19, %xmm10
vmovss 0x2583a4(%rip), %xmm14 # 0x1eec718
vmulss %xmm14, %xmm10, %xmm11
vmovss 0x2587ff(%rip), %xmm15 # 0x1eecb80
vmulss %xmm7, %xmm15, %xmm12
vmulss %xmm10, %xmm12, %xmm12
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm12, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vdpps $0x7f, %xmm8, %xmm6, %xmm11
vbroadcastss %xmm10, %xmm10
vmulps %xmm6, %xmm10, %xmm12
vbroadcastss %xmm7, %xmm13
vmulps %xmm8, %xmm13, %xmm8
vbroadcastss %xmm11, %xmm11
vmulps %xmm6, %xmm11, %xmm6
vsubps %xmm6, %xmm8, %xmm6
vrcp14ss %xmm9, %xmm19, %xmm8
vmovss 0x25cc29(%rip), %xmm16 # 0x1ef0ff8
vfnmadd213ss %xmm16, %xmm8, %xmm7 # xmm7 = -(xmm8 * xmm7) + xmm16
vmulss %xmm7, %xmm8, %xmm7
vbroadcastss %xmm7, %xmm7
vmulps %xmm6, %xmm7, %xmm6
vmulps %xmm6, %xmm10, %xmm6
vdpps $0x7f, %xmm4, %xmm4, %xmm7
vmovss %xmm7, %xmm19, %xmm8 # xmm8 = xmm7[0],xmm19[1,2,3]
vrsqrt14ss %xmm8, %xmm19, %xmm9
vmulss %xmm14, %xmm9, %xmm10
vmulss %xmm7, %xmm15, %xmm11
vmulss %xmm9, %xmm11, %xmm11
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm10, %xmm9
vbroadcastss %xmm9, %xmm9
vdpps $0x7f, %xmm5, %xmm4, %xmm10
vmulps %xmm4, %xmm9, %xmm11
vbroadcastss %xmm7, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vbroadcastss %xmm10, %xmm10
vmulps %xmm4, %xmm10, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vrcp14ss %xmm8, %xmm19, %xmm5
vfnmadd213ss %xmm16, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm16
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vmulps %xmm4, %xmm9, %xmm4
vshufps $0xff, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[3,3,3,3]
vmulps %xmm5, %xmm12, %xmm7
vsubps %xmm7, %xmm1, %xmm13
vshufps $0xff, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[3,3,3,3]
vmulps %xmm12, %xmm8, %xmm8
vmulps %xmm6, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vsubps %xmm5, %xmm0, %xmm6
vaddps %xmm7, %xmm1, %xmm14
vaddps %xmm5, %xmm0, %xmm0
vshufps $0xff, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[3,3,3,3]
vmulps %xmm1, %xmm11, %xmm5
vsubps %xmm5, %xmm2, %xmm15
vshufps $0xff, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[3,3,3,3]
vmulps %xmm7, %xmm11, %xmm7
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm7, %xmm1
vsubps %xmm1, %xmm3, %xmm4
vaddps %xmm5, %xmm2, %xmm16
vaddps %xmm1, %xmm3, %xmm1
vmulps %xmm22, %xmm6, %xmm2
vaddps %xmm2, %xmm13, %xmm19
vmulps %xmm22, %xmm4, %xmm2
vsubps %xmm2, %xmm15, %xmm20
vmulps %xmm22, %xmm0, %xmm0
vaddps %xmm0, %xmm14, %xmm21
vmulps %xmm22, %xmm1, %xmm0
vsubps %xmm0, %xmm16, %xmm22
vmovaps (%r15), %xmm4
vsubps %xmm4, %xmm13, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x10(%rbp), %xmm3
vmovaps 0x20(%rbp), %xmm5
vmovaps 0x30(%rbp), %xmm7
vmulps %xmm0, %xmm7, %xmm0
vfmadd231ps %xmm2, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm2) + xmm0
vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0
vsubps %xmm4, %xmm19, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm6, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm6) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vsubps %xmm4, %xmm20, %xmm2
vbroadcastss %xmm2, %xmm6
vshufps $0x55, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ps %xmm6, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm6) + xmm2
vsubps %xmm4, %xmm15, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm8) + xmm6
vsubps %xmm4, %xmm14, %xmm8
vbroadcastss %xmm8, %xmm9
vshufps $0x55, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,1,1,1]
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm7, %xmm8, %xmm8
vfmadd231ps %xmm10, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm10) + xmm8
vfmadd231ps %xmm9, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm9) + xmm8
vsubps %xmm4, %xmm21, %xmm9
vbroadcastss %xmm9, %xmm10
vshufps $0x55, %xmm9, %xmm9, %xmm11 # xmm11 = xmm9[1,1,1,1]
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm9
vfmadd231ps %xmm11, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm11) + xmm9
vfmadd231ps %xmm10, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm10) + xmm9
vsubps %xmm4, %xmm22, %xmm10
vbroadcastss %xmm10, %xmm11
vshufps $0x55, %xmm10, %xmm10, %xmm12 # xmm12 = xmm10[1,1,1,1]
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm12, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm12) + xmm10
vfmadd231ps %xmm11, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm11) + xmm10
vsubps %xmm4, %xmm16, %xmm4
vbroadcastss %xmm4, %xmm11
vshufps $0x55, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmulps %xmm4, %xmm7, %xmm4
vfmadd231ps %xmm12, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm12) + xmm4
vfmadd231ps %xmm11, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm11) + xmm4
vmovlhps %xmm8, %xmm0, %xmm29 # xmm29 = xmm0[0],xmm8[0]
vmovlhps %xmm9, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm9[0]
vmovlhps %xmm10, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm10[0]
vmovlhps %xmm4, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm4[0]
vminps %xmm12, %xmm29, %xmm3
vmaxps %xmm12, %xmm29, %xmm5
vminps %xmm24, %xmm23, %xmm7
vminps %xmm7, %xmm3, %xmm3
vmaxps %xmm24, %xmm23, %xmm7
vmaxps %xmm7, %xmm5, %xmm5
vshufpd $0x3, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[1,1]
vminps %xmm7, %xmm3, %xmm3
vmaxps %xmm11, %xmm5, %xmm5
vandps %xmm17, %xmm3, %xmm3
vandps %xmm17, %xmm5, %xmm5
vmaxps %xmm5, %xmm3, %xmm3
vmovshdup %xmm3, %xmm5 # xmm5 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm5, %xmm3
leaq 0xff(%r12), %r8
vmulss 0x25d83c(%rip), %xmm3, %xmm3 # 0x1ef1eb8
vmovddup %xmm0, %xmm7 # xmm7 = xmm0[0,0]
vmovddup %xmm1, %xmm11 # xmm11 = xmm1[0,0]
vmovddup %xmm2, %xmm17 # xmm17 = xmm2[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm8, %xmm5 # xmm5 = xmm8[0,0]
vmovddup %xmm9, %xmm8 # xmm8 = xmm9[0,0]
vmovddup %xmm10, %xmm9 # xmm9 = xmm10[0,0]
vmovddup %xmm4, %xmm10 # xmm10 = xmm4[0,0]
vmovaps %xmm3, 0x170(%rsp)
vbroadcastss %xmm3, %ymm28
vxorps %xmm18, %xmm28, %xmm0
vbroadcastss %xmm0, %ymm30
vsubps %xmm29, %xmm12, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps %xmm12, 0x90(%rsp)
vsubps %xmm12, %xmm23, %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps %xmm23, 0x80(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x290(%rsp)
vmovaps %xmm13, 0x160(%rsp)
vmovaps %xmm14, 0x150(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x280(%rsp)
vmovaps %xmm19, 0x120(%rsp)
vmovaps %xmm21, 0x100(%rsp)
vsubps %xmm19, %xmm21, %xmm0
vmovaps %xmm0, 0x270(%rsp)
vmovaps %xmm20, 0x110(%rsp)
vmovaps %xmm22, 0xf0(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x260(%rsp)
vmovaps %xmm15, 0x140(%rsp)
vmovaps %xmm16, 0x130(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x250(%rsp)
xorl %ebx, %ebx
vmovsd 0x257f6c(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm14
vmovaps %xmm29, 0x20(%rsp)
vmovaps %xmm7, 0x200(%rsp)
vmovaps %xmm11, 0x1f0(%rsp)
vmovaps %xmm17, 0x1e0(%rsp)
vmovaps %xmm6, 0x1d0(%rsp)
vmovaps %xmm5, 0x1c0(%rsp)
vmovaps %xmm8, 0x1b0(%rsp)
vmovaps %xmm9, 0x1a0(%rsp)
vmovaps %xmm10, 0x190(%rsp)
vmovups %ymm28, 0x300(%rsp)
vmovups %ymm30, 0x2e0(%rsp)
vmovaps %xmm14, %xmm26
vshufps $0x50, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[0,0,1,1]
vbroadcastss 0x257f18(%rip), %ymm13 # 0x1eec714
vsubps %xmm1, %xmm13, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm8, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm7, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm7) + xmm3
vfmadd231ps %xmm11, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm11) + xmm4
vfmadd231ps %xmm17, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm17) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x28c69b(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x27deb3(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x28c64a(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x28c661(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm13, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x25c699(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2cb3a9(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm28, %ymm18, %k1
vcmpnltps %ymm30, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c94c5f
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm30, %ymm1, %k1
vcmpleps %ymm28, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c94c7f
movl %ebx, %eax
movl %ecx, 0x210(%rsp,%rax,4)
vmovlps %xmm0, 0x2a0(%rsp,%rax,8)
vmovlps %xmm26, 0x320(%rsp,%rax,8)
incl %ebx
vbroadcastss 0x28c23b(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x28c22d(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x25c34f(%rip), %ymm19 # 0x1ef0fec
vmovss 0x28c239(%rip), %xmm20 # 0x1f20ee0
vmovss 0x257a63(%rip), %xmm21 # 0x1eec714
vmovss 0x25c345(%rip), %xmm22 # 0x1ef1000
vmovss 0x25cd87(%rip), %xmm23 # 0x1ef1a4c
vbroadcastss 0x257a45(%rip), %xmm24 # 0x1eec714
vmovss 0x25d1e3(%rip), %xmm25 # 0x1ef1ebc
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x70(%rsp), %xmm28
vmovaps 0x60(%rsp), %xmm30
testl %ebx, %ebx
je 0x1c95d27
leal -0x1(%rbx), %eax
vmovss 0x2a0(%rsp,%rax,8), %xmm0
vmovss 0x2a4(%rsp,%rax,8), %xmm1
movl 0x210(%rsp,%rax,4), %ecx
vmovsd 0x320(%rsp,%rax,8), %xmm14
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x210(%rsp,%rax,4)
cmovel %eax, %ebx
vpxord %xmm26, %xmm26, %xmm26
vcvtsi2ss %rdx, %xmm26, %xmm2
vmulss %xmm20, %xmm2, %xmm2
incq %rdx
vpxord %xmm26, %xmm26, %xmm26
vcvtsi2ss %rdx, %xmm26, %xmm3
vmulss %xmm20, %xmm3, %xmm3
vsubss %xmm2, %xmm21, %xmm4
vmulss %xmm2, %xmm1, %xmm15
vfmadd231ss %xmm4, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm4) + xmm15
vsubss %xmm3, %xmm21, %xmm2
vmulss %xmm3, %xmm1, %xmm7
vfmadd231ss %xmm2, %xmm0, %xmm7 # xmm7 = (xmm0 * xmm2) + xmm7
vsubss %xmm15, %xmm7, %xmm0
vucomiss %xmm0, %xmm22
jbe 0x1c95cc5
vmovaps %xmm14, %xmm26
vshufps $0x50, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[0,0,1,1]
vucomiss %xmm0, %xmm23
seta %cl
cmpl $0x4, %ebx
setae %al
vsubps %xmm1, %xmm24, %xmm2
vmulps 0x1c0(%rsp), %xmm1, %xmm3
vmulps 0x1b0(%rsp), %xmm1, %xmm4
vmulps 0x1a0(%rsp), %xmm1, %xmm5
vmulps 0x190(%rsp), %xmm1, %xmm1
vfmadd231ps 0x200(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x1f0(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps 0x1e0(%rsp), %xmm2, %xmm5 # xmm5 = (xmm2 * mem) + xmm5
vfmadd231ps 0x1d0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm15, 0x30(%rsp)
vbroadcastss %xmm15, %xmm6
vmovaps %xmm7, (%rsp)
vbroadcastss %xmm7, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm19, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss %xmm25, %xmm0, %xmm5
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x25ba9d(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c94f50
vmovss 0x25cf76(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c94fb1
vmovss 0x25cf68(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x25ba71(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c94fb1
vmovss 0x25cf36(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x25ba3f(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c959b0
vcmpltss %xmm16, %xmm13, %k1
vmovaps %xmm21, %xmm15
vmovss 0x25ba04(%rip), %xmm16 # 0x1ef09cc
vxorps %xmm11, %xmm11, %xmm11
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm11, %xmm9, %k1
vmovaps %xmm21, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x256a1f(%rip), %xmm7 # 0x1eeba20
vmovss %xmm11, %xmm7, %xmm7 {%k1}
vmovss 0x257b75(%rip), %xmm8 # 0x1eecb84
vmovss %xmm11, %xmm8, %xmm8 {%k1}
vcmpltss %xmm11, %xmm14, %k1
vmovaps %xmm21, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c95031
jnp 0x1c95076
vucomiss %xmm13, %xmm14
jne 0x1c9507e
jp 0x1c9507e
vxorps %xmm16, %xmm16, %xmm16
vucomiss %xmm16, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2569c4(%rip), %xmm13 # 0x1eeba20
vmovss %xmm16, %xmm13, %xmm13 {%k1}
vmovss 0x257b1a(%rip), %xmm14 # 0x1eecb84
vmovss 0x2576a0(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c950a5
vxorps %xmm16, %xmm16, %xmm16
jmp 0x1c950af
vxorps %xmm18, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm21, %xmm13
vxorps %xmm16, %xmm16, %xmm16
vfmadd213ss %xmm14, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vmovaps 0x90(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm15
vcmpltss %xmm16, %xmm10, %k1
vmovaps %xmm21, %xmm13
vmovss 0x25b8f4(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
jne 0x1c950e1
jnp 0x1c9514b
vucomiss %xmm9, %xmm10
jne 0x1c95120
jp 0x1c95120
vucomiss %xmm16, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x25691a(%rip), %xmm9 # 0x1eeba20
vmovss %xmm16, %xmm9, %xmm9 {%k1}
vmovss 0x257a70(%rip), %xmm10 # 0x1eecb84
vmovss 0x2575f6(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c95141
vxorps %xmm18, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm21, %xmm9
vfmadd213ss %xmm10, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm21, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm21, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm21, %xmm8, %xmm8
movb $0x1, %r14b
vucomiss %xmm8, %xmm7
ja 0x1c959b3
vaddss 0x2c82d0(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x2579fc(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm21, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm24, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm21, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm21, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm21, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm31, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm31, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm31, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm21, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0x30(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vmovaps (%rsp), %xmm0
vinsertps $0x10, %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm4[0],xmm0[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x25788e(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm29, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm29
vmovaps %xmm30, %xmm12
vfmadd213ps %xmm14, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm14
vmovaps 0x290(%rsp), %xmm13
vfmadd213ps %xmm15, %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + xmm15
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm31, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x28bb44(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2ca962(%rip), %xmm29 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm29, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2c55c2(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x28bab3(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x28ba4c(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm14 # xmm14 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c95995
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps %xmm26, %xmm15
jbe 0x1c955bc
testb %sil, %sil
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x25ba7f(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x20(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
je 0x1c955f6
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c955f6
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x25ba22(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x20(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c959ab
movl $0xc8, %eax
vsubss %xmm0, %xmm21, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm1, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm29, %xmm6 # xmm6 = (xmm29 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm17, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm13
ja 0x1c956ab
decq %rax
jne 0x1c95607
jmp 0x1c95982
vucomiss %xmm16, %xmm0
jb 0x1c95982
vucomiss %xmm0, %xmm21
jb 0x1c95982
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm16, %xmm1
jb 0x1c95982
vucomiss %xmm1, %xmm21
jb 0x1c95982
vmovss 0x18(%rbp), %xmm2
vinsertps $0x1c, 0x28(%rbp), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x38(%rbp), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vmovaps (%r15), %xmm3
vmovaps 0x160(%rsp), %xmm4
vsubps %xmm3, %xmm4, %xmm4
vdpps $0x7f, %xmm2, %xmm4, %xmm4
vmovaps 0x120(%rsp), %xmm5
vsubps %xmm3, %xmm5, %xmm5
vdpps $0x7f, %xmm2, %xmm5, %xmm5
vmovaps 0x110(%rsp), %xmm6
vsubps %xmm3, %xmm6, %xmm6
vdpps $0x7f, %xmm2, %xmm6, %xmm6
vmovaps 0x140(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm7
vdpps $0x7f, %xmm2, %xmm7, %xmm7
vmovaps 0x150(%rsp), %xmm8
vsubps %xmm3, %xmm8, %xmm8
vdpps $0x7f, %xmm2, %xmm8, %xmm8
vmovaps 0x100(%rsp), %xmm9
vsubps %xmm3, %xmm9, %xmm9
vdpps $0x7f, %xmm2, %xmm9, %xmm9
vmovaps 0xf0(%rsp), %xmm10
vsubps %xmm3, %xmm10, %xmm10
vdpps $0x7f, %xmm2, %xmm10, %xmm10
vmovaps 0x130(%rsp), %xmm12
vsubps %xmm3, %xmm12, %xmm3
vdpps $0x7f, %xmm2, %xmm3, %xmm2
vsubss %xmm1, %xmm21, %xmm3
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm1, %xmm10, %xmm10
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm4, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm6) + xmm10
vfmadd231ss %xmm7, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm7) + xmm1
vsubss %xmm0, %xmm21, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm10, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm10) + xmm1
vfmadd231ss %xmm9, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm8) + xmm1
vucomiss 0xc(%r15), %xmm1
jb 0x1c95982
vmovss 0x20(%r15), %xmm12
vucomiss %xmm1, %xmm12
jb 0x1c95982
movq %r12, 0x18(%rsp)
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm24, %xmm8
vmulps 0x150(%rsp), %xmm7, %xmm9
vmulps 0x100(%rsp), %xmm7, %xmm10
vmulps 0xf0(%rsp), %xmm7, %xmm11
vmulps 0x130(%rsp), %xmm7, %xmm7
vfmadd231ps 0x160(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x120(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x110(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x140(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps %xmm31, %xmm7, %xmm6
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq %r13, %r12
movq (%rax,%r13,8), %r13
movl 0x24(%r15), %eax
testl %eax, 0x34(%r13)
je 0x1c9597a
vbroadcastss %xmm5, %xmm5
vmulps 0x250(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x260(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x270(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x280(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps (%rsp), %xmm7
jne 0x1c959c1
cmpq $0x0, 0x40(%r13)
jne 0x1c959c1
vmovss %xmm1, 0x20(%r15)
vshufps $0xe9, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[1,2,2,3]
vmovlps %xmm1, 0x30(%r15)
vmovss %xmm2, 0x38(%r15)
vmovlps %xmm0, 0x3c(%r15)
movq 0x58(%rsp), %rax
movl %eax, 0x44(%r15)
movq %r12, %r13
movl %r13d, 0x48(%r15)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x4c(%r15)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x50(%r15)
movq 0x18(%rsp), %r12
jmp 0x1c95987
movq %r12, %r13
movq 0x18(%rsp), %r12
vmovaps (%rsp), %xmm7
testb %r14b, %r14b
jne 0x1c94cef
jmp 0x1c95cc5
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x20(%rsp), %xmm29
vmovaps %xmm26, %xmm15
jmp 0x1c95982
xorl %r14d, %r14d
jmp 0x1c95982
movb $0x1, %r14b
vmovaps %xmm26, %xmm14
vmovaps 0x30(%rsp), %xmm15
jmp 0x1c95982
movq 0x8(%r10), %rax
vshufps $0xe9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,2,3]
vmovlps %xmm3, 0x220(%rsp)
vmovss %xmm2, 0x228(%rsp)
vmovlps %xmm0, 0x22c(%rsp)
movq 0x58(%rsp), %rcx
movl %ecx, 0x234(%rsp)
movl %r12d, 0x238(%rsp)
movl (%rax), %ecx
movl %ecx, 0x23c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x240(%rsp)
vmovss %xmm1, 0x20(%r15)
movl $0xffffffff, 0x10(%rsp) # imm = 0xFFFFFFFF
movq %r11, 0xb0(%rsp)
movq 0x18(%r13), %rax
movq %rax, 0xb8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xc0(%rsp)
movq %r15, 0xc8(%rsp)
leaq 0x220(%rsp), %rax
movq %rax, 0xd0(%rsp)
movl $0x1, 0xd8(%rsp)
movq 0x40(%r13), %rax
testq %rax, %rax
movq %r9, 0x50(%rsp)
movq %r8, 0x48(%rsp)
vmovaps %xmm14, 0xe0(%rsp)
vmovss %xmm12, 0x14(%rsp)
je 0x1c95b66
leaq 0xb0(%rsp), %rdi
movq %r10, 0xa8(%rsp)
vzeroupper
callq *%rax
vmovss 0x14(%rsp), %xmm12
vmovaps (%rsp), %xmm7
vmovaps 0x30(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm14
vmovaps 0x60(%rsp), %xmm30
vmovaps 0x70(%rsp), %xmm28
movq 0x48(%rsp), %r8
vmovaps 0x20(%rsp), %xmm29
leaq 0x10(%rsp), %r11
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x25c3d8(%rip), %xmm25 # 0x1ef1ebc
vbroadcastss 0x256c26(%rip), %xmm24 # 0x1eec714
vmovss 0x25bf54(%rip), %xmm23 # 0x1ef1a4c
vmovss 0x25b4fe(%rip), %xmm22 # 0x1ef1000
vmovss 0x256c08(%rip), %xmm21 # 0x1eec714
vmovss 0x28b3ca(%rip), %xmm20 # 0x1f20ee0
vbroadcastss 0x25b4cc(%rip), %ymm19 # 0x1ef0fec
vbroadcastss 0x28b396(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x28b390(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x25b4ae(%rip), %xmm31 # 0x1ef0fec
vbroadcastss 0x28b394(%rip), %ymm27 # 0x1f20edc
movq 0xa8(%rsp), %r10
movq 0x50(%rsp), %r9
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c95cb7
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c95c5c
testb $0x2, (%rcx)
jne 0x1c95b87
testb $0x40, 0x3e(%r13)
je 0x1c95c4f
leaq 0xb0(%rsp), %rdi
movq %r10, %r13
vzeroupper
callq *%rax
vmovss 0x14(%rsp), %xmm12
vmovaps (%rsp), %xmm7
vmovaps 0x30(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm14
vmovaps 0x60(%rsp), %xmm30
vmovaps 0x70(%rsp), %xmm28
movq 0x48(%rsp), %r8
vmovaps 0x20(%rsp), %xmm29
leaq 0x10(%rsp), %r11
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x25c2d9(%rip), %xmm25 # 0x1ef1ebc
vbroadcastss 0x256b27(%rip), %xmm24 # 0x1eec714
vmovss 0x25be55(%rip), %xmm23 # 0x1ef1a4c
vmovss 0x25b3ff(%rip), %xmm22 # 0x1ef1000
vmovss 0x256b09(%rip), %xmm21 # 0x1eec714
vmovss 0x28b2cb(%rip), %xmm20 # 0x1f20ee0
vbroadcastss 0x25b3cd(%rip), %ymm19 # 0x1ef0fec
vbroadcastss 0x28b297(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x28b291(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x25b3af(%rip), %xmm31 # 0x1ef0fec
vbroadcastss 0x28b295(%rip), %ymm27 # 0x1f20edc
movq %r13, %r10
movq 0x50(%rsp), %r9
movq 0xb0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c95cb7
movq 0xc8(%rsp), %rax
movq 0xd0(%rsp), %rcx
vmovss (%rcx), %xmm0
vmovss %xmm0, 0x30(%rax)
vmovss 0x4(%rcx), %xmm0
vmovss %xmm0, 0x34(%rax)
vmovss 0x8(%rcx), %xmm0
vmovss %xmm0, 0x38(%rax)
vmovss 0xc(%rcx), %xmm0
vmovss %xmm0, 0x3c(%rax)
vmovss 0x10(%rcx), %xmm0
vmovss %xmm0, 0x40(%rax)
movl 0x14(%rcx), %edx
movl %edx, 0x44(%rax)
movl 0x18(%rcx), %edx
movl %edx, 0x48(%rax)
movl 0x1c(%rcx), %edx
movl %edx, 0x4c(%rax)
movl 0x20(%rcx), %ecx
movl %ecx, 0x50(%rax)
jmp 0x1c95cbd
vmovss %xmm12, 0x20(%r15)
movq %r12, %r13
jmp 0x1c95973
vinsertps $0x10, %xmm7, %xmm15, %xmm0 # xmm0 = xmm15[0],xmm7[0],xmm15[2,3]
vmovdqa 0x200(%rsp), %xmm7
vmovaps 0x1f0(%rsp), %xmm11
vmovaps 0x1e0(%rsp), %xmm17
vmovaps 0x1d0(%rsp), %xmm6
vmovaps 0x1c0(%rsp), %xmm5
vmovaps 0x1b0(%rsp), %xmm8
vmovaps 0x1a0(%rsp), %xmm9
vmovaps 0x190(%rsp), %xmm10
vmovups 0x300(%rsp), %ymm28
vmovups 0x2e0(%rsp), %ymm30
jmp 0x1c947e7
vmovups 0x2c0(%rsp), %ymm0
vcmpleps 0x20(%r15){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %r8d, %r12d
andl %eax, %r12d
jne 0x1c94176
addq $0x348, %rsp # imm = 0x348
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersector1<8>::occluded_hn<embree::avx512::OrientedCurve1Intersector1<embree::HermiteCurveT, 7, 8>, embree::avx512::Occluded1Epilog1<true>>(embree::avx512::CurvePrecalculations1 const&, embree::RayK<1>&, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Occlusion (shadow-ray) query against a block of up to 8 oriented Hermite
// curves. Returns true as soon as ANY curve occludes the ray; exact hit
// information beyond the boolean is not needed.
static __forceinline bool occluded_hn(const Precalculations& pre, Ray& ray, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Vectorized ray-vs-curve-bounds test over all M slots at once; 'valid'
// marks slots whose bounds the ray enters, 'tNear' their entry distances.
vbool<M> valid = intersect(ray,prim,tNear);
const size_t N = prim.N;
// Scalar bitmask of candidate slots; iterated by clearing one bit per pass.
size_t mask = movemask(valid);
while (mask)
{
// bscf = bit-scan-and-clear-forward: extracts the lowest set bit's index
// and removes it from 'mask', so each candidate is visited exactly once.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the curve's Hermite control data (endpoint positions/tangents
// plus normals and normal derivatives) for the exact per-curve test.
Vec3ff p0,t0,p1,t1; Vec3fa n0,dn0,n1,dn1; geom->gather_hermite(p0,t0,n0,dn0,p1,t1,n1,dn1,geom->curve(primID));
// Precise intersection; the occlusion epilog makes this report a bare
// hit/no-hit. First occluder terminates the whole query early.
if (Intersector().intersect(pre,ray,context,geom,primID,p0,t0,p1,t1,n0,dn0,n1,dn1,Epilog(ray,context,geomID,primID)))
return true;
// Re-filter remaining candidates against the current ray.tfar — the
// epilog may have shortened the ray, invalidating farther candidates.
// NOTE(review): assumes ray.tfar can change inside the loop; confirm
// against the Occluded1Epilog1 implementation.
mask &= movemask(tNear <= vfloat<M>(ray.tfar));
}
// No curve in this block occludes the ray.
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x358, %rsp # imm = 0x358
movq %rcx, %r9
movq %rdx, %r10
movq %rsi, %r8
movq %rdi, %r12
movzbl 0x1(%rcx), %eax
leaq (%rax,%rax,4), %rcx
leaq (%rcx,%rcx,4), %rdx
vbroadcastss 0x12(%r9,%rdx), %xmm0
vmovaps (%rsi), %xmm1
vsubps 0x6(%r9,%rdx), %xmm1, %xmm1
vmulps 0x10(%rsi), %xmm0, %xmm2
vmulps %xmm1, %xmm0, %xmm3
vpmovsxbd 0x6(%r9,%rax,4), %ymm0
vcvtdq2ps %ymm0, %ymm5
vpmovsxbd 0x6(%r9,%rcx), %ymm0
vcvtdq2ps %ymm0, %ymm6
leaq (%rax,%rax,2), %rdx
vpmovsxbd 0x6(%r9,%rdx,2), %ymm0
vcvtdq2ps %ymm0, %ymm7
leaq (%rax,%rcx,2), %rsi
vpmovsxbd 0x6(%r9,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm8
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%r9,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm9
addq %rax, %rsi
vpmovsxbd 0x6(%r9,%rsi), %ymm0
vcvtdq2ps %ymm0, %ymm10
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %edi
vpmovsxbd 0x6(%r9,%rdi), %ymm0
addq %rax, %rdi
vpmovsxbd 0x6(%r9,%rdi), %ymm1
vcvtdq2ps %ymm0, %ymm11
vcvtdq2ps %ymm1, %ymm12
shll $0x2, %ecx
vpmovsxbd 0x6(%r9,%rcx), %ymm0
vcvtdq2ps %ymm0, %ymm13
vbroadcastss %xmm2, %ymm14
vbroadcastss 0x27c8d1(%rip), %ymm16 # 0x1f12704
vpermps %ymm2, %ymm16, %ymm15
vbroadcastss 0x28b099(%rip), %ymm27 # 0x1f20edc
vpermps %ymm2, %ymm27, %ymm0
vmulps %ymm7, %ymm0, %ymm4
vmulps %ymm0, %ymm10, %ymm1
vmulps %ymm0, %ymm13, %ymm0
vfmadd231ps %ymm6, %ymm15, %ymm4 # ymm4 = (ymm15 * ymm6) + ymm4
vfmadd231ps %ymm9, %ymm15, %ymm1 # ymm1 = (ymm15 * ymm9) + ymm1
vfmadd231ps %ymm15, %ymm12, %ymm0 # ymm0 = (ymm12 * ymm15) + ymm0
vfmadd231ps %ymm5, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm5) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vbroadcastss %xmm3, %ymm14
vpermps %ymm3, %ymm16, %ymm15
vpermps %ymm3, %ymm27, %ymm2
vmulps %ymm7, %ymm2, %ymm7
vmulps %ymm2, %ymm10, %ymm3
vmulps %ymm2, %ymm13, %ymm2
vfmadd231ps %ymm6, %ymm15, %ymm7 # ymm7 = (ymm15 * ymm6) + ymm7
vfmadd231ps %ymm9, %ymm15, %ymm3 # ymm3 = (ymm15 * ymm9) + ymm3
vfmadd231ps %ymm12, %ymm15, %ymm2 # ymm2 = (ymm15 * ymm12) + ymm2
vfmadd231ps %ymm5, %ymm14, %ymm7 # ymm7 = (ymm14 * ymm5) + ymm7
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vbroadcastss 0x28b00d(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x25b124(%rip), %ymm8 # 0x1ef0fe8
vcmpltps %ymm8, %ymm6, %k1
vmovaps %ymm8, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm8, %ymm6, %k1
vmovaps %ymm8, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm8, %ymm5, %k1
vmovaps %ymm8, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x256812(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %rdi
subq %rax, %rdi
vpmovsxwd 0x6(%r9,%rdi), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm7, %ymm5, %ymm5
vpmovsxwd 0x6(%r9,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm7, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rcx
shlq $0x3, %rdx
subq %rax, %rdx
movl %eax, %edi
shll $0x4, %edi
vpmovsxwd 0x6(%r9,%rdi), %ymm6
subq %rsi, %rdi
vpmovsxwd 0x6(%r9,%rdi), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r9,%rcx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r9,%rdx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0xc(%r8){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x289f29(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x20(%r8){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x289f05(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2c4901(%rip), %ymm1, %k0 # 0x1f5a920
vmovups %ymm6, 0x2d0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne %r11b
je 0x1c97bf3
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
vbroadcastss 0x25af99(%rip), %xmm31 # 0x1ef0fec
vbroadcastss 0x28ae67(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x28ae59(%rip), %xmm18 # 0x1f20ec0
movq %r8, 0x20(%rsp)
tzcntq %r13, %rax
movl 0x2(%r9), %r14d
movl 0x6(%r9,%rax,4), %ecx
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r14,8), %rax
movq %rcx, 0xb0(%rsp)
imulq 0x68(%rax), %rcx
movq 0x58(%rax), %rdx
movq 0x90(%rax), %rsi
movl (%rdx,%rcx), %ecx
movq 0xa0(%rax), %rdi
movq %rdi, %rdx
imulq %rcx, %rdx
vmovaps (%rsi,%rdx), %xmm4
leaq 0x1(%rcx), %rdx
imulq %rdx, %rdi
vmovaps (%rsi,%rdi), %xmm3
movq 0xc8(%rax), %rsi
movq 0xd8(%rax), %rdi
movq %rdi, %r8
imulq %rcx, %r8
vmovups (%rsi,%r8), %xmm5
movq 0x100(%rax), %r8
imulq %rdx, %rdi
vmovups (%rsi,%rdi), %xmm6
movq 0x110(%rax), %rsi
movq %rsi, %rdi
imulq %rcx, %rdi
vmovaps (%r8,%rdi), %xmm7
imulq %rdx, %rsi
vmovaps (%r8,%rsi), %xmm8
movq 0x20(%rsp), %r8
movq 0x148(%rax), %rsi
imulq %rsi, %rcx
imulq %rdx, %rsi
movq 0x138(%rax), %rax
vbroadcastss 0x25bd8b(%rip), %xmm22 # 0x1ef1ebc
vfmadd132ps %xmm22, %xmm4, %xmm7 # xmm7 = (xmm7 * xmm22) + xmm4
vfnmadd132ps %xmm22, %xmm3, %xmm8 # xmm8 = -(xmm8 * xmm22) + xmm3
vmovups (%rax,%rcx), %xmm9
vfmadd132ps %xmm22, %xmm5, %xmm9 # xmm9 = (xmm9 * xmm22) + xmm5
vmovups (%rax,%rsi), %xmm10
vfnmadd132ps %xmm22, %xmm6, %xmm10 # xmm10 = -(xmm10 * xmm22) + xmm6
vxorps %xmm19, %xmm19, %xmm19
vmulps %xmm19, %xmm3, %xmm0
vfmadd231ps %xmm19, %xmm8, %xmm0 # xmm0 = (xmm8 * xmm19) + xmm0
vpxor %xmm1, %xmm1, %xmm1
vfmadd213ps %xmm0, %xmm7, %xmm1 # xmm1 = (xmm7 * xmm1) + xmm0
vaddps %xmm1, %xmm4, %xmm1
vfmadd231ps %xmm31, %xmm7, %xmm0 # xmm0 = (xmm7 * xmm31) + xmm0
vfnmadd231ps %xmm31, %xmm4, %xmm0 # xmm0 = -(xmm4 * xmm31) + xmm0
vmulps %xmm19, %xmm6, %xmm11
vfmadd231ps %xmm19, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm19) + xmm11
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm11, %xmm9, %xmm2 # xmm2 = (xmm9 * xmm2) + xmm11
vaddps %xmm2, %xmm5, %xmm12
vfmadd231ps %xmm31, %xmm9, %xmm11 # xmm11 = (xmm9 * xmm31) + xmm11
vfnmadd231ps %xmm31, %xmm5, %xmm11 # xmm11 = -(xmm5 * xmm31) + xmm11
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm3, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm3
vfmadd231ps %xmm19, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm19) + xmm2
vfmadd231ps %xmm19, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm19) + xmm2
vmulps %xmm31, %xmm3, %xmm3
vfnmadd231ps %xmm8, %xmm31, %xmm3 # xmm3 = -(xmm31 * xmm8) + xmm3
vfmadd231ps %xmm7, %xmm19, %xmm3 # xmm3 = (xmm19 * xmm7) + xmm3
vfnmadd231ps %xmm4, %xmm19, %xmm3 # xmm3 = -(xmm19 * xmm4) + xmm3
vxorps %xmm4, %xmm4, %xmm4
vfmadd213ps %xmm6, %xmm10, %xmm4 # xmm4 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm19, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm19) + xmm4
vfmadd231ps %xmm19, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm19) + xmm4
vmulps %xmm31, %xmm6, %xmm7
vfnmadd231ps %xmm10, %xmm31, %xmm7 # xmm7 = -(xmm31 * xmm10) + xmm7
vfmadd231ps %xmm9, %xmm19, %xmm7 # xmm7 = (xmm19 * xmm9) + xmm7
vfnmadd231ps %xmm5, %xmm19, %xmm7 # xmm7 = -(xmm19 * xmm5) + xmm7
vshufps $0xc9, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm6 # xmm6 = xmm12[1,2,0,3]
vmulps %xmm6, %xmm0, %xmm6
vfmsub231ps %xmm12, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm12) - xmm6
vshufps $0xc9, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,2,0,3]
vshufps $0xc9, %xmm11, %xmm11, %xmm8 # xmm8 = xmm11[1,2,0,3]
vmulps %xmm0, %xmm8, %xmm8
vfmsub231ps %xmm11, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm11) - xmm8
vshufps $0xc9, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm5 # xmm5 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm9 # xmm9 = xmm4[1,2,0,3]
vmulps %xmm3, %xmm9, %xmm9
vfmsub231ps %xmm4, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm4) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm4 # xmm4 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[1,2,0,3]
vmulps %xmm3, %xmm9, %xmm9
vfmsub231ps %xmm7, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm7) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm5 # xmm5 = xmm9[1,2,0,3]
vdpps $0x7f, %xmm6, %xmm6, %xmm7
vmovss %xmm7, %xmm19, %xmm9 # xmm9 = xmm7[0],xmm19[1,2,3]
vrsqrt14ss %xmm9, %xmm19, %xmm10
vmovss 0x2564a6(%rip), %xmm14 # 0x1eec718
vmulss %xmm14, %xmm10, %xmm11
vmovss 0x256901(%rip), %xmm15 # 0x1eecb80
vmulss %xmm7, %xmm15, %xmm12
vmulss %xmm10, %xmm12, %xmm12
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm12, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vdpps $0x7f, %xmm8, %xmm6, %xmm11
vmulps %xmm6, %xmm10, %xmm12
vbroadcastss %xmm7, %xmm13
vmulps %xmm8, %xmm13, %xmm8
vbroadcastss %xmm11, %xmm11
vmulps %xmm6, %xmm11, %xmm6
vsubps %xmm6, %xmm8, %xmm6
vrcp14ss %xmm9, %xmm19, %xmm8
vmovss 0x25ad2b(%rip), %xmm16 # 0x1ef0ff8
vfnmadd213ss %xmm16, %xmm8, %xmm7 # xmm7 = -(xmm8 * xmm7) + xmm16
vmulss %xmm7, %xmm8, %xmm7
vbroadcastss %xmm7, %xmm7
vmulps %xmm6, %xmm7, %xmm6
vmulps %xmm6, %xmm10, %xmm6
vdpps $0x7f, %xmm4, %xmm4, %xmm7
vmovss %xmm7, %xmm19, %xmm8 # xmm8 = xmm7[0],xmm19[1,2,3]
vrsqrt14ss %xmm8, %xmm19, %xmm9
vmulss %xmm14, %xmm9, %xmm10
vmulss %xmm7, %xmm15, %xmm11
vmulss %xmm9, %xmm11, %xmm11
vmulss %xmm9, %xmm9, %xmm9
vmulss %xmm9, %xmm11, %xmm9
vsubss %xmm9, %xmm10, %xmm9
vbroadcastss %xmm9, %xmm9
vmulps %xmm4, %xmm9, %xmm10
vdpps $0x7f, %xmm5, %xmm4, %xmm11
vbroadcastss %xmm7, %xmm13
vmulps %xmm5, %xmm13, %xmm5
vbroadcastss %xmm11, %xmm11
vmulps %xmm4, %xmm11, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vrcp14ss %xmm8, %xmm19, %xmm5
vfnmadd213ss %xmm16, %xmm5, %xmm7 # xmm7 = -(xmm5 * xmm7) + xmm16
vmulss %xmm7, %xmm5, %xmm5
vbroadcastss %xmm5, %xmm5
vmulps %xmm5, %xmm4, %xmm4
vmulps %xmm4, %xmm9, %xmm4
vshufps $0xff, %xmm1, %xmm1, %xmm5 # xmm5 = xmm1[3,3,3,3]
vmulps %xmm5, %xmm12, %xmm7
vsubps %xmm7, %xmm1, %xmm13
vshufps $0xff, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[3,3,3,3]
vmulps %xmm12, %xmm8, %xmm8
vmulps %xmm6, %xmm5, %xmm5
vaddps %xmm5, %xmm8, %xmm5
vsubps %xmm5, %xmm0, %xmm6
vaddps %xmm7, %xmm1, %xmm14
vaddps %xmm5, %xmm0, %xmm0
vshufps $0xff, %xmm2, %xmm2, %xmm1 # xmm1 = xmm2[3,3,3,3]
vmulps %xmm1, %xmm10, %xmm5
vsubps %xmm5, %xmm2, %xmm15
vshufps $0xff, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[3,3,3,3]
vmulps %xmm7, %xmm10, %xmm7
vmulps %xmm4, %xmm1, %xmm1
vaddps %xmm1, %xmm7, %xmm1
vsubps %xmm1, %xmm3, %xmm4
vaddps %xmm5, %xmm2, %xmm16
vaddps %xmm1, %xmm3, %xmm1
vmulps %xmm22, %xmm6, %xmm2
vaddps %xmm2, %xmm13, %xmm19
vmulps %xmm22, %xmm4, %xmm2
vsubps %xmm2, %xmm15, %xmm20
vmulps %xmm22, %xmm0, %xmm0
vaddps %xmm0, %xmm14, %xmm21
vmulps %xmm22, %xmm1, %xmm0
vsubps %xmm0, %xmm16, %xmm22
vmovaps (%r8), %xmm4
vsubps %xmm4, %xmm13, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vmovaps 0x10(%r12), %xmm3
vmovaps 0x20(%r12), %xmm5
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps 0x30(%r12), %xmm7
vmulps %xmm0, %xmm7, %xmm0
vfmadd231ps %xmm2, %xmm5, %xmm0 # xmm0 = (xmm5 * xmm2) + xmm0
vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0
vsubps %xmm4, %xmm19, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[1,1,1,1]
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm6, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm6) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vsubps %xmm4, %xmm20, %xmm2
vbroadcastss %xmm2, %xmm6
vshufps $0x55, %xmm2, %xmm2, %xmm8 # xmm8 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ps %xmm6, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm6) + xmm2
vsubps %xmm4, %xmm15, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm3, %xmm6 # xmm6 = (xmm3 * xmm8) + xmm6
vsubps %xmm4, %xmm14, %xmm8
vbroadcastss %xmm8, %xmm9
vshufps $0x55, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,1,1,1]
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm7, %xmm8, %xmm8
vfmadd231ps %xmm10, %xmm5, %xmm8 # xmm8 = (xmm5 * xmm10) + xmm8
vfmadd231ps %xmm9, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm9) + xmm8
vsubps %xmm4, %xmm21, %xmm9
vbroadcastss %xmm9, %xmm10
vshufps $0x55, %xmm9, %xmm9, %xmm11 # xmm11 = xmm9[1,1,1,1]
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm9
vfmadd231ps %xmm11, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm11) + xmm9
vfmadd231ps %xmm10, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm10) + xmm9
vsubps %xmm4, %xmm22, %xmm10
vbroadcastss %xmm10, %xmm11
vshufps $0x55, %xmm10, %xmm10, %xmm12 # xmm12 = xmm10[1,1,1,1]
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm12, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm12) + xmm10
vfmadd231ps %xmm11, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm11) + xmm10
vsubps %xmm4, %xmm16, %xmm4
vbroadcastss %xmm4, %xmm11
vshufps $0x55, %xmm4, %xmm4, %xmm12 # xmm12 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[2,2,2,2]
vmulps %xmm4, %xmm7, %xmm4
vfmadd231ps %xmm12, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm12) + xmm4
vfmadd231ps %xmm11, %xmm3, %xmm4 # xmm4 = (xmm3 * xmm11) + xmm4
vmovlhps %xmm8, %xmm0, %xmm29 # xmm29 = xmm0[0],xmm8[0]
vmovlhps %xmm9, %xmm1, %xmm12 # xmm12 = xmm1[0],xmm9[0]
vmovlhps %xmm10, %xmm2, %xmm23 # xmm23 = xmm2[0],xmm10[0]
vmovlhps %xmm4, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm4[0]
vminps %xmm12, %xmm29, %xmm3
vmaxps %xmm12, %xmm29, %xmm5
vminps %xmm24, %xmm23, %xmm7
vminps %xmm7, %xmm3, %xmm3
vmaxps %xmm24, %xmm23, %xmm7
vmaxps %xmm7, %xmm5, %xmm5
vshufpd $0x3, %xmm3, %xmm3, %xmm7 # xmm7 = xmm3[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm11 # xmm11 = xmm5[1,1]
vminps %xmm7, %xmm3, %xmm3
vmaxps %xmm11, %xmm5, %xmm5
vandps %xmm17, %xmm3, %xmm3
vandps %xmm17, %xmm5, %xmm5
vmaxps %xmm5, %xmm3, %xmm3
vmovshdup %xmm3, %xmm5 # xmm5 = xmm3[1,1,3,3]
vmaxss %xmm3, %xmm5, %xmm3
leaq 0xff(%r13), %rdi
vmulss 0x25b939(%rip), %xmm3, %xmm3 # 0x1ef1eb8
vmovddup %xmm0, %xmm7 # xmm7 = xmm0[0,0]
vmovddup %xmm1, %xmm11 # xmm11 = xmm1[0,0]
vmovddup %xmm2, %xmm17 # xmm17 = xmm2[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm8, %xmm5 # xmm5 = xmm8[0,0]
vmovddup %xmm9, %xmm8 # xmm8 = xmm9[0,0]
vmovddup %xmm10, %xmm9 # xmm9 = xmm10[0,0]
vmovddup %xmm4, %xmm10 # xmm10 = xmm4[0,0]
vmovaps %xmm3, 0x180(%rsp)
vbroadcastss %xmm3, %ymm28
vxorps %xmm18, %xmm28, %xmm0
vbroadcastss %xmm0, %ymm30
vsubps %xmm29, %xmm12, %xmm0
vmovaps %xmm0, 0x70(%rsp)
vmovaps %xmm12, 0x90(%rsp)
vsubps %xmm12, %xmm23, %xmm0
vmovaps %xmm0, 0x60(%rsp)
vmovaps %xmm23, 0x80(%rsp)
vmovaps %xmm24, 0x190(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x2a0(%rsp)
vmovaps %xmm13, 0x170(%rsp)
vmovaps %xmm14, 0x160(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x290(%rsp)
vmovaps %xmm19, 0x130(%rsp)
vmovaps %xmm21, 0x110(%rsp)
vsubps %xmm19, %xmm21, %xmm0
vmovaps %xmm0, 0x280(%rsp)
vmovaps %xmm20, 0x120(%rsp)
vmovaps %xmm22, 0x100(%rsp)
vsubps %xmm20, %xmm22, %xmm0
vmovaps %xmm0, 0x270(%rsp)
vmovaps %xmm15, 0x150(%rsp)
vmovaps %xmm16, 0x140(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x260(%rsp)
xorl %r15d, %r15d
vmovsd 0x256068(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm14
xorl %ebp, %ebp
vmovaps %xmm29, 0x30(%rsp)
vmovaps %xmm7, 0x210(%rsp)
vmovaps %xmm11, 0x200(%rsp)
vmovaps %xmm17, 0x1f0(%rsp)
vmovaps %xmm6, 0x1e0(%rsp)
vmovaps %xmm5, 0x1d0(%rsp)
vmovaps %xmm8, 0x1c0(%rsp)
vmovaps %xmm9, 0x1b0(%rsp)
vmovaps %xmm10, 0x1a0(%rsp)
vmovups %ymm28, 0x310(%rsp)
vmovups %ymm30, 0x2f0(%rsp)
vmovaps %xmm14, %xmm26
vshufps $0x50, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[0,0,1,1]
vbroadcastss 0x25600c(%rip), %ymm13 # 0x1eec714
vsubps %xmm1, %xmm13, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm8, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm7, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm7) + xmm3
vfmadd231ps %xmm11, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm11) + xmm4
vfmadd231ps %xmm17, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm17) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x28a78f(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x27bfa7(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x28a73e(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x28a755(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm13, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x25a78d(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2c949d(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm28, %ymm18, %k1
vcmpnltps %ymm30, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c96b6b
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm30, %ymm1, %k1
vcmpleps %ymm28, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c96b8b
movl %ebp, %eax
movl %ecx, 0x220(%rsp,%rax,4)
vmovlps %xmm0, 0x2b0(%rsp,%rax,8)
vmovlps %xmm26, 0x330(%rsp,%rax,8)
incl %ebp
vbroadcastss 0x28a32f(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x28a321(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x25a443(%rip), %ymm19 # 0x1ef0fec
vmovss 0x28a32d(%rip), %xmm20 # 0x1f20ee0
vmovss 0x255b57(%rip), %xmm21 # 0x1eec714
vmovss 0x25a439(%rip), %xmm22 # 0x1ef1000
vmovss 0x25ae7b(%rip), %xmm23 # 0x1ef1a4c
vbroadcastss 0x255b39(%rip), %xmm24 # 0x1eec714
vmovss 0x25b2d7(%rip), %xmm25 # 0x1ef1ebc
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x70(%rsp), %xmm28
vmovaps 0x60(%rsp), %xmm30
testl %ebp, %ebp
je 0x1c97bc8
leal -0x1(%rbp), %eax
vmovss 0x2b0(%rsp,%rax,8), %xmm0
vmovss 0x2b4(%rsp,%rax,8), %xmm1
movl 0x220(%rsp,%rax,4), %ecx
vmovsd 0x330(%rsp,%rax,8), %xmm14
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x220(%rsp,%rax,4)
cmovel %eax, %ebp
vpxord %xmm26, %xmm26, %xmm26
vcvtsi2ss %rdx, %xmm26, %xmm2
vmulss %xmm20, %xmm2, %xmm2
incq %rdx
vpxord %xmm26, %xmm26, %xmm26
vcvtsi2ss %rdx, %xmm26, %xmm3
vmulss %xmm20, %xmm3, %xmm3
vsubss %xmm2, %xmm21, %xmm4
vmulss %xmm2, %xmm1, %xmm15
vfmadd231ss %xmm4, %xmm0, %xmm15 # xmm15 = (xmm0 * xmm4) + xmm15
vsubss %xmm3, %xmm21, %xmm2
vmulss %xmm3, %xmm1, %xmm13
vfmadd231ss %xmm2, %xmm0, %xmm13 # xmm13 = (xmm0 * xmm2) + xmm13
vsubss %xmm15, %xmm13, %xmm0
vucomiss %xmm0, %xmm22
jbe 0x1c97b60
vmovaps %xmm14, %xmm26
vshufps $0x50, %xmm14, %xmm14, %xmm1 # xmm1 = xmm14[0,0,1,1]
vucomiss %xmm0, %xmm23
seta %cl
cmpl $0x4, %ebp
setae %al
vsubps %xmm1, %xmm24, %xmm2
vmulps 0x1d0(%rsp), %xmm1, %xmm3
vmulps 0x1c0(%rsp), %xmm1, %xmm4
vmulps 0x1b0(%rsp), %xmm1, %xmm5
vmulps 0x1a0(%rsp), %xmm1, %xmm1
vfmadd231ps 0x210(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x200(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps 0x1f0(%rsp), %xmm2, %xmm5 # xmm5 = (xmm2 * mem) + xmm5
vfmadd231ps 0x1e0(%rsp), %xmm2, %xmm1 # xmm1 = (xmm2 * mem) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm15, 0x40(%rsp)
vbroadcastss %xmm15, %xmm6
vmovaps %xmm13, 0x10(%rsp)
vbroadcastss %xmm13, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm19, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss %xmm25, %xmm0, %xmm5
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x259b90(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c96e5d
vmovss 0x25b069(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c96ebe
vmovss 0x25b05b(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x259b64(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c96ebe
vmovss 0x25b029(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x259b32(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c97b34
vcmpltss %xmm16, %xmm13, %k1
vmovaps %xmm21, %xmm15
vmovss 0x259af7(%rip), %xmm16 # 0x1ef09cc
vxorps %xmm11, %xmm11, %xmm11
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm11, %xmm9, %k1
vmovaps %xmm21, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x254b12(%rip), %xmm7 # 0x1eeba20
vmovss %xmm11, %xmm7, %xmm7 {%k1}
vmovss 0x255c68(%rip), %xmm8 # 0x1eecb84
vmovss %xmm11, %xmm8, %xmm8 {%k1}
vcmpltss %xmm11, %xmm14, %k1
vmovaps %xmm21, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c96f3e
jnp 0x1c96f83
vucomiss %xmm13, %xmm14
jne 0x1c96f8b
jp 0x1c96f8b
vxorps %xmm16, %xmm16, %xmm16
vucomiss %xmm16, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x254ab7(%rip), %xmm13 # 0x1eeba20
vmovss %xmm16, %xmm13, %xmm13 {%k1}
vmovss 0x255c0d(%rip), %xmm14 # 0x1eecb84
vmovss 0x255793(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c96fb2
vxorps %xmm16, %xmm16, %xmm16
jmp 0x1c96fbc
vxorps %xmm18, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm21, %xmm13
vxorps %xmm16, %xmm16, %xmm16
vfmadd213ss %xmm14, %xmm16, %xmm13 # xmm13 = (xmm16 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vmovaps 0x90(%rsp), %xmm14
vmovaps 0x80(%rsp), %xmm15
vcmpltss %xmm16, %xmm10, %k1
vmovaps %xmm21, %xmm13
vmovss 0x2599e7(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
jne 0x1c96fee
jnp 0x1c97058
vucomiss %xmm9, %xmm10
jne 0x1c9702d
jp 0x1c9702d
vucomiss %xmm16, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x254a0d(%rip), %xmm9 # 0x1eeba20
vmovss %xmm16, %xmm9, %xmm9 {%k1}
vmovss 0x255b63(%rip), %xmm10 # 0x1eecb84
vmovss 0x2556e9(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c9704e
vxorps %xmm18, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm21, %xmm9
vfmadd213ss %xmm10, %xmm16, %xmm9 # xmm9 = (xmm16 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm21, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm21, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm21, %xmm8, %xmm8
movb $0x1, %bl
vucomiss %xmm8, %xmm7
ja 0x1c97b36
vaddss 0x2c63c4(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x255af0(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm16, %xmm7
vminss %xmm21, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm24, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm21, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm21, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm21, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm31, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm31, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm31, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm21, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0x40(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vmovaps 0x10(%rsp), %xmm0
vinsertps $0x10, %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm4[0],xmm0[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x255981(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm29, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm29
vmovaps %xmm30, %xmm12
vfmadd213ps %xmm14, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm14
vmovaps 0x2a0(%rsp), %xmm13
vfmadd213ps %xmm15, %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + xmm15
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm31, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x289c37(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2c8a55(%rip), %xmm29 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm29, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2c36b5(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x289ba6(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x289b3f(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm14 # xmm14 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c97b1a
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps %xmm26, %xmm15
jbe 0x1c974c9
testb %sil, %sil
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x259b72(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x30(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x190(%rsp), %xmm12
vmovaps 0x180(%rsp), %xmm13
je 0x1c97503
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c97503
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x259b15(%rip), %xmm11 # 0x1ef0fec
vmovaps 0x30(%rsp), %xmm29
vmovaps 0x90(%rsp), %xmm9
vmovaps 0x80(%rsp), %xmm10
vmovaps 0x190(%rsp), %xmm12
vmovaps 0x180(%rsp), %xmm13
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c97b30
movl $0xc8, %eax
vsubss %xmm0, %xmm21, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm1, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm29, %xmm6 # xmm6 = (xmm29 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm17, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm13
ja 0x1c975b8
decq %rax
jne 0x1c97514
jmp 0x1c97b42
vucomiss %xmm16, %xmm0
jb 0x1c97b42
vucomiss %xmm0, %xmm21
vmovaps 0x10(%rsp), %xmm13
jb 0x1c97b48
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm16, %xmm1
jb 0x1c97b48
vucomiss %xmm1, %xmm21
jb 0x1c97b48
vmovss 0x18(%r12), %xmm2
vinsertps $0x1c, 0x28(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x38(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vmovaps (%r8), %xmm3
vmovaps 0x170(%rsp), %xmm4
vsubps %xmm3, %xmm4, %xmm4
vdpps $0x7f, %xmm2, %xmm4, %xmm4
vmovaps 0x130(%rsp), %xmm5
vsubps %xmm3, %xmm5, %xmm5
vdpps $0x7f, %xmm2, %xmm5, %xmm5
vmovaps 0x120(%rsp), %xmm6
vsubps %xmm3, %xmm6, %xmm6
vdpps $0x7f, %xmm2, %xmm6, %xmm6
vmovaps 0x150(%rsp), %xmm7
vsubps %xmm3, %xmm7, %xmm7
vdpps $0x7f, %xmm2, %xmm7, %xmm7
vmovaps 0x160(%rsp), %xmm8
vsubps %xmm3, %xmm8, %xmm8
vdpps $0x7f, %xmm2, %xmm8, %xmm8
vmovaps 0x110(%rsp), %xmm9
vsubps %xmm3, %xmm9, %xmm9
vdpps $0x7f, %xmm2, %xmm9, %xmm9
vmovaps 0x100(%rsp), %xmm10
vsubps %xmm3, %xmm10, %xmm10
vdpps $0x7f, %xmm2, %xmm10, %xmm10
vmovaps 0x140(%rsp), %xmm12
vsubps %xmm3, %xmm12, %xmm3
vdpps $0x7f, %xmm2, %xmm3, %xmm2
vsubss %xmm1, %xmm21, %xmm3
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm1, %xmm10, %xmm10
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm4, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm6) + xmm10
vfmadd231ss %xmm7, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm7) + xmm1
vsubss %xmm0, %xmm21, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm10, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm10) + xmm1
vfmadd231ss %xmm9, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm8) + xmm1
vucomiss 0xc(%r8), %xmm1
jb 0x1c97b48
vmovss 0x20(%r8), %xmm12
vucomiss %xmm1, %xmm12
jb 0x1c97b48
movq %r13, 0xb8(%rsp)
movq %r12, %r13
movl %r15d, %r12d
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm24, %xmm8
vmulps 0x160(%rsp), %xmm7, %xmm9
vmulps 0x110(%rsp), %xmm7, %xmm10
vmulps 0x100(%rsp), %xmm7, %xmm11
vmulps 0x140(%rsp), %xmm7, %xmm7
vfmadd231ps 0x170(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x130(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x120(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x150(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps %xmm31, %xmm7, %xmm6
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq %r14, %r15
movq (%rax,%r14,8), %r14
movl 0x24(%r8), %eax
testl %eax, 0x34(%r14)
je 0x1c97b02
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
jne 0x1c977fc
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c97b04
vbroadcastss %xmm5, %xmm5
vmulps 0x260(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x270(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x280(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x290(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%r10), %rax
vshufps $0xe9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,2,3]
vmovlps %xmm3, 0x230(%rsp)
vmovss %xmm2, 0x238(%rsp)
vmovlps %xmm0, 0x23c(%rsp)
movq 0xb0(%rsp), %rcx
movl %ecx, 0x244(%rsp)
movl %r15d, 0x248(%rsp)
movl (%rax), %ecx
movl %ecx, 0x24c(%rsp)
movl 0x4(%rax), %eax
movl %eax, 0x250(%rsp)
vmovss %xmm1, 0x20(%r8)
movl $0xffffffff, 0x2c(%rsp) # imm = 0xFFFFFFFF
leaq 0x2c(%rsp), %rax
movq %rax, 0xc0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0xc8(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0xd0(%rsp)
movq %r8, 0xd8(%rsp)
leaq 0x230(%rsp), %rax
movq %rax, 0xe0(%rsp)
movl $0x1, 0xe8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
movq %r9, 0x58(%rsp)
movb %r11b, 0xb(%rsp)
movq %rdi, 0x50(%rsp)
vmovaps %xmm14, 0xf0(%rsp)
vmovss %xmm12, 0xc(%rsp)
je 0x1c97a02
leaq 0xc0(%rsp), %rdi
movq %r10, 0xa8(%rsp)
vzeroupper
callq *%rax
vmovss 0xc(%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x60(%rsp), %xmm30
vmovaps 0x70(%rsp), %xmm28
movq 0x50(%rsp), %rdi
vmovaps 0x30(%rsp), %xmm29
movq 0x20(%rsp), %r8
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x25a541(%rip), %xmm25 # 0x1ef1ebc
vbroadcastss 0x254d8f(%rip), %xmm24 # 0x1eec714
vmovss 0x25a0bd(%rip), %xmm23 # 0x1ef1a4c
vmovss 0x259667(%rip), %xmm22 # 0x1ef1000
vmovss 0x254d71(%rip), %xmm21 # 0x1eec714
vmovss 0x289533(%rip), %xmm20 # 0x1f20ee0
vbroadcastss 0x259635(%rip), %ymm19 # 0x1ef0fec
vbroadcastss 0x2894ff(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x2894f9(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x259617(%rip), %xmm31 # 0x1ef0fec
movb 0xb(%rsp), %r11b
vbroadcastss 0x2894f8(%rip), %ymm27 # 0x1f20edc
movq 0xa8(%rsp), %r10
movq 0x58(%rsp), %r9
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c97b52
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c97afe
testb $0x2, (%rcx)
jne 0x1c97a23
testb $0x40, 0x3e(%r14)
je 0x1c97af1
leaq 0xc0(%rsp), %rdi
movq %r10, %r14
vzeroupper
callq *%rax
vmovss 0xc(%rsp), %xmm12
vmovaps 0x10(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm15
vmovaps 0xf0(%rsp), %xmm14
vmovaps 0x60(%rsp), %xmm30
vmovaps 0x70(%rsp), %xmm28
movq 0x50(%rsp), %rdi
vmovaps 0x30(%rsp), %xmm29
movq 0x20(%rsp), %r8
vxorps %xmm16, %xmm16, %xmm16
vmovss 0x25a43c(%rip), %xmm25 # 0x1ef1ebc
vbroadcastss 0x254c8a(%rip), %xmm24 # 0x1eec714
vmovss 0x259fb8(%rip), %xmm23 # 0x1ef1a4c
vmovss 0x259562(%rip), %xmm22 # 0x1ef1000
vmovss 0x254c6c(%rip), %xmm21 # 0x1eec714
vmovss 0x28942e(%rip), %xmm20 # 0x1f20ee0
vbroadcastss 0x259530(%rip), %ymm19 # 0x1ef0fec
vbroadcastss 0x2893fa(%rip), %xmm18 # 0x1f20ec0
vbroadcastss 0x2893f4(%rip), %xmm17 # 0x1f20ec4
vbroadcastss 0x259512(%rip), %xmm31 # 0x1ef0fec
movb 0xb(%rsp), %r11b
vbroadcastss 0x2893f3(%rip), %ymm27 # 0x1f20edc
movq %r14, %r10
movq 0x58(%rsp), %r9
movq 0xc0(%rsp), %rax
cmpl $0x0, (%rax)
je 0x1c97b52
movb $0x1, %al
jmp 0x1c97b54
xorl %eax, %eax
orb %al, %r12b
movq %r15, %r14
movl %r12d, %r15d
movq %r13, %r12
movq 0xb8(%rsp), %r13
jmp 0x1c97b48
vxorps %xmm16, %xmm16, %xmm16
vmovaps 0x30(%rsp), %xmm29
vmovaps %xmm26, %xmm15
jmp 0x1c97b42
xorl %ebx, %ebx
jmp 0x1c97b42
movb $0x1, %bl
vmovaps %xmm26, %xmm14
vmovaps 0x40(%rsp), %xmm15
vmovaps 0x10(%rsp), %xmm13
testb %bl, %bl
jne 0x1c96bfb
jmp 0x1c97b60
xorl %eax, %eax
testb %al, %al
jne 0x1c97b04
vmovss %xmm12, 0x20(%r8)
jmp 0x1c97b04
vinsertps $0x10, %xmm13, %xmm15, %xmm0 # xmm0 = xmm15[0],xmm13[0],xmm15[2,3]
vmovaps 0x210(%rsp), %xmm7
vmovaps 0x200(%rsp), %xmm11
vmovaps 0x1f0(%rsp), %xmm17
vmovaps 0x1e0(%rsp), %xmm6
vmovaps 0x1d0(%rsp), %xmm5
vmovaps 0x1c0(%rsp), %xmm8
vmovaps 0x1b0(%rsp), %xmm9
vmovaps 0x1a0(%rsp), %xmm10
vmovups 0x310(%rsp), %ymm28
vmovups 0x2f0(%rsp), %ymm30
jmp 0x1c966f3
testb $0x1, %r15b
jne 0x1c97bf3
vmovups 0x2d0(%rsp), %ymm0
vcmpleps 0x20(%r8){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %edi, %r13d
andl %eax, %r13d
setne %r11b
jne 0x1c9606c
andb $0x1, %r11b
movl %r11d, %eax
addq $0x358, %rsp # imm = 0x358
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 4>::intersect_hn<embree::avx512::OrientedCurve1IntersectorK<embree::HermiteCurveT, 4>, embree::avx512::Intersect1KEpilog1<4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayHitK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
static __forceinline void intersect_hn(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
Vec3ff p0,t0,p1,t1; Vec3fa n0,dn0,n1,dn1; geom->gather_hermite(p0,t0,n0,dn0,p1,t1,n1,dn1,geom->curve(primID));
Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,n0,dn0,n1,dn1,Epilog(ray,k,context,geomID,primID));
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x478, %rsp # imm = 0x478
movq %rcx, %r10
movq %rdx, %r15
movq %rsi, %r11
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rdx
vbroadcastss 0x12(%r8,%rdx), %xmm0
vmovss (%rsi,%r15,4), %xmm1
vmovss 0x40(%rsi,%r15,4), %xmm2
vinsertps $0x10, 0x10(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%rsi,%r15,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%rsi,%r15,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rdx), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r8,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
leal (,%rdx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rsi
leal (%rsi,%rsi), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x27a9f8(%rip), %ymm26 # 0x1f12704
vbroadcastss 0x2891c6(%rip), %ymm27 # 0x1f20edc
vpermps %ymm0, %ymm26, %ymm14
vpermps %ymm0, %ymm27, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm26, %ymm14
vpermps %ymm3, %ymm27, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x289139(%rip), %ymm5 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm5, %ymm2, %ymm6
vbroadcastss 0x25924b(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x254939(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r9
subq %rcx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rsi), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %rsi
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %rsi, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x30(%r11,%r15,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x288045(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x80(%r11,%r15,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x288020(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2c2a22(%rip), %ymm7, %k0 # 0x1f5a920
vmovups %ymm3, 0x3f0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c99d14
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %ebp
leaq (%r15,%r15,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r13
addq $0x10, %r13
movl $0x1, %eax
shlxl %r15d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x1c0(%rsp)
vbroadcastss 0x259092(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x288f60(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x288f52(%rip), %xmm20 # 0x1f20ec0
vxorps %xmm31, %xmm31, %xmm31
tzcntq %rbp, %rax
movl 0x2(%r8), %ebx
movl 0x6(%r8,%rax,4), %r14d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rbx,8), %rax
movq %r14, %rcx
imulq 0x68(%rax), %rcx
movq 0x58(%rax), %rdx
movq 0x90(%rax), %rsi
movl (%rdx,%rcx), %ecx
movq 0xa0(%rax), %rdi
movq %rdi, %rdx
imulq %rcx, %rdx
vmovaps (%rsi,%rdx), %xmm5
leaq 0x1(%rcx), %rdx
imulq %rdx, %rdi
vmovaps (%rsi,%rdi), %xmm4
movq 0xc8(%rax), %rsi
movq 0xd8(%rax), %rdi
movq %rdi, %r9
imulq %rcx, %r9
vmovups (%rsi,%r9), %xmm6
movq 0x100(%rax), %r9
imulq %rdx, %rdi
vmovups (%rsi,%rdi), %xmm7
movq 0x110(%rax), %rsi
movq %rsi, %rdi
imulq %rcx, %rdi
vmovaps (%r9,%rdi), %xmm8
movq %rbx, %rdi
imulq %rdx, %rsi
vmovaps (%r9,%rsi), %xmm9
movq %r14, %r9
movq 0x148(%rax), %rsi
imulq %rsi, %rcx
imulq %rdx, %rsi
movq 0x138(%rax), %rax
vmovups (%rax,%rcx), %xmm10
vmovss (%r11,%r15,4), %xmm0
vinsertps $0x1c, 0x10(%r11,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r11,%r15,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovss 0x30(%r11,%r15,4), %xmm1
vmovss %xmm1, 0x8c(%rsp)
vbroadcastss 0x259e5c(%rip), %xmm22 # 0x1ef1ebc
vfmadd132ps %xmm22, %xmm5, %xmm8 # xmm8 = (xmm8 * xmm22) + xmm5
vfnmadd132ps %xmm22, %xmm4, %xmm9 # xmm9 = -(xmm9 * xmm22) + xmm4
vmovups (%rax,%rsi), %xmm11
vfmadd132ps %xmm22, %xmm6, %xmm10 # xmm10 = (xmm10 * xmm22) + xmm6
vfnmadd132ps %xmm22, %xmm7, %xmm11 # xmm11 = -(xmm11 * xmm22) + xmm7
vxorps %xmm15, %xmm15, %xmm15
vmulps %xmm4, %xmm15, %xmm1
vfmadd231ps %xmm15, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm15) + xmm1
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm1, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm1
vaddps %xmm2, %xmm5, %xmm2
vfmadd231ps %xmm17, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm17) + xmm1
vfnmadd231ps %xmm17, %xmm5, %xmm1 # xmm1 = -(xmm5 * xmm17) + xmm1
vmulps %xmm7, %xmm15, %xmm12
vfmadd231ps %xmm15, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm15) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm12, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm3) + xmm12
vaddps %xmm3, %xmm6, %xmm13
vfmadd231ps %xmm17, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm6, %xmm12 # xmm12 = -(xmm6 * xmm17) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm4, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm3) + xmm4
vfmadd231ps %xmm15, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm15) + xmm3
vfmadd231ps %xmm15, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm15) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfnmadd231ps %xmm9, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm9) + xmm4
vfmadd231ps %xmm8, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm8) + xmm4
vfnmadd231ps %xmm5, %xmm15, %xmm4 # xmm4 = -(xmm15 * xmm5) + xmm4
vxorps %xmm5, %xmm5, %xmm5
vfmadd213ps %xmm7, %xmm11, %xmm5 # xmm5 = (xmm11 * xmm5) + xmm7
vfmadd231ps %xmm15, %xmm10, %xmm5 # xmm5 = (xmm10 * xmm15) + xmm5
vfmadd231ps %xmm15, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm15) + xmm5
vmulps %xmm17, %xmm7, %xmm8
vfnmadd231ps %xmm11, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm11) + xmm8
vfmadd231ps %xmm10, %xmm15, %xmm8 # xmm8 = (xmm15 * xmm10) + xmm8
vfnmadd231ps %xmm6, %xmm15, %xmm8 # xmm8 = -(xmm15 * xmm6) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm7 # xmm7 = xmm13[1,2,0,3]
vmulps %xmm7, %xmm1, %xmm7
vfmsub231ps %xmm13, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm13) - xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,2,0,3]
vmulps %xmm1, %xmm9, %xmm9
vfmsub231ps %xmm12, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm12) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm5, %xmm5, %xmm10 # xmm10 = xmm5[1,2,0,3]
vmulps %xmm4, %xmm10, %xmm10
vfmsub231ps %xmm5, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm5) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm5 # xmm5 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm10, %xmm10
vfmsub231ps %xmm8, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm8) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm6 # xmm6 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm8, %xmm15, %xmm10 # xmm10 = xmm8[0],xmm15[1,2,3]
vrsqrt14ss %xmm10, %xmm15, %xmm11
vmovss 0x254589(%rip), %xmm16 # 0x1eec718
vmulss %xmm16, %xmm11, %xmm12
vmovss 0x2549e1(%rip), %xmm17 # 0x1eecb80
vmulss %xmm17, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vsubss %xmm11, %xmm12, %xmm11
vdpps $0x7f, %xmm9, %xmm7, %xmm12
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm9, %xmm14, %xmm9
vbroadcastss %xmm12, %xmm12
vmulps %xmm7, %xmm12, %xmm7
vsubps %xmm7, %xmm9, %xmm7
vrcp14ss %xmm10, %xmm15, %xmm9
vmovss 0x258e09(%rip), %xmm18 # 0x1ef0ff8
vfnmadd213ss %xmm18, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm18
vmulss %xmm8, %xmm9, %xmm8
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vdpps $0x7f, %xmm5, %xmm5, %xmm8
vmovss %xmm8, %xmm15, %xmm9 # xmm9 = xmm8[0],xmm15[1,2,3]
vrsqrt14ss %xmm9, %xmm15, %xmm10
vmulss %xmm16, %xmm10, %xmm11
vmulss %xmm17, %xmm8, %xmm12
vmulss %xmm10, %xmm12, %xmm12
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm12, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vdpps $0x7f, %xmm6, %xmm5, %xmm11
vmulps %xmm5, %xmm10, %xmm12
vbroadcastss %xmm8, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm11, %xmm11
vmulps %xmm5, %xmm11, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm9, %xmm15, %xmm6
vfnmadd213ss %xmm18, %xmm6, %xmm8 # xmm8 = -(xmm6 * xmm8) + xmm18
vmulss %xmm6, %xmm8, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm13, %xmm8
vsubps %xmm8, %xmm2, %xmm14
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm13, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm13
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm12, %xmm6
vsubps %xmm6, %xmm3, %xmm15
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm12, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm16
vaddps %xmm2, %xmm4, %xmm2
vmulps %xmm22, %xmm7, %xmm3
vaddps %xmm3, %xmm14, %xmm17
vmulps %xmm22, %xmm5, %xmm3
vsubps %xmm3, %xmm15, %xmm18
vmulps %xmm22, %xmm1, %xmm1
vaddps %xmm1, %xmm13, %xmm21
vmulps %xmm22, %xmm2, %xmm1
vsubps %xmm1, %xmm16, %xmm22
vsubps %xmm0, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x320(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps (%r13), %xmm4
vmovaps 0x10(%r13), %xmm5
vmovaps 0x20(%r13), %xmm7
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x310(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm6, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm6) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm18, %xmm3
vbroadcastss %xmm3, %xmm6
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x300(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm7, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm6) + xmm3
vsubps %xmm0, %xmm15, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vmovaps %xmm6, 0x2f0(%rsp)
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm8) + xmm6
vsubps %xmm0, %xmm13, %xmm10
vbroadcastss %xmm10, %xmm8
vshufps $0x55, %xmm10, %xmm10, %xmm9 # xmm9 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2e0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm9, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm9) + xmm10
vfmadd231ps %xmm8, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm8) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm8
vshufps $0x55, %xmm11, %xmm11, %xmm9 # xmm9 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2d0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm11, %xmm11
vfmadd231ps %xmm9, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm9) + xmm11
vfmadd231ps %xmm8, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm8) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm8
vshufps $0x55, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2c0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm7, %xmm12, %xmm12
vfmadd231ps %xmm9, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm9) + xmm12
vfmadd231ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm8) + xmm12
vsubps %xmm0, %xmm16, %xmm9
vbroadcastss %xmm9, %xmm0
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2b0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm7
vfmadd231ps %xmm8, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm8) + xmm7
vfmadd231ps %xmm0, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm0) + xmm7
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm23 # xmm23 = xmm3[0],xmm12[0]
vmovlhps %xmm7, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm7[0]
vminps %xmm9, %xmm8, %xmm0
vmaxps %xmm9, %xmm8, %xmm4
vminps %xmm24, %xmm23, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm24, %xmm23, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vminps %xmm5, %xmm0, %xmm0
vshufpd $0x3, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1]
vmaxps %xmm5, %xmm4, %xmm4
vandps %xmm19, %xmm0, %xmm0
vandps %xmm19, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
leaq 0xff(%rbp), %r12
vmulss 0x2599d4(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm19 # xmm19 = xmm1[0,0]
vmovddup %xmm2, %xmm25 # xmm25 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm10, %xmm5 # xmm5 = xmm10[0,0]
vmovddup %xmm11, %xmm2 # xmm2 = xmm11[0,0]
vmovddup %xmm12, %xmm3 # xmm3 = xmm12[0,0]
vmovddup %xmm7, %xmm10 # xmm10 = xmm7[0,0]
vmovaps %xmm0, 0x170(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm20, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
xorl %r14d, %r14d
vmovaps %xmm8, 0xb0(%rsp)
vsubps %xmm8, %xmm9, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vsubps %xmm9, %xmm23, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps %xmm23, 0x100(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x330(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vmovaps %xmm13, 0x290(%rsp)
vsubps %xmm14, %xmm13, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm3, %xmm9
vmovaps %xmm2, %xmm7
vmovaps %xmm17, 0x260(%rsp)
vmovaps %xmm21, 0x240(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm18, 0x250(%rsp)
vmovaps %xmm22, 0x230(%rsp)
vsubps %xmm18, %xmm22, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm15, 0x280(%rsp)
vmovaps %xmm16, 0x270(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm1, %xmm16
vpbroadcastd %edi, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
vpbroadcastd %r9d, %xmm0
vmovdqa %xmm0, 0x1d0(%rsp)
vmovsd 0x2540c7(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
vmovaps %xmm19, 0x1a0(%rsp)
vmovaps %xmm25, 0x190(%rsp)
vmovaps %xmm1, 0x70(%rsp)
vmovaps %xmm6, 0x40(%rsp)
vmovaps %xmm5, 0x30(%rsp)
vmovaps %xmm2, 0x20(%rsp)
vmovaps %xmm3, 0x10(%rsp)
vmovaps %xmm10, (%rsp)
vmovups %ymm29, 0x430(%rsp)
vmovups %ymm28, 0x410(%rsp)
vmovaps %xmm15, %xmm30
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x254089(%rip), %ymm13 # 0x1eec714
vsubps %xmm1, %xmm13, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm7, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm19, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm19) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x28880a(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vpermps %ymm3, %ymm26, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm26, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm26, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm26, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x2887c1(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2887d8(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm13, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x258810(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2c7520(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c98ae8
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c98b0d
movl %r14d, %eax
movl %ecx, 0x1b0(%rsp,%rax,4)
vmovlps %xmm0, 0x340(%rsp,%rax,8)
vmovlps %xmm30, 0x450(%rsp,%rax,8)
incl %r14d
vbroadcastss 0x2584d5(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2883a3(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x288395(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x2584b7(%rip), %ymm18 # 0x1ef0fec
vmovss 0x2883a1(%rip), %xmm21 # 0x1f20ee0
vmovss 0x253bcb(%rip), %xmm22 # 0x1eec714
vmovss 0x2584ad(%rip), %xmm23 # 0x1ef1000
vmovss 0x258eef(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x253bad(%rip), %xmm25 # 0x1eec714
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps 0xf0(%rsp), %xmm28
vmovaps 0xe0(%rsp), %xmm29
testl %r14d, %r14d
je 0x1c99cf3
leal -0x1(%r14), %eax
vmovss 0x340(%rsp,%rax,8), %xmm0
vmovss 0x344(%rsp,%rax,8), %xmm1
movl 0x1b0(%rsp,%rax,4), %ecx
vmovsd 0x450(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1b0(%rsp,%rax,4)
cmovel %eax, %r14d
vxorps %xmm13, %xmm13, %xmm13
vcvtsi2ss %rdx, %xmm13, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm13, %xmm13, %xmm13
vcvtsi2ss %rdx, %xmm13, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm30
vfmadd231ss %xmm4, %xmm0, %xmm30 # xmm30 = (xmm0 * xmm4) + xmm30
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm11
vfmadd231ss %xmm2, %xmm0, %xmm11 # xmm11 = (xmm0 * xmm2) + xmm11
vsubss %xmm30, %xmm11, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c99cc1
vmovaps %xmm30, %xmm8
vmovaps %xmm15, %xmm30
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %r14d
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm5, %xmm3
vmulps %xmm1, %xmm7, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps 0x1a0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x190(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm8, 0x90(%rsp)
vbroadcastss %xmm8, %xmm6
vmovaps %xmm11, 0x50(%rsp)
vbroadcastss %xmm11, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm18, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x2591a8(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x257bfd(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c98df0
vmovss 0x2590d6(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c98e51
vmovss 0x2590c8(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x257bd1(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c98e51
vmovss 0x259096(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x257b9f(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c99891
vcmpltss %xmm31, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x257b64(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm31, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x252b84(%rip), %xmm7 # 0x1eeba20
vmovss %xmm31, %xmm7, %xmm7 {%k1}
vmovss 0x253cda(%rip), %xmm8 # 0x1eecb84
vmovss %xmm31, %xmm8, %xmm8 {%k1}
vcmpltss %xmm31, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c98ecc
jnp 0x1c98f0b
vucomiss %xmm13, %xmm14
jne 0x1c98f1e
jp 0x1c98f1e
vucomiss %xmm31, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x252b2f(%rip), %xmm13 # 0x1eeba20
vmovss %xmm31, %xmm13, %xmm13 {%k1}
vmovss 0x253c85(%rip), %xmm14 # 0x1eecb84
vmovss 0x25380b(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c98f3f
vmovaps 0xb0(%rsp), %xmm15
vmovaps 0xa0(%rsp), %xmm16
jmp 0x1c98f5a
vxorps %xmm20, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm31, %xmm13 # xmm13 = (xmm31 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xb0(%rsp), %xmm15
vmovaps 0xa0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm31, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x257a5b(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
jne 0x1c98f7a
jnp 0x1c98fe4
vucomiss %xmm9, %xmm10
jne 0x1c98fb9
jp 0x1c98fb9
vucomiss %xmm31, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x252a81(%rip), %xmm9 # 0x1eeba20
vmovss %xmm31, %xmm9, %xmm9 {%k1}
vmovss 0x253bd7(%rip), %xmm10 # 0x1eecb84
vmovss 0x25375d(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c98fda
vxorps %xmm20, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm31, %xmm9 # xmm9 = (xmm31 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %bl
vucomiss %xmm8, %xmm7
ja 0x1c99883
vaddss 0x2c4438(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x253b64(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm30, %xmm4 # xmm4 = xmm30[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm30, %xmm1 # xmm1 = (xmm30 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm30, %xmm4 # xmm4 = (xmm30 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0x90(%rsp), %xmm30
vinsertps $0x10, %xmm1, %xmm30, %xmm6 # xmm6 = xmm30[0],xmm1[0],xmm30[2,3]
vmovaps 0x50(%rsp), %xmm0
vinsertps $0x10, %xmm4, %xmm0, %xmm5 # xmm5 = xmm0[0],xmm4[0],xmm0[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2539f5(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm29, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps 0x330(%rsp), %xmm13
vfmadd213ps 0x100(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x287ca6(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2c6ac4(%rip), %xmm31 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm31, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2c1724(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x287c15(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x287bae(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c99873
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm30, %xmm7
vmovaps 0x70(%rsp), %xmm16
jbe 0x1c9945d
testb %sil, %sil
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x257bdf(%rip), %xmm13 # 0x1ef0fec
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x100(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm14
je 0x1c99498
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c99498
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x257b81(%rip), %xmm13 # 0x1ef0fec
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x100(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c9988d
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm13, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm13, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm12, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm19, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c9954c
decq %rax
jne 0x1c994a9
jmp 0x1c99843
vucomiss %xmm31, %xmm0
jb 0x1c99843
vucomiss %xmm0, %xmm22
jb 0x1c99843
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm31, %xmm1
jb 0x1c99843
vucomiss %xmm1, %xmm22
jb 0x1c99843
vmovss 0x8(%r13), %xmm2
vinsertps $0x1c, 0x18(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm12
vmulss %xmm2, %xmm1, %xmm2
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm5) + xmm12
vfmadd231ss %xmm6, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm6) + xmm2
vsubss %xmm0, %xmm22, %xmm9
vmulss %xmm9, %xmm9, %xmm3
vmulss %xmm3, %xmm9, %xmm4
vmulss %xmm0, %xmm13, %xmm5
vmulss %xmm3, %xmm5, %xmm5
vmulps %xmm0, %xmm0, %xmm3
vmulss %xmm3, %xmm13, %xmm6
vmulss %xmm6, %xmm9, %xmm6
vmulps %xmm3, %xmm0, %xmm7
vmulss %xmm2, %xmm7, %xmm2
vfmadd231ss %xmm12, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm12) + xmm2
vfmadd231ss %xmm8, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm8) + xmm2
vfmadd231ss %xmm11, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm11) + xmm2
vucomiss 0x8c(%rsp), %xmm2
jb 0x1c99843
vmovss 0x80(%r11,%r15,4), %xmm14
vucomiss %xmm2, %xmm14
jb 0x1c99843
movq %rbp, 0x80(%rsp)
movq %r12, %rbp
vshufps $0x55, %xmm0, %xmm0, %xmm3 # xmm3 = xmm0[1,1,1,1]
vsubps %xmm3, %xmm25, %xmm8
vmulps 0x290(%rsp), %xmm3, %xmm10
vmulps 0x240(%rsp), %xmm3, %xmm11
vmulps 0x230(%rsp), %xmm3, %xmm12
vmulps 0x270(%rsp), %xmm3, %xmm13
vfmadd231ps 0x2a0(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x260(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x250(%rsp), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12
vfmadd231ps 0x280(%rsp), %xmm8, %xmm13 # xmm13 = (xmm8 * mem) + xmm13
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm11, %xmm12, %xmm11
vsubps %xmm12, %xmm13, %xmm12
vbroadcastss %xmm0, %xmm8
vmulps %xmm11, %xmm8, %xmm13
vbroadcastss %xmm9, %xmm9
vfmadd231ps %xmm10, %xmm9, %xmm13 # xmm13 = (xmm9 * xmm10) + xmm13
vmulps %xmm12, %xmm8, %xmm10
vfmadd231ps %xmm11, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm11) + xmm10
vmulps %xmm10, %xmm8, %xmm10
vfmadd231ps %xmm13, %xmm9, %xmm10 # xmm10 = (xmm9 * xmm13) + xmm10
vmulps %xmm17, %xmm10, %xmm9
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%rdi,8), %r12
movl 0x90(%r11,%r15,4), %eax
testl %eax, 0x34(%r12)
je 0x1c99838
vbroadcastss %xmm7, %xmm7
vmulps 0x1f0(%rsp), %xmm7, %xmm7
vbroadcastss %xmm6, %xmm6
vfmadd132ps 0x200(%rsp), %xmm7, %xmm6 # xmm6 = (xmm6 * mem) + xmm7
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x210(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x220(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vshufps $0xc9, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm9, %xmm9, %xmm6 # xmm6 = xmm9[1,2,0,3]
vmulps %xmm6, %xmm4, %xmm4
vfmsub231ps %xmm5, %xmm9, %xmm4 # xmm4 = (xmm9 * xmm5) - xmm4
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm11
jne 0x1c998c0
cmpq $0x0, 0x40(%r12)
jne 0x1c998c0
vmovss %xmm2, 0x80(%r11,%r15,4)
vextractps $0x1, %xmm4, 0xc0(%r11,%r15,4)
vextractps $0x2, %xmm4, 0xd0(%r11,%r15,4)
vmovss %xmm4, 0xe0(%r11,%r15,4)
vmovss %xmm0, 0xf0(%r11,%r15,4)
vmovss %xmm1, 0x100(%r11,%r15,4)
movl %r9d, 0x110(%r11,%r15,4)
movl %edi, 0x120(%r11,%r15,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x130(%r11,%r15,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x140(%r11,%r15,4)
movq %rbp, %r12
movq 0x80(%rsp), %rbp
jmp 0x1c99866
movq %rbp, %r12
movq 0x80(%rsp), %rbp
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps 0x50(%rsp), %xmm11
testb %bl, %bl
jne 0x1c98b9c
jmp 0x1c99cc1
vxorps %xmm31, %xmm31, %xmm31
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c99843
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c99893
xorl %ebx, %ebx
jmp 0x1c99843
movb $0x1, %bl
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x10(%rsp), %xmm9
vmovaps (%rsp), %xmm10
vmovaps %xmm30, %xmm15
vmovaps 0x90(%rsp), %xmm30
jmp 0x1c99860
movq 0x8(%r10), %rax
vshufps $0x55, %xmm4, %xmm4, %xmm0 # xmm0 = xmm4[1,1,1,1]
vshufps $0xaa, %xmm4, %xmm4, %xmm1 # xmm1 = xmm4[2,2,2,2]
vbroadcastss %xmm4, %xmm4
vmovaps %xmm0, 0x360(%rsp)
vmovaps %xmm1, 0x370(%rsp)
vmovaps %xmm4, 0x380(%rsp)
vmovaps %xmm8, 0x390(%rsp)
vmovaps %xmm3, 0x3a0(%rsp)
vmovaps 0x1d0(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovdqa 0x1e0(%rsp), %xmm0
vmovdqa %xmm0, 0x3c0(%rsp)
leaq 0x3d0(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rcx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
vmovss %xmm2, 0x80(%r11,%r15,4)
vmovaps 0x1c0(%rsp), %xmm0
vmovaps %xmm0, 0x110(%rsp)
leaq 0x110(%rsp), %rax
movq %rax, 0x130(%rsp)
movq 0x18(%r12), %rax
movq %rax, 0x138(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x140(%rsp)
movq %r11, 0x148(%rsp)
leaq 0x360(%rsp), %rax
movq %rax, 0x150(%rsp)
movl $0x4, 0x158(%rsp)
movq 0x40(%r12), %rax
testq %rax, %rax
movq %r8, 0xd8(%rsp)
movq %r11, 0xd0(%rsp)
movq %rdi, 0xc8(%rsp)
movq %r9, 0xc0(%rsp)
vmovaps %xmm15, 0x160(%rsp)
vmovss %xmm14, 0x6c(%rsp)
je 0x1c99ae5
leaq 0x130(%rsp), %rdi
movq %r10, 0x128(%rsp)
vzeroupper
callq *%rax
vmovss 0x6c(%rsp), %xmm14
vmovaps 0x50(%rsp), %xmm11
vmovaps 0x90(%rsp), %xmm30
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm29
vmovaps 0xf0(%rsp), %xmm28
vmovaps (%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm16
movq 0xc0(%rsp), %r9
movq 0xc8(%rsp), %rdi
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x257fcf(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x257579(%rip), %xmm23 # 0x1ef1000
vmovss 0x252c83(%rip), %xmm22 # 0x1eec714
vmovss 0x287445(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x257547(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x287411(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x28740b(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x287419(%rip), %ymm27 # 0x1f20edc
vbroadcastss 0x278c37(%rip), %ymm26 # 0x1f12704
movq 0xd0(%rsp), %r11
movq 0x128(%rsp), %r10
movq 0xd8(%rsp), %r8
vmovdqa 0x110(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c99c9e
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c99c05
testb $0x2, (%rcx)
jne 0x1c99b20
testb $0x40, 0x3e(%r12)
je 0x1c99c05
leaq 0x130(%rsp), %rdi
movq %r10, %r12
vzeroupper
callq *%rax
vmovss 0x6c(%rsp), %xmm14
vmovaps 0x50(%rsp), %xmm11
vmovaps 0x90(%rsp), %xmm30
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm29
vmovaps 0xf0(%rsp), %xmm28
vmovaps (%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm7
vmovaps 0x30(%rsp), %xmm5
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm16
movq 0xc0(%rsp), %r9
movq 0xc8(%rsp), %rdi
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x257eaa(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x257454(%rip), %xmm23 # 0x1ef1000
vmovss 0x252b5e(%rip), %xmm22 # 0x1eec714
vmovss 0x287320(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x257422(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x2872ec(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x2872e6(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x2872f4(%rip), %ymm27 # 0x1f20edc
vbroadcastss 0x278b12(%rip), %ymm26 # 0x1f12704
movq 0xd0(%rsp), %r11
movq %r12, %r10
movq 0xd8(%rsp), %r8
vmovdqa 0x110(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
kortestb %k1, %k1
je 0x1c99c9e
movq 0x148(%rsp), %rax
movq 0x150(%rsp), %rcx
vmovaps (%rcx), %xmm0
vmovups %xmm0, 0xc0(%rax) {%k1}
vmovaps 0x10(%rcx), %xmm0
vmovups %xmm0, 0xd0(%rax) {%k1}
vmovaps 0x20(%rcx), %xmm0
vmovups %xmm0, 0xe0(%rax) {%k1}
vmovaps 0x30(%rcx), %xmm0
vmovups %xmm0, 0xf0(%rax) {%k1}
vmovaps 0x40(%rcx), %xmm0
vmovups %xmm0, 0x100(%rax) {%k1}
vmovdqa 0x50(%rcx), %xmm0
vmovdqu32 %xmm0, 0x110(%rax) {%k1}
vmovdqa 0x60(%rcx), %xmm0
vmovdqu32 %xmm0, 0x120(%rax) {%k1}
vmovdqa 0x70(%rcx), %xmm0
vmovdqa32 %xmm0, 0x130(%rax) {%k1}
vmovdqa 0x80(%rcx), %xmm0
vmovdqa32 %xmm0, 0x140(%rax) {%k1}
jmp 0x1c99ca8
vmovss %xmm14, 0x80(%r11,%r15,4)
vbroadcastss 0x25733a(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x252a58(%rip), %xmm25 # 0x1eec714
jmp 0x1c9982b
vinsertps $0x10, %xmm11, %xmm30, %xmm0 # xmm0 = xmm30[0],xmm11[0],xmm30[2,3]
vmovaps 0x1a0(%rsp), %xmm19
vmovaps 0x190(%rsp), %xmm25
vmovups 0x430(%rsp), %ymm29
vmovups 0x410(%rsp), %ymm28
jmp 0x1c98676
vmovups 0x3f0(%rsp), %ymm0
vcmpleps 0x80(%r11,%r15,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %r12d, %ebp
andl %eax, %ebp
jne 0x1c97f74
addq $0x478, %rsp # imm = 0x478
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 4>::occluded_hn<embree::avx512::OrientedCurve1IntersectorK<embree::HermiteCurveT, 4>, embree::avx512::Occluded1KEpilog1<4, true>>(embree::avx512::CurvePrecalculationsK<4>&, embree::RayK<4>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Occlusion (shadow-ray) query for lane k of a 4-wide ray packet against a
// packed CurveNi<8> primitive holding Hermite curves.
// Returns true on the first curve segment that occludes the ray (any-hit),
// false if no segment in the primitive is hit.
static __forceinline bool occluded_hn(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
// Broad phase: test ray k against the quantized bounds of all curves in
// the block at once; 'valid' flags candidate lanes, 'tNear' their entry
// distances (used below for early termination culling).
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Narrow phase: visit candidate curves one set bit at a time.
while (mask)
{
// bscf: extract the index of the lowest set bit and clear it from mask.
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the Hermite control data (endpoints p0/p1, tangents t0/t1) and
// the orientation-normal data (n0/dn0, n1/dn1) for this curve segment.
Vec3ff p0,t0,p1,t1; Vec3fa n0,dn0,n1,dn1; geom->gather_hermite(p0,t0,n0,dn0,p1,t1,n1,dn1,geom->curve(primID));
// Any hit suffices for occlusion — return immediately on success.
if (Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,n0,dn0,n1,dn1,Epilog(ray,k,context,geomID,primID)))
return true;
// ray.tfar[k] may have been shortened by the epilog; drop remaining
// candidates whose bounds entry distance now lies beyond it.
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $0x478, %rsp # imm = 0x478
movq %r8, %r10
movq %rdx, %r14
movq %rsi, %r11
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %rdx
leaq (%rdx,%rdx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r11,%r14,4), %xmm1
vmovss 0x40(%r11,%r14,4), %xmm2
vinsertps $0x10, 0x10(%r11,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x20(%r11,%r14,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0x50(%r11,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0x60(%r11,%r14,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %r9
vpmovsxbd 0x6(%r8,%r9,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%r9,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r10,%r8), %ymm1
addq %rax, %r8
vpmovsxbd 0x6(%r10,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %edx
vpmovsxbd 0x6(%r10,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2788de(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x2870a7(%rip), %ymm27 # 0x1f20edc
vpermps %ymm0, %ymm27, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm27, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x287016(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x25712d(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x25281b(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %r8
subq %rax, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r10,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rdx
shlq $0x3, %r9
subq %rax, %r9
movl %eax, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r10,%r8), %ymm6
subq %rsi, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r10,%rdx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r10,%r9), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x30(%r11,%r14,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x285f2b(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x80(%r11,%r14,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x285f06(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2c0902(%rip), %ymm1, %k0 # 0x1f5a920
vmovups %ymm6, 0x3f0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0xf(%rsp)
je 0x1c9bdfb
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
leaq (%r14,%r14,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r12
addq $0x10, %r12
movl $0x1, %eax
shlxl %r14d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %xmm0
vmovdqa %xmm0, 0x1d0(%rsp)
vbroadcastss 0x256f6c(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x286e3a(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x286e2c(%rip), %xmm20 # 0x1f20ec0
vxorps %xmm31, %xmm31, %xmm31
movq %rcx, 0x80(%rsp)
movq %r12, 0x138(%rsp)
tzcntq %r13, %rax
movl 0x2(%r10), %r15d
movl 0x6(%r10,%rax,4), %eax
movq (%rcx), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%r15,8), %rdx
movq %rax, %rcx
imulq 0x68(%rdx), %rcx
movq 0x58(%rdx), %rsi
movq 0x90(%rdx), %rdi
movl (%rsi,%rcx), %ebx
movq 0xa0(%rdx), %r8
movq %r8, %rsi
imulq %rbx, %rsi
vmovaps (%rdi,%rsi), %xmm0
leaq 0x1(%rbx), %rsi
imulq %rsi, %r8
vmovaps (%rdi,%r8), %xmm1
movq 0xc8(%rdx), %rdi
movq 0xd8(%rdx), %r8
movq %r8, %r9
imulq %rbx, %r9
vmovups (%rdi,%r9), %xmm2
movq 0x100(%rdx), %r9
imulq %rsi, %r8
vmovups (%rdi,%r8), %xmm3
movq 0x110(%rdx), %rdi
movq %rdi, %r8
imulq %rbx, %r8
vmovaps (%r9,%r8), %xmm4
movq %r15, %r8
imulq %rsi, %rdi
vmovaps (%r9,%rdi), %xmm5
movq 0x80(%rsp), %rcx
vpbroadcastd %eax, %xmm6
vmovdqa %xmm6, 0x230(%rsp)
movq 0x148(%rdx), %rax
imulq %rax, %rbx
imulq %rsi, %rax
movq 0x138(%rdx), %rdx
vmovss (%r11,%r14,4), %xmm6
vinsertps $0x1c, 0x10(%r11,%r14,4), %xmm6, %xmm6 # xmm6 = xmm6[0],mem[0],zero,zero
vinsertps $0x28, 0x20(%r11,%r14,4), %xmm6, %xmm6 # xmm6 = xmm6[0,1],mem[0],zero
vbroadcastss 0x257d25(%rip), %xmm22 # 0x1ef1ebc
vfmadd132ps %xmm22, %xmm0, %xmm4 # xmm4 = (xmm4 * xmm22) + xmm0
vfnmadd132ps %xmm22, %xmm1, %xmm5 # xmm5 = -(xmm5 * xmm22) + xmm1
vmovups (%rdx,%rbx), %xmm10
vfmadd132ps %xmm22, %xmm2, %xmm10 # xmm10 = (xmm10 * xmm22) + xmm2
vmovups (%rdx,%rax), %xmm11
vfnmadd132ps %xmm22, %xmm3, %xmm11 # xmm11 = -(xmm11 * xmm22) + xmm3
vxorps %xmm15, %xmm15, %xmm15
vmulps %xmm1, %xmm15, %xmm7
vfmadd231ps %xmm15, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm15) + xmm7
vxorps %xmm8, %xmm8, %xmm8
vfmadd213ps %xmm7, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm8) + xmm7
vaddps %xmm0, %xmm8, %xmm8
vfmadd231ps %xmm17, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm17) + xmm7
vfnmadd231ps %xmm17, %xmm0, %xmm7 # xmm7 = -(xmm0 * xmm17) + xmm7
vmulps %xmm3, %xmm15, %xmm12
vfmadd231ps %xmm15, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm15) + xmm12
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm12, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm9) + xmm12
vaddps %xmm2, %xmm9, %xmm13
vfmadd231ps %xmm17, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm2, %xmm12 # xmm12 = -(xmm2 * xmm17) + xmm12
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm1, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm9) + xmm1
vfmadd231ps %xmm15, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm15) + xmm9
vfmadd231ps %xmm15, %xmm0, %xmm9 # xmm9 = (xmm0 * xmm15) + xmm9
vmulps %xmm17, %xmm1, %xmm1
vfnmadd231ps %xmm5, %xmm17, %xmm1 # xmm1 = -(xmm17 * xmm5) + xmm1
vfmadd231ps %xmm4, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm4) + xmm1
vfnmadd231ps %xmm0, %xmm15, %xmm1 # xmm1 = -(xmm15 * xmm0) + xmm1
vxorps %xmm0, %xmm0, %xmm0
vfmadd213ps %xmm3, %xmm11, %xmm0 # xmm0 = (xmm11 * xmm0) + xmm3
vfmadd231ps %xmm15, %xmm10, %xmm0 # xmm0 = (xmm10 * xmm15) + xmm0
vfmadd231ps %xmm15, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm15) + xmm0
vmulps %xmm17, %xmm3, %xmm3
vfnmadd231ps %xmm11, %xmm17, %xmm3 # xmm3 = -(xmm17 * xmm11) + xmm3
vfmadd231ps %xmm10, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm10) + xmm3
vfnmadd231ps %xmm2, %xmm15, %xmm3 # xmm3 = -(xmm15 * xmm2) + xmm3
vshufps $0xc9, %xmm7, %xmm7, %xmm2 # xmm2 = xmm7[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm4 # xmm4 = xmm13[1,2,0,3]
vmulps %xmm4, %xmm7, %xmm4
vfmsub231ps %xmm13, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm13) - xmm4
vshufps $0xc9, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[1,2,0,3]
vmulps %xmm5, %xmm7, %xmm5
vfmsub231ps %xmm12, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm12) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm10 # xmm10 = xmm0[1,2,0,3]
vmulps %xmm1, %xmm10, %xmm10
vfmsub231ps %xmm0, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm0) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,2,0,3]
vmulps %xmm1, %xmm10, %xmm10
vfmsub231ps %xmm3, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm3) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm2 # xmm2 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm4, %xmm4, %xmm3
vmovss %xmm3, %xmm15, %xmm10 # xmm10 = xmm3[0],xmm15[1,2,3]
vrsqrt14ss %xmm10, %xmm15, %xmm11
vmovss 0x25244d(%rip), %xmm16 # 0x1eec718
vmulss %xmm16, %xmm11, %xmm12
vmovss 0x2528a5(%rip), %xmm17 # 0x1eecb80
vmulss %xmm17, %xmm3, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vsubss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm4, %xmm11, %xmm12
vdpps $0x7f, %xmm5, %xmm4, %xmm13
vbroadcastss %xmm3, %xmm14
vmulps %xmm5, %xmm14, %xmm5
vbroadcastss %xmm13, %xmm13
vmulps %xmm4, %xmm13, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vrcp14ss %xmm10, %xmm15, %xmm5
vmovss 0x256cce(%rip), %xmm18 # 0x1ef0ff8
vfnmadd213ss %xmm18, %xmm5, %xmm3 # xmm3 = -(xmm5 * xmm3) + xmm18
vmulss %xmm3, %xmm5, %xmm3
vdpps $0x7f, %xmm0, %xmm0, %xmm5
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmovss %xmm5, %xmm15, %xmm4 # xmm4 = xmm5[0],xmm15[1,2,3]
vrsqrt14ss %xmm4, %xmm15, %xmm10
vmulss %xmm16, %xmm10, %xmm11
vmulss %xmm17, %xmm5, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vmulps %xmm0, %xmm10, %xmm11
vdpps $0x7f, %xmm2, %xmm0, %xmm13
vbroadcastss %xmm5, %xmm14
vmulps %xmm2, %xmm14, %xmm2
vbroadcastss %xmm13, %xmm13
vmulps %xmm0, %xmm13, %xmm0
vsubps %xmm0, %xmm2, %xmm0
vrcp14ss %xmm4, %xmm15, %xmm2
vfnmadd213ss %xmm18, %xmm2, %xmm5 # xmm5 = -(xmm2 * xmm5) + xmm18
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vshufps $0xff, %xmm8, %xmm8, %xmm2 # xmm2 = xmm8[3,3,3,3]
vmulps %xmm2, %xmm12, %xmm4
vsubps %xmm4, %xmm8, %xmm13
vshufps $0xff, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[3,3,3,3]
vmulps %xmm5, %xmm12, %xmm5
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
vsubps %xmm2, %xmm7, %xmm3
vaddps %xmm4, %xmm8, %xmm14
vaddps %xmm2, %xmm7, %xmm2
vshufps $0xff, %xmm9, %xmm9, %xmm4 # xmm4 = xmm9[3,3,3,3]
vmulps %xmm4, %xmm11, %xmm5
vsubps %xmm5, %xmm9, %xmm15
vshufps $0xff, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[3,3,3,3]
vmulps %xmm7, %xmm11, %xmm7
vmulps %xmm0, %xmm4, %xmm0
vaddps %xmm0, %xmm7, %xmm0
vsubps %xmm0, %xmm1, %xmm4
vaddps %xmm5, %xmm9, %xmm16
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm22, %xmm3, %xmm1
vaddps %xmm1, %xmm13, %xmm17
vmulps %xmm22, %xmm4, %xmm1
vsubps %xmm1, %xmm15, %xmm18
vmulps %xmm22, %xmm2, %xmm1
vaddps %xmm1, %xmm14, %xmm21
vmulps %xmm22, %xmm0, %xmm0
vsubps %xmm0, %xmm16, %xmm22
vsubps %xmm6, %xmm13, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vmovaps %xmm0, 0x330(%rsp)
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r12), %xmm3
vmovaps 0x10(%r12), %xmm4
vmovaps 0x20(%r12), %xmm5
vmulps %xmm0, %xmm5, %xmm0
vfmadd231ps %xmm2, %xmm4, %xmm0 # xmm0 = (xmm4 * xmm2) + xmm0
vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0
vsubps %xmm6, %xmm17, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x320(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm5, %xmm1
vfmadd231ps %xmm7, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm7) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vsubps %xmm6, %xmm18, %xmm8
vbroadcastss %xmm8, %xmm2
vshufps $0x55, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1,1,1]
vmovaps %xmm8, 0x310(%rsp)
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm5, %xmm8, %xmm8
vfmadd231ps %xmm7, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm7) + xmm8
vfmadd231ps %xmm2, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm2) + xmm8
vsubps %xmm6, %xmm15, %xmm9
vbroadcastss %xmm9, %xmm2
vshufps $0x55, %xmm9, %xmm9, %xmm7 # xmm7 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x300(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm5, %xmm9, %xmm9
vfmadd231ps %xmm7, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm7) + xmm9
vfmadd231ps %xmm2, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm2) + xmm9
vsubps %xmm6, %xmm14, %xmm10
vbroadcastss %xmm10, %xmm2
vshufps $0x55, %xmm10, %xmm10, %xmm7 # xmm7 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2f0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm5, %xmm10, %xmm10
vfmadd231ps %xmm7, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm7) + xmm10
vfmadd231ps %xmm2, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm2) + xmm10
vsubps %xmm6, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm2
vshufps $0x55, %xmm11, %xmm11, %xmm7 # xmm7 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2e0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm5, %xmm11, %xmm11
vfmadd231ps %xmm7, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm7) + xmm11
vfmadd231ps %xmm2, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm2) + xmm11
vsubps %xmm6, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm2
vshufps $0x55, %xmm12, %xmm12, %xmm7 # xmm7 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2d0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm5, %xmm12, %xmm12
vfmadd231ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm7) + xmm12
vfmadd231ps %xmm2, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm2) + xmm12
vsubps %xmm6, %xmm16, %xmm7
vbroadcastss %xmm7, %xmm2
vshufps $0x55, %xmm7, %xmm7, %xmm6 # xmm6 = xmm7[1,1,1,1]
vmovaps %xmm7, 0x2c0(%rsp)
vshufps $0xaa, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2]
vmulps %xmm7, %xmm5, %xmm5
vfmadd231ps %xmm6, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm6) + xmm5
vfmadd231ps %xmm2, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm2) + xmm5
vmovlhps %xmm10, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm10[0]
vmovlhps %xmm11, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm11[0]
vmovlhps %xmm12, %xmm8, %xmm23 # xmm23 = xmm8[0],xmm12[0]
vmovlhps %xmm5, %xmm9, %xmm24 # xmm24 = xmm9[0],xmm5[0]
vminps %xmm7, %xmm6, %xmm2
vmaxps %xmm7, %xmm6, %xmm3
vminps %xmm24, %xmm23, %xmm4
vminps %xmm4, %xmm2, %xmm2
vmaxps %xmm24, %xmm23, %xmm4
vmaxps %xmm4, %xmm3, %xmm3
vshufpd $0x3, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,1]
vminps %xmm4, %xmm2, %xmm2
vshufpd $0x3, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1]
vmaxps %xmm4, %xmm3, %xmm3
vandps %xmm19, %xmm2, %xmm2
vandps %xmm19, %xmm3, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vmaxss %xmm2, %xmm3, %xmm2
leaq 0xff(%r13), %r9
vmulss 0x25789a(%rip), %xmm2, %xmm2 # 0x1ef1eb8
vmovddup %xmm0, %xmm19 # xmm19 = xmm0[0,0]
vmovddup %xmm1, %xmm25 # xmm25 = xmm1[0,0]
vmovddup %xmm8, %xmm1 # xmm1 = xmm8[0,0]
vmovddup %xmm9, %xmm3 # xmm3 = xmm9[0,0]
vmovddup %xmm10, %xmm8 # xmm8 = xmm10[0,0]
vmovddup %xmm11, %xmm9 # xmm9 = xmm11[0,0]
vmovddup %xmm12, %xmm10 # xmm10 = xmm12[0,0]
vmovddup %xmm5, %xmm11 # xmm11 = xmm5[0,0]
vmovaps %xmm2, 0x150(%rsp)
vbroadcastss %xmm2, %ymm29
vxorps %xmm20, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
xorl %r15d, %r15d
xorl %ebp, %ebp
vmovss 0x30(%r11,%r14,4), %xmm0
vmovss %xmm0, 0x8c(%rsp)
vmovaps %xmm6, 0xb0(%rsp)
vsubps %xmm6, %xmm7, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm7, 0xa0(%rsp)
vsubps %xmm7, %xmm23, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm23, 0x110(%rsp)
vmovaps %xmm24, 0x160(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps %xmm13, 0x2b0(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm3, %xmm13
vmovaps %xmm17, 0x270(%rsp)
vmovaps %xmm21, 0x250(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm18, 0x260(%rsp)
vmovaps %xmm22, 0x240(%rsp)
vsubps %xmm18, %xmm22, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm15, 0x290(%rsp)
vmovaps %xmm16, 0x280(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm1, %xmm16
vpbroadcastd %r8d, %xmm0
vmovdqa %xmm0, 0x1e0(%rsp)
vmovsd 0x251f8d(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
vmovaps %xmm19, 0x180(%rsp)
vmovaps %xmm25, 0x170(%rsp)
vmovaps %xmm1, 0x70(%rsp)
vmovaps %xmm3, 0x50(%rsp)
vmovaps %xmm8, 0x40(%rsp)
vmovaps %xmm9, 0x30(%rsp)
vmovaps %xmm10, 0x20(%rsp)
vmovaps %xmm11, 0x10(%rsp)
vmovups %ymm29, 0x430(%rsp)
vmovups %ymm28, 0x410(%rsp)
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x251f4d(%rip), %ymm30 # 0x1eec714
vsubps %xmm1, %xmm30, %xmm2
vmulps %xmm1, %xmm8, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps %xmm19, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm19) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2866cc(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x277ee4(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x28667b(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x286692(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm30, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2566c9(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2c53d9(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %edx
andb $0x7f, %al
je 0x1c9ac2f
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %edx
andb %dl, %al
movzbl %al, %edx
testl %edx, %edx
je 0x1c9ac52
movl %ebp, %eax
movl %edx, 0x190(%rsp,%rax,4)
vmovlps %xmm0, 0x340(%rsp,%rax,8)
vmovlps %xmm26, 0x450(%rsp,%rax,8)
incl %ebp
vbroadcastss 0x256390(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x28625e(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x286250(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x256372(%rip), %ymm18 # 0x1ef0fec
vmovss 0x28625c(%rip), %xmm21 # 0x1f20ee0
vmovss 0x251a86(%rip), %xmm22 # 0x1eec714
vmovss 0x256368(%rip), %xmm23 # 0x1ef1000
vmovss 0x256daa(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x251a68(%rip), %xmm25 # 0x1eec714
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps 0x100(%rsp), %xmm28
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0xe0(%rsp), %xmm30
testl %ebp, %ebp
je 0x1c9bdce
leal -0x1(%rbp), %eax
vmovss 0x340(%rsp,%rax,8), %xmm0
vmovss 0x344(%rsp,%rax,8), %xmm1
movl 0x190(%rsp,%rax,4), %esi
vmovsd 0x450(%rsp,%rax,8), %xmm15
tzcntq %rsi, %rdx
blsrl %esi, %esi
movl %esi, 0x190(%rsp,%rax,4)
cmovel %eax, %ebp
vxorps %xmm2, %xmm2, %xmm2
vcvtsi2ss %rdx, %xmm2, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm12, %xmm12, %xmm12
vcvtsi2ss %rdx, %xmm12, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm26
vfmadd231ss %xmm4, %xmm0, %xmm26 # xmm26 = (xmm0 * xmm4) + xmm26
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm26, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c9bd9c
vmovaps %xmm26, %xmm6
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %dil
cmpl $0x4, %ebp
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm8, %xmm3
vmulps %xmm1, %xmm9, %xmm4
vmulps %xmm1, %xmm10, %xmm5
vmulps %xmm1, %xmm11, %xmm1
vfmadd231ps 0x180(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x170(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm13, %xmm1 # xmm1 = (xmm13 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm6, 0x90(%rsp)
vbroadcastss %xmm6, %xmm6
vmovaps %xmm14, 0x60(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm18, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x25705e(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x255ab3(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c9af3a
vmovss 0x256f8c(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c9af9b
vmovss 0x256f7e(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x255a87(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c9af9b
vmovss 0x256f4c(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x255a55(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c9bd4a
vcmpltss %xmm31, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x255a1a(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm31, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x250a3a(%rip), %xmm7 # 0x1eeba20
vmovss %xmm31, %xmm7, %xmm7 {%k1}
vmovss 0x251b90(%rip), %xmm8 # 0x1eecb84
vmovss %xmm31, %xmm8, %xmm8 {%k1}
vcmpltss %xmm31, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c9b016
jnp 0x1c9b055
vucomiss %xmm13, %xmm14
jne 0x1c9b068
jp 0x1c9b068
vucomiss %xmm31, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x2509e5(%rip), %xmm13 # 0x1eeba20
vmovss %xmm31, %xmm13, %xmm13 {%k1}
vmovss 0x251b3b(%rip), %xmm14 # 0x1eecb84
vmovss 0x2516c1(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c9b089
vmovaps 0xb0(%rsp), %xmm15
vmovaps 0xa0(%rsp), %xmm16
jmp 0x1c9b0a4
vxorps %xmm20, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm31, %xmm13 # xmm13 = (xmm31 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xb0(%rsp), %xmm15
vmovaps 0xa0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm31, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x255911(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x60(%rsp), %xmm14
jne 0x1c9b0ca
jnp 0x1c9b134
vucomiss %xmm9, %xmm10
jne 0x1c9b109
jp 0x1c9b109
vucomiss %xmm31, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x250931(%rip), %xmm9 # 0x1eeba20
vmovss %xmm31, %xmm9, %xmm9 {%k1}
vmovss 0x251a87(%rip), %xmm10 # 0x1eecb84
vmovss 0x25160d(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c9b12a
vxorps %xmm20, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm31, %xmm9 # xmm9 = (xmm31 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %bl
vucomiss %xmm8, %xmm7
ja 0x1c9bce4
vaddss 0x2c22e8(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x251a14(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0x90(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x2518ab(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm29, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps %xmm30, %xmm13
vfmadd213ps 0x110(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x285b5f(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2c497d(%rip), %xmm31 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm31, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2bf5dd(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x285ace(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x285a67(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c9bcd4
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps 0x70(%rsp), %xmm16
jbe 0x1c9b5a4
testb %sil, %sil
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x255a98(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x160(%rsp), %xmm13
vmovaps 0x150(%rsp), %xmm14
je 0x1c9b5df
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c9b5df
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x255a3a(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x160(%rsp), %xmm13
vmovaps 0x150(%rsp), %xmm14
orb %dil, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c9bd1a
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm12, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm19, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c9b694
decq %rax
jne 0x1c9b5f1
jmp 0x1c9bd1c
vucomiss %xmm31, %xmm0
jb 0x1c9bd1c
vucomiss %xmm0, %xmm22
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm14
jb 0x1c9b7c1
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm31, %xmm1
jb 0x1c9b7c1
vucomiss %xmm1, %xmm22
jb 0x1c9b7c1
vmovss 0x8(%r12), %xmm2
vinsertps $0x1c, 0x18(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r12), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x330(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vsubss %xmm0, %xmm22, %xmm7
vmulss %xmm7, %xmm7, %xmm3
vmulss %xmm3, %xmm7, %xmm2
vmulss %xmm0, %xmm12, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm4
vmulss %xmm4, %xmm12, %xmm5
vmulss %xmm5, %xmm7, %xmm5
vmulps %xmm4, %xmm0, %xmm6
vmulss %xmm1, %xmm6, %xmm1
vfmadd231ss %xmm9, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm8) + xmm1
vfmadd231ss %xmm11, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm11) + xmm1
vucomiss 0x8c(%rsp), %xmm1
jb 0x1c9b7c1
vmovss 0x80(%r11,%r14,4), %xmm4
vucomiss %xmm1, %xmm4
jae 0x1c9b7de
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
jmp 0x1c9bd40
vmovss %xmm4, 0x88(%rsp)
movq %r13, %r12
movl %r15d, %r13d
vshufps $0x55, %xmm0, %xmm0, %xmm4 # xmm4 = xmm0[1,1,1,1]
vsubps %xmm4, %xmm25, %xmm8
vmulps 0x2a0(%rsp), %xmm4, %xmm9
vmulps 0x250(%rsp), %xmm4, %xmm10
vmulps 0x240(%rsp), %xmm4, %xmm11
vmulps 0x280(%rsp), %xmm4, %xmm12
vfmadd231ps 0x2b0(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x270(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x260(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x290(%rsp), %xmm8, %xmm12 # xmm12 = (xmm8 * mem) + xmm12
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm12, %xmm10
vbroadcastss %xmm0, %xmm0
vmulps %xmm0, %xmm9, %xmm11
vbroadcastss %xmm7, %xmm7
vfmadd231ps %xmm8, %xmm7, %xmm11 # xmm11 = (xmm7 * xmm8) + xmm11
vmulps %xmm0, %xmm10, %xmm8
vfmadd231ps %xmm9, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm9) + xmm8
vmulps %xmm0, %xmm8, %xmm8
vfmadd231ps %xmm11, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm11) + xmm8
vmulps %xmm17, %xmm8, %xmm7
movq (%rcx), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %r15
movl 0x90(%r11,%r14,4), %eax
testl %eax, 0x34(%r15)
je 0x1c9bca7
movq 0x10(%rcx), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
jne 0x1c9b8ce
movb $0x1, %al
cmpq $0x0, 0x48(%r15)
je 0x1c9bcc1
vbroadcastss %xmm6, %xmm6
vmulps 0x1f0(%rsp), %xmm6, %xmm6
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x200(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm5, %xmm3 # xmm3 = (xmm3 * mem) + xmm5
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x220(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[1,2,0,3]
vmulps %xmm5, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm7, %xmm2 # xmm2 = (xmm7 * xmm3) - xmm2
movq 0x8(%rcx), %rax
vshufps $0x55, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1,1,1]
vshufps $0xaa, %xmm2, %xmm2, %xmm5 # xmm5 = xmm2[2,2,2,2]
vbroadcastss %xmm2, %xmm2
vmovaps %xmm3, 0x360(%rsp)
vmovaps %xmm5, 0x370(%rsp)
vmovaps %xmm2, 0x380(%rsp)
vmovaps %xmm0, 0x390(%rsp)
vmovaps %xmm4, 0x3a0(%rsp)
vmovaps 0x230(%rsp), %xmm0
vmovaps %xmm0, 0x3b0(%rsp)
vmovdqa 0x1e0(%rsp), %xmm0
vmovdqa %xmm0, 0x3c0(%rsp)
leaq 0x3d0(%rsp), %rdx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqu %ymm0, (%rdx)
vbroadcastss (%rax), %xmm0
vmovaps %xmm0, 0x3d0(%rsp)
vbroadcastss 0x4(%rax), %xmm0
vmovaps %xmm0, 0x3e0(%rsp)
vmovss %xmm1, 0x80(%r11,%r14,4)
vmovaps 0x1d0(%rsp), %xmm0
vmovaps %xmm0, 0x120(%rsp)
leaq 0x120(%rsp), %rax
movq %rax, 0x1a0(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x1a8(%rsp)
movq 0x8(%rcx), %rax
movq %rax, 0x1b0(%rsp)
movq %r11, 0x1b8(%rsp)
leaq 0x360(%rsp), %rax
movq %rax, 0x1c0(%rsp)
movl $0x4, 0x1c8(%rsp)
movq 0x48(%r15), %rax
testq %rax, %rax
movq %r10, 0xd8(%rsp)
movq %r8, 0xd0(%rsp)
movq %r9, 0xc8(%rsp)
vmovaps %xmm15, 0x140(%rsp)
je 0x1c9bb3e
leaq 0x1a0(%rsp), %rdi
movq %r11, 0x130(%rsp)
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm14
vmovaps 0x90(%rsp), %xmm26
vmovaps 0x140(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm30
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x10(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm16
movq 0xc8(%rsp), %r9
movq 0xd0(%rsp), %r8
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x250c48(%rip), %xmm25 # 0x1eec714
vmovss 0x255f76(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x255520(%rip), %xmm23 # 0x1ef1000
vmovss 0x250c2a(%rip), %xmm22 # 0x1eec714
vmovss 0x2853ec(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2554ee(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x2853b8(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x2853b2(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x2554d0(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x2853b6(%rip), %ymm27 # 0x1f20edc
movq 0x80(%rsp), %rcx
movq 0x130(%rsp), %r11
movq 0xd8(%rsp), %r10
vmovdqa 0x120(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k0
kortestb %k0, %k0
je 0x1c9bd7a
movq 0x10(%rcx), %rdx
movq 0x10(%rdx), %rax
testq %rax, %rax
je 0x1c9bc6a
testb $0x2, (%rdx)
jne 0x1c9bb78
testb $0x40, 0x3e(%r15)
je 0x1c9bc6a
leaq 0x1a0(%rsp), %rdi
movq %r11, %r15
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm14
vmovaps 0x90(%rsp), %xmm26
vmovaps 0x140(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm30
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x10(%rsp), %xmm11
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm16
movq 0xc8(%rsp), %r9
movq 0xd0(%rsp), %r8
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x250b17(%rip), %xmm25 # 0x1eec714
vmovss 0x255e45(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2553ef(%rip), %xmm23 # 0x1ef1000
vmovss 0x250af9(%rip), %xmm22 # 0x1eec714
vmovss 0x2852bb(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2553bd(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x285287(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x285281(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x25539f(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x285285(%rip), %ymm27 # 0x1f20edc
movq 0x80(%rsp), %rcx
movq %r15, %r11
movq 0xd8(%rsp), %r10
vmovdqa 0x120(%rsp), %xmm0
vptestmd %xmm0, %xmm0, %k1
movq 0x1b8(%rsp), %rax
vmovaps 0x80(%rax), %xmm0
vbroadcastss 0x250ef1(%rip), %xmm0 {%k1} # 0x1eecb84
vmovaps %xmm0, 0x80(%rax)
kortestb %k1, %k1
setne %al
jmp 0x1c9bd7c
xorl %eax, %eax
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
movl %r13d, %r15d
orb %al, %r15b
movq %r12, %r13
movq 0x138(%rsp), %r12
jmp 0x1c9bd40
vxorps %xmm31, %xmm31, %xmm31
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c9bd1c
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps %xmm26, %xmm15
vmovaps 0x90(%rsp), %xmm26
jmp 0x1c9bd40
xorl %ebx, %ebx
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps 0x60(%rsp), %xmm14
testb %bl, %bl
jne 0x1c9acea
jmp 0x1c9bd9c
movb $0x1, %bl
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x10(%rsp), %xmm11
vmovaps %xmm26, %xmm15
vmovaps 0x90(%rsp), %xmm26
jmp 0x1c9bd3a
xorl %eax, %eax
testb %al, %al
jne 0x1c9bcc1
vmovss 0x88(%rsp), %xmm0
vmovss %xmm0, 0x80(%r11,%r14,4)
jmp 0x1c9bcc1
vinsertps $0x10, %xmm14, %xmm26, %xmm0 # xmm0 = xmm26[0],xmm14[0],xmm26[2,3]
vmovaps 0x180(%rsp), %xmm19
vmovaps 0x170(%rsp), %xmm25
vmovups 0x430(%rsp), %ymm29
vmovups 0x410(%rsp), %ymm28
jmp 0x1c9a7b1
testb $0x1, %r15b
jne 0x1c9bdfb
vmovups 0x3f0(%rsp), %ymm0
vcmpleps 0x80(%r11,%r14,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %r9d, %r13d
andl %eax, %r13d
setne 0xf(%rsp)
jne 0x1c9a0aa
movb 0xf(%rsp), %al
andb $0x1, %al
addq $0x478, %rsp # imm = 0x478
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
void embree::avx512::CurveNiIntersectorK<8, 8>::intersect_hn<embree::avx512::OrientedCurve1IntersectorK<embree::HermiteCurveT, 8>, embree::avx512::Intersect1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayHitK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
// Intersects ray lane k of an 8-wide ray packet against a CurveNi<8> leaf of
// oriented Hermite curves, dispatching each surviving curve to the scalar
// oriented-curve intersector with the standard intersect epilog.
//
// pre     - per-packet precomputed data forwarded to the curve intersector
// ray     - 8-wide ray/hit packet; hits are committed through Epilog, which
//           may shorten ray.tfar[k]
// k       - index of the active ray lane within the packet
// context - query context providing the scene (geometry lookup by geomID)
// prim    - the CurveNi<8> leaf node holding up to M quantized curve bounds
static __forceinline void intersect_hn(Precalculations& pre, RayHitK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
// Coarse test of ray k against the leaf's M quantized curve AABBs;
// 'valid' flags the candidate curves, 'tNear' their entry distances.
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
// Process candidates one bit at a time until the mask is exhausted.
while (mask)
{
// bscf extracts the index of the lowest set bit and clears it from
// 'mask' (the loop relies on this mutation to terminate).
const size_t i = bscf(mask);
STAT3(normal.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
// Fetch the curve's Hermite data: endpoint positions/tangents (p,t)
// and orientation normals with their derivatives (n,dn).
Vec3ff p0,t0,p1,t1; Vec3fa n0,dn0,n1,dn1; geom->gather_hermite(p0,t0,n0,dn0,p1,t1,n1,dn1,geom->curve(primID));
Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,n0,dn0,n1,dn1,Epilog(ray,k,context,geomID,primID));
// Re-read ray.tfar[k] and cull remaining candidates whose entry
// distance now lies behind it — presumably the epilog above shrank
// tfar on a committed hit (TODO confirm against Epilog).
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x560, %rsp # imm = 0x560
movq %rcx, %r10
movq %rdx, %r11
movzbl 0x1(%r8), %ecx
leaq (%rcx,%rcx,4), %rax
leaq (%rax,%rax,4), %rdx
vbroadcastss 0x12(%r8,%rdx), %xmm0
vmovss (%rsi,%r11,4), %xmm1
vmovss 0x80(%rsi,%r11,4), %xmm2
vinsertps $0x10, 0x20(%rsi,%r11,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%rsi,%r11,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%rsi,%r11,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
vinsertps $0x20, 0xc0(%rsi,%r11,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rdx), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rcx,4), %ymm1
vpmovsxbd 0x6(%r8,%rax), %ymm2
vcvtdq2ps %ymm1, %ymm5
vcvtdq2ps %ymm2, %ymm6
leaq (%rcx,%rcx,2), %rdx
vpmovsxbd 0x6(%r8,%rdx,2), %ymm1
vcvtdq2ps %ymm1, %ymm4
leaq (%rcx,%rax,2), %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
leal (,%rdx,4), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm2
vcvtdq2ps %ymm1, %ymm7
vcvtdq2ps %ymm2, %ymm8
addq %rcx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rcx,%rcx,8), %rbx
leal (%rbx,%rbx), %r9d
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm10
addq %rcx, %r9
vpmovsxbd 0x6(%r8,%r9), %ymm1
vcvtdq2ps %ymm1, %ymm11
shll $0x2, %eax
vpmovsxbd 0x6(%r8,%rax), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x2767e5(%rip), %ymm15 # 0x1f12704
vbroadcastss 0x284fb3(%rip), %ymm27 # 0x1f20edc
vpermps %ymm0, %ymm15, %ymm14
vpermps %ymm0, %ymm27, %ymm0
vmulps %ymm4, %ymm0, %ymm2
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm6) + ymm2
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm5) + ymm2
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm27, %ymm3
vmulps %ymm4, %ymm3, %ymm15
vmulps %ymm3, %ymm9, %ymm4
vmulps %ymm3, %ymm12, %ymm3
vfmadd231ps %ymm6, %ymm14, %ymm15 # ymm15 = (ymm14 * ymm6) + ymm15
vfmadd231ps %ymm8, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm8) + ymm4
vfmadd231ps %ymm11, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm11) + ymm3
vfmadd231ps %ymm5, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm5) + ymm15
vfmadd231ps %ymm7, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm7) + ymm4
vbroadcastss 0x284f28(%rip), %ymm5 # 0x1f20ec4
vfmadd231ps %ymm10, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm10) + ymm3
vandps %ymm5, %ymm2, %ymm6
vbroadcastss 0x25503a(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm2 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm2, %ymm5
vbroadcastss 0x250728(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm2 # ymm2 = -(ymm5 * ymm2) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm2 # ymm2 = (ymm2 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rcx,8), %r9
subq %rcx, %r9
vpmovsxwd 0x6(%r8,%r9), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm15, %ymm5, %ymm5
vmulps %ymm5, %ymm2, %ymm5
vpmovsxwd 0x6(%r8,%rbx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm15, %ymm6, %ymm6
leaq (%rcx,%rcx), %r9
addq %rcx, %rax
shlq $0x3, %rdx
subq %rcx, %rdx
vpbroadcastd %ecx, %ymm7
shll $0x4, %ecx
vpmovsxwd 0x6(%r8,%rcx), %ymm8
vmulps %ymm6, %ymm2, %ymm2
subq %r9, %rcx
vpmovsxwd 0x6(%r8,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm4, %ymm6, %ymm6
vmulps %ymm6, %ymm1, %ymm6
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm4, %ymm8, %ymm4
vmulps %ymm4, %ymm1, %ymm1
vpmovsxwd 0x6(%r8,%rax), %ymm4
vcvtdq2ps %ymm4, %ymm4
vsubps %ymm3, %ymm4, %ymm4
vmulps %ymm0, %ymm4, %ymm4
vpmovsxwd 0x6(%r8,%rdx), %ymm8
vcvtdq2ps %ymm8, %ymm8
vsubps %ymm3, %ymm8, %ymm3
vmulps %ymm3, %ymm0, %ymm0
vpminsd %ymm2, %ymm5, %ymm3
vpminsd %ymm1, %ymm6, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vpminsd %ymm0, %ymm4, %ymm8
vmaxps 0x60(%rsi,%r11,4){1to8}, %ymm8, %ymm8
vmaxps %ymm8, %ymm3, %ymm3
vmulps 0x283e34(%rip){1to8}, %ymm3, %ymm3 # 0x1f1ff10
vpmaxsd %ymm2, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm6, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm4, %ymm0
vminps 0x100(%rsi,%r11,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x283e0f(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpcmpgtd 0x2be811(%rip), %ymm7, %k0 # 0x1f5a920
vmovaps %ymm3, 0x3a0(%rsp)
vcmpleps %ymm0, %ymm3, %k1
ktestb %k0, %k1
je 0x1c9df96
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r13d
leaq (%r11,%r11,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %rbx
addq $0x20, %rbx
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x340(%rsp)
vbroadcastss 0x254e80(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x284d4e(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x284d40(%rip), %xmm20 # 0x1f20ec0
vxorps %xmm31, %xmm31, %xmm31
tzcntq %r13, %rax
movl 0x2(%r8), %r15d
movl 0x6(%r8,%rax,4), %r12d
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r15,8), %rax
movq %r12, %rcx
imulq 0x68(%rax), %rcx
movq 0x58(%rax), %rdx
movq 0x90(%rax), %r9
movl (%rdx,%rcx), %ecx
movq 0xa0(%rax), %rdi
movq %rdi, %rdx
imulq %rcx, %rdx
vmovaps (%r9,%rdx), %xmm5
leaq 0x1(%rcx), %rdx
imulq %rdx, %rdi
vmovaps (%r9,%rdi), %xmm4
movq 0xc8(%rax), %r14
movq 0xd8(%rax), %rdi
movq %rdi, %r9
imulq %rcx, %r9
vmovups (%r14,%r9), %xmm6
movq 0x100(%rax), %r9
imulq %rdx, %rdi
vmovups (%r14,%rdi), %xmm7
movq 0x110(%rax), %r14
movq %r14, %rdi
imulq %rcx, %rdi
vmovaps (%r9,%rdi), %xmm8
imulq %rdx, %r14
vmovaps (%r9,%r14), %xmm9
movq %r15, %r9
movq 0x148(%rax), %rdi
imulq %rdi, %rcx
imulq %rdx, %rdi
movq 0x138(%rax), %rax
vmovups (%rax,%rcx), %xmm10
vmovss (%rsi,%r11,4), %xmm0
vinsertps $0x1c, 0x20(%rsi,%r11,4), %xmm0, %xmm0 # xmm0 = xmm0[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%rsi,%r11,4), %xmm0, %xmm0 # xmm0 = xmm0[0,1],mem[0],zero
vmovss 0x60(%rsi,%r11,4), %xmm1
vmovss %xmm1, 0x8c(%rsp)
vbroadcastss 0x255c4a(%rip), %xmm22 # 0x1ef1ebc
vfmadd132ps %xmm22, %xmm5, %xmm8 # xmm8 = (xmm8 * xmm22) + xmm5
vfnmadd132ps %xmm22, %xmm4, %xmm9 # xmm9 = -(xmm9 * xmm22) + xmm4
vmovups (%rax,%rdi), %xmm11
vfmadd132ps %xmm22, %xmm6, %xmm10 # xmm10 = (xmm10 * xmm22) + xmm6
vfnmadd132ps %xmm22, %xmm7, %xmm11 # xmm11 = -(xmm11 * xmm22) + xmm7
vxorps %xmm15, %xmm15, %xmm15
vmulps %xmm4, %xmm15, %xmm1
vfmadd231ps %xmm15, %xmm9, %xmm1 # xmm1 = (xmm9 * xmm15) + xmm1
vxorps %xmm2, %xmm2, %xmm2
vfmadd213ps %xmm1, %xmm8, %xmm2 # xmm2 = (xmm8 * xmm2) + xmm1
vaddps %xmm2, %xmm5, %xmm2
vfmadd231ps %xmm17, %xmm8, %xmm1 # xmm1 = (xmm8 * xmm17) + xmm1
vfnmadd231ps %xmm17, %xmm5, %xmm1 # xmm1 = -(xmm5 * xmm17) + xmm1
vmulps %xmm7, %xmm15, %xmm12
vfmadd231ps %xmm15, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm15) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm12, %xmm10, %xmm3 # xmm3 = (xmm10 * xmm3) + xmm12
vaddps %xmm3, %xmm6, %xmm13
vfmadd231ps %xmm17, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm6, %xmm12 # xmm12 = -(xmm6 * xmm17) + xmm12
vxorps %xmm3, %xmm3, %xmm3
vfmadd213ps %xmm4, %xmm9, %xmm3 # xmm3 = (xmm9 * xmm3) + xmm4
vfmadd231ps %xmm15, %xmm8, %xmm3 # xmm3 = (xmm8 * xmm15) + xmm3
vfmadd231ps %xmm15, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm15) + xmm3
vmulps %xmm17, %xmm4, %xmm4
vfnmadd231ps %xmm9, %xmm17, %xmm4 # xmm4 = -(xmm17 * xmm9) + xmm4
vfmadd231ps %xmm8, %xmm15, %xmm4 # xmm4 = (xmm15 * xmm8) + xmm4
vfnmadd231ps %xmm5, %xmm15, %xmm4 # xmm4 = -(xmm15 * xmm5) + xmm4
vxorps %xmm5, %xmm5, %xmm5
vfmadd213ps %xmm7, %xmm11, %xmm5 # xmm5 = (xmm11 * xmm5) + xmm7
vfmadd231ps %xmm15, %xmm10, %xmm5 # xmm5 = (xmm10 * xmm15) + xmm5
vfmadd231ps %xmm15, %xmm6, %xmm5 # xmm5 = (xmm6 * xmm15) + xmm5
vmulps %xmm17, %xmm7, %xmm8
vfnmadd231ps %xmm11, %xmm17, %xmm8 # xmm8 = -(xmm17 * xmm11) + xmm8
vfmadd231ps %xmm10, %xmm15, %xmm8 # xmm8 = (xmm15 * xmm10) + xmm8
vfnmadd231ps %xmm6, %xmm15, %xmm8 # xmm8 = -(xmm15 * xmm6) + xmm8
vshufps $0xc9, %xmm1, %xmm1, %xmm6 # xmm6 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm7 # xmm7 = xmm13[1,2,0,3]
vmulps %xmm7, %xmm1, %xmm7
vfmsub231ps %xmm13, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm13) - xmm7
vshufps $0xc9, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,2,0,3]
vmulps %xmm1, %xmm9, %xmm9
vfmsub231ps %xmm12, %xmm6, %xmm9 # xmm9 = (xmm6 * xmm12) - xmm9
vshufps $0xc9, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[1,2,0,3]
vshufps $0xc9, %xmm4, %xmm4, %xmm6 # xmm6 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm5, %xmm5, %xmm10 # xmm10 = xmm5[1,2,0,3]
vmulps %xmm4, %xmm10, %xmm10
vfmsub231ps %xmm5, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm5) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm5 # xmm5 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm8, %xmm8, %xmm10 # xmm10 = xmm8[1,2,0,3]
vmulps %xmm4, %xmm10, %xmm10
vfmsub231ps %xmm8, %xmm6, %xmm10 # xmm10 = (xmm6 * xmm8) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm6 # xmm6 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm7, %xmm7, %xmm8
vmovss %xmm8, %xmm15, %xmm10 # xmm10 = xmm8[0],xmm15[1,2,3]
vrsqrt14ss %xmm10, %xmm15, %xmm11
vmovss 0x250377(%rip), %xmm16 # 0x1eec718
vmulss %xmm16, %xmm11, %xmm12
vmovss 0x2507cf(%rip), %xmm17 # 0x1eecb80
vmulss %xmm17, %xmm8, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vsubss %xmm11, %xmm12, %xmm11
vdpps $0x7f, %xmm9, %xmm7, %xmm12
vbroadcastss %xmm11, %xmm11
vmulps %xmm7, %xmm11, %xmm13
vbroadcastss %xmm8, %xmm14
vmulps %xmm9, %xmm14, %xmm9
vbroadcastss %xmm12, %xmm12
vmulps %xmm7, %xmm12, %xmm7
vsubps %xmm7, %xmm9, %xmm7
vrcp14ss %xmm10, %xmm15, %xmm9
vmovss 0x254bf7(%rip), %xmm18 # 0x1ef0ff8
vfnmadd213ss %xmm18, %xmm9, %xmm8 # xmm8 = -(xmm9 * xmm8) + xmm18
vmulss %xmm8, %xmm9, %xmm8
vbroadcastss %xmm8, %xmm8
vmulps %xmm7, %xmm8, %xmm7
vmulps %xmm7, %xmm11, %xmm7
vdpps $0x7f, %xmm5, %xmm5, %xmm8
vmovss %xmm8, %xmm15, %xmm9 # xmm9 = xmm8[0],xmm15[1,2,3]
vrsqrt14ss %xmm9, %xmm15, %xmm10
vmulss %xmm16, %xmm10, %xmm11
vmulss %xmm17, %xmm8, %xmm12
vmulss %xmm10, %xmm12, %xmm12
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm12, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vdpps $0x7f, %xmm6, %xmm5, %xmm11
vmulps %xmm5, %xmm10, %xmm12
vbroadcastss %xmm8, %xmm14
vmulps %xmm6, %xmm14, %xmm6
vbroadcastss %xmm11, %xmm11
vmulps %xmm5, %xmm11, %xmm5
vsubps %xmm5, %xmm6, %xmm5
vrcp14ss %xmm9, %xmm15, %xmm6
vfnmadd213ss %xmm18, %xmm6, %xmm8 # xmm8 = -(xmm6 * xmm8) + xmm18
vmulss %xmm6, %xmm8, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm5, %xmm5
vmulps %xmm5, %xmm10, %xmm5
vshufps $0xff, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[3,3,3,3]
vmulps %xmm6, %xmm13, %xmm8
vsubps %xmm8, %xmm2, %xmm14
vshufps $0xff, %xmm1, %xmm1, %xmm9 # xmm9 = xmm1[3,3,3,3]
vmulps %xmm13, %xmm9, %xmm9
vmulps %xmm7, %xmm6, %xmm6
vaddps %xmm6, %xmm9, %xmm6
vsubps %xmm6, %xmm1, %xmm7
vaddps %xmm2, %xmm8, %xmm13
vaddps %xmm6, %xmm1, %xmm1
vshufps $0xff, %xmm3, %xmm3, %xmm2 # xmm2 = xmm3[3,3,3,3]
vmulps %xmm2, %xmm12, %xmm6
vsubps %xmm6, %xmm3, %xmm15
vshufps $0xff, %xmm4, %xmm4, %xmm8 # xmm8 = xmm4[3,3,3,3]
vmulps %xmm12, %xmm8, %xmm8
vmulps %xmm5, %xmm2, %xmm2
vaddps %xmm2, %xmm8, %xmm2
vsubps %xmm2, %xmm4, %xmm5
vaddps %xmm6, %xmm3, %xmm16
vaddps %xmm2, %xmm4, %xmm2
vmulps %xmm22, %xmm7, %xmm3
vaddps %xmm3, %xmm14, %xmm17
vmulps %xmm22, %xmm5, %xmm3
vsubps %xmm3, %xmm15, %xmm18
vmulps %xmm22, %xmm1, %xmm1
vaddps %xmm1, %xmm13, %xmm21
vmulps %xmm22, %xmm2, %xmm1
vsubps %xmm1, %xmm16, %xmm22
vsubps %xmm0, %xmm14, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm3 # xmm3 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x310(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmovaps (%rbx), %xmm4
vmovaps 0x10(%rbx), %xmm5
vmovaps 0x20(%rbx), %xmm7
vmulps %xmm1, %xmm7, %xmm1
vfmadd231ps %xmm3, %xmm5, %xmm1 # xmm1 = (xmm5 * xmm3) + xmm1
vfmadd231ps %xmm2, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm2) + xmm1
vsubps %xmm0, %xmm17, %xmm2
vbroadcastss %xmm2, %xmm3
vshufps $0x55, %xmm2, %xmm2, %xmm6 # xmm6 = xmm2[1,1,1,1]
vmovaps %xmm2, 0x300(%rsp)
vshufps $0xaa, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[2,2,2,2]
vmulps %xmm2, %xmm7, %xmm2
vfmadd231ps %xmm6, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm6) + xmm2
vfmadd231ps %xmm3, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm3) + xmm2
vsubps %xmm0, %xmm18, %xmm3
vbroadcastss %xmm3, %xmm6
vshufps $0x55, %xmm3, %xmm3, %xmm8 # xmm8 = xmm3[1,1,1,1]
vmovaps %xmm3, 0x2f0(%rsp)
vshufps $0xaa, %xmm3, %xmm3, %xmm3 # xmm3 = xmm3[2,2,2,2]
vmulps %xmm3, %xmm7, %xmm3
vfmadd231ps %xmm8, %xmm5, %xmm3 # xmm3 = (xmm5 * xmm8) + xmm3
vfmadd231ps %xmm6, %xmm4, %xmm3 # xmm3 = (xmm4 * xmm6) + xmm3
vsubps %xmm0, %xmm15, %xmm6
vbroadcastss %xmm6, %xmm8
vshufps $0x55, %xmm6, %xmm6, %xmm9 # xmm9 = xmm6[1,1,1,1]
vmovaps %xmm6, 0x2e0(%rsp)
vshufps $0xaa, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[2,2,2,2]
vmulps %xmm6, %xmm7, %xmm6
vfmadd231ps %xmm9, %xmm5, %xmm6 # xmm6 = (xmm5 * xmm9) + xmm6
vfmadd231ps %xmm8, %xmm4, %xmm6 # xmm6 = (xmm4 * xmm8) + xmm6
vsubps %xmm0, %xmm13, %xmm10
vbroadcastss %xmm10, %xmm8
vshufps $0x55, %xmm10, %xmm10, %xmm9 # xmm9 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2d0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm7, %xmm10, %xmm10
vfmadd231ps %xmm9, %xmm5, %xmm10 # xmm10 = (xmm5 * xmm9) + xmm10
vfmadd231ps %xmm8, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm8) + xmm10
vsubps %xmm0, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm8
vshufps $0x55, %xmm11, %xmm11, %xmm9 # xmm9 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2c0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm7, %xmm11, %xmm11
vfmadd231ps %xmm9, %xmm5, %xmm11 # xmm11 = (xmm5 * xmm9) + xmm11
vfmadd231ps %xmm8, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm8) + xmm11
vsubps %xmm0, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm8
vshufps $0x55, %xmm12, %xmm12, %xmm9 # xmm9 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2b0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm7, %xmm12, %xmm12
vfmadd231ps %xmm9, %xmm5, %xmm12 # xmm12 = (xmm5 * xmm9) + xmm12
vfmadd231ps %xmm8, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm8) + xmm12
vsubps %xmm0, %xmm16, %xmm9
vbroadcastss %xmm9, %xmm0
vshufps $0x55, %xmm9, %xmm9, %xmm8 # xmm8 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x2a0(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm7, %xmm9, %xmm7
vfmadd231ps %xmm8, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm8) + xmm7
vfmadd231ps %xmm0, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm0) + xmm7
vmovlhps %xmm10, %xmm1, %xmm8 # xmm8 = xmm1[0],xmm10[0]
vmovlhps %xmm11, %xmm2, %xmm9 # xmm9 = xmm2[0],xmm11[0]
vmovlhps %xmm12, %xmm3, %xmm23 # xmm23 = xmm3[0],xmm12[0]
vmovlhps %xmm7, %xmm6, %xmm24 # xmm24 = xmm6[0],xmm7[0]
vminps %xmm9, %xmm8, %xmm0
vmaxps %xmm9, %xmm8, %xmm4
vminps %xmm24, %xmm23, %xmm5
vminps %xmm5, %xmm0, %xmm0
vmaxps %xmm24, %xmm23, %xmm5
vmaxps %xmm5, %xmm4, %xmm4
vshufpd $0x3, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1]
vminps %xmm5, %xmm0, %xmm0
vshufpd $0x3, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1]
vmaxps %xmm5, %xmm4, %xmm4
vandps %xmm19, %xmm0, %xmm0
vandps %xmm19, %xmm4, %xmm4
vmaxps %xmm4, %xmm0, %xmm0
vmovshdup %xmm0, %xmm4 # xmm4 = xmm0[1,1,3,3]
vmaxss %xmm0, %xmm4, %xmm0
leaq 0xff(%r13), %r15
vmulss 0x2557c6(%rip), %xmm0, %xmm0 # 0x1ef1eb8
vmovddup %xmm1, %xmm19 # xmm19 = xmm1[0,0]
vmovddup %xmm2, %xmm25 # xmm25 = xmm2[0,0]
vmovddup %xmm3, %xmm1 # xmm1 = xmm3[0,0]
vmovddup %xmm6, %xmm6 # xmm6 = xmm6[0,0]
vmovddup %xmm10, %xmm10 # xmm10 = xmm10[0,0]
vmovddup %xmm11, %xmm11 # xmm11 = xmm11[0,0]
vmovddup %xmm12, %xmm12 # xmm12 = xmm12[0,0]
vmovddup %xmm7, %xmm2 # xmm2 = xmm7[0,0]
vmovaps %xmm0, 0x170(%rsp)
vbroadcastss %xmm0, %ymm29
vxorps %xmm20, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
xorl %r14d, %r14d
vmovaps %xmm8, 0xb0(%rsp)
vsubps %xmm8, %xmm9, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm9, 0xa0(%rsp)
vsubps %xmm9, %xmm23, %xmm0
vmovaps %xmm0, 0xf0(%rsp)
vmovaps %xmm23, 0x110(%rsp)
vmovaps %xmm24, 0x180(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0xe0(%rsp)
vmovaps %xmm14, 0x290(%rsp)
vmovaps %xmm13, 0x280(%rsp)
vsubps %xmm14, %xmm13, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm2, %xmm13
vmovaps %xmm17, 0x250(%rsp)
vmovaps %xmm21, 0x230(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm18, 0x240(%rsp)
vmovaps %xmm22, 0x220(%rsp)
vsubps %xmm18, %xmm22, %xmm0
vmovaps %xmm0, 0x1f0(%rsp)
vmovaps %xmm15, 0x270(%rsp)
vmovaps %xmm16, 0x260(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x1e0(%rsp)
vmovaps %xmm1, %xmm16
vpbroadcastd %r9d, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
movq %r12, 0x128(%rsp)
vpbroadcastd %r12d, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vmovsd 0x24feb5(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
movq %rsi, %rdi
vmovaps %xmm19, 0x1a0(%rsp)
vmovaps %xmm25, 0x190(%rsp)
vmovaps %xmm1, 0x70(%rsp)
vmovaps %xmm6, 0x40(%rsp)
vmovaps %xmm10, 0x30(%rsp)
vmovaps %xmm11, 0x20(%rsp)
vmovaps %xmm12, 0x10(%rsp)
vmovaps %xmm2, 0x50(%rsp)
vmovaps %ymm29, 0x3e0(%rsp)
vmovaps %ymm28, 0x3c0(%rsp)
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x24fe78(%rip), %ymm30 # 0x1eec714
vsubps %xmm1, %xmm30, %xmm2
vmulps %xmm1, %xmm10, %xmm3
vmulps %xmm1, %xmm11, %xmm4
vmulps %xmm1, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm1
vfmadd231ps %xmm19, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm19) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x2845f7(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x275e0f(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x2845a6(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2845bd(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm30, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x2545f4(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2c3304(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c9cd04
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c9cd29
movl %r14d, %eax
movl %ecx, 0x1b0(%rsp,%rax,4)
vmovlps %xmm0, 0x320(%rsp,%rax,8)
vmovlps %xmm26, 0x400(%rsp,%rax,8)
incl %r14d
vbroadcastss 0x2542b9(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x284187(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x284179(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x25429b(%rip), %ymm18 # 0x1ef0fec
vmovss 0x284185(%rip), %xmm21 # 0x1f20ee0
vmovss 0x24f9af(%rip), %xmm22 # 0x1eec714
vmovss 0x254291(%rip), %xmm23 # 0x1ef1000
vmovss 0x254cd3(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x24f991(%rip), %xmm25 # 0x1eec714
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x100(%rsp), %xmm28
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0xe0(%rsp), %xmm30
testl %r14d, %r14d
je 0x1c9df71
leal -0x1(%r14), %eax
vmovss 0x320(%rsp,%rax,8), %xmm0
vmovss 0x324(%rsp,%rax,8), %xmm1
movl 0x1b0(%rsp,%rax,4), %ecx
vmovsd 0x400(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1b0(%rsp,%rax,4)
cmovel %eax, %r14d
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vxorps %xmm3, %xmm3, %xmm3
vcvtsi2ss %rdx, %xmm3, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm26
vfmadd231ss %xmm4, %xmm0, %xmm26 # xmm26 = (xmm0 * xmm4) + xmm26
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm26, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1c9df45
vmovaps %xmm26, %xmm7
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %r14d
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm10, %xmm3
vmulps %xmm1, %xmm11, %xmm4
vmulps %xmm1, %xmm12, %xmm5
vmulps %xmm1, %xmm13, %xmm1
vfmadd231ps 0x1a0(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x190(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm6, %xmm1 # xmm1 = (xmm6 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm7, 0x90(%rsp)
vbroadcastss %xmm7, %xmm6
vmovaps %xmm14, 0x60(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm18, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x254f85(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x2539da(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c9d013
vmovss 0x254eb3(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c9d074
vmovss 0x254ea5(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2539ae(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c9d074
vmovss 0x254e73(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x25397c(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1c9db00
vcmpltss %xmm31, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x253941(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm31, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x24e961(%rip), %xmm7 # 0x1eeba20
vmovss %xmm31, %xmm7, %xmm7 {%k1}
vmovss 0x24fab7(%rip), %xmm8 # 0x1eecb84
vmovss %xmm31, %xmm8, %xmm8 {%k1}
vcmpltss %xmm31, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c9d0ef
jnp 0x1c9d12e
vucomiss %xmm13, %xmm14
jne 0x1c9d141
jp 0x1c9d141
vucomiss %xmm31, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x24e90c(%rip), %xmm13 # 0x1eeba20
vmovss %xmm31, %xmm13, %xmm13 {%k1}
vmovss 0x24fa62(%rip), %xmm14 # 0x1eecb84
vmovss 0x24f5e8(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c9d162
vmovaps 0xb0(%rsp), %xmm15
vmovaps 0xa0(%rsp), %xmm16
jmp 0x1c9d17d
vxorps %xmm20, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm31, %xmm13 # xmm13 = (xmm31 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xb0(%rsp), %xmm15
vmovaps 0xa0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm31, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x253838(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x60(%rsp), %xmm14
jne 0x1c9d1a3
jnp 0x1c9d20d
vucomiss %xmm9, %xmm10
jne 0x1c9d1e2
jp 0x1c9d1e2
vucomiss %xmm31, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x24e858(%rip), %xmm9 # 0x1eeba20
vmovss %xmm31, %xmm9, %xmm9 {%k1}
vmovss 0x24f9ae(%rip), %xmm10 # 0x1eecb84
vmovss 0x24f534(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c9d203
vxorps %xmm20, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm31, %xmm9 # xmm9 = (xmm31 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r12b
vucomiss %xmm8, %xmm7
ja 0x1c9da95
vaddss 0x2c020e(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x24f93a(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0x90(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x24f7d1(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm29, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps %xmm30, %xmm13
vfmadd213ps 0x110(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x283a85(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2c28a3(%rip), %xmm31 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm31, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2bd503(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x2839f4(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x28398d(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c9da85
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps 0x70(%rsp), %xmm16
jbe 0x1c9d67e
testb %sil, %sil
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x2539be(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm13
vmovaps 0x170(%rsp), %xmm14
je 0x1c9d6b9
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c9d6b9
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x253960(%rip), %xmm12 # 0x1ef0fec
vmovaps 0xb0(%rsp), %xmm9
vmovaps 0xa0(%rsp), %xmm10
vmovaps 0x110(%rsp), %xmm11
vmovaps 0x180(%rsp), %xmm13
vmovaps 0x170(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c9dacb
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm12, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm11, %xmm6 # xmm6 = (xmm11 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm19, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c9d76d
decq %rax
jne 0x1c9d6ca
jmp 0x1c9dace
vucomiss %xmm31, %xmm0
jb 0x1c9dace
vucomiss %xmm0, %xmm22
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm14
jb 0x1c9da6b
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm31, %xmm1
jb 0x1c9da6b
vucomiss %xmm1, %xmm22
jb 0x1c9da6b
vmovss 0x8(%rbx), %xmm2
vinsertps $0x1c, 0x18(%rbx), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%rbx), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2b0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2a0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm11
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm2
vfmadd231ss %xmm3, %xmm10, %xmm11 # xmm11 = (xmm10 * xmm3) + xmm11
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm2 # xmm2 = (xmm10 * xmm6) + xmm2
vsubss %xmm0, %xmm22, %xmm7
vmulss %xmm7, %xmm7, %xmm4
vmulss %xmm4, %xmm7, %xmm3
vmulss %xmm0, %xmm12, %xmm5
vmulss %xmm4, %xmm5, %xmm4
vmulps %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm12, %xmm5
vmulss %xmm5, %xmm7, %xmm5
vmulps %xmm6, %xmm0, %xmm6
vmulss %xmm2, %xmm6, %xmm2
vfmadd231ss %xmm9, %xmm5, %xmm2 # xmm2 = (xmm5 * xmm9) + xmm2
vfmadd231ss %xmm8, %xmm4, %xmm2 # xmm2 = (xmm4 * xmm8) + xmm2
vfmadd231ss %xmm11, %xmm3, %xmm2 # xmm2 = (xmm3 * xmm11) + xmm2
vucomiss 0x8c(%rsp), %xmm2
jb 0x1c9da6b
vmovss 0x100(%rdi,%r11,4), %xmm8
vucomiss %xmm2, %xmm8
jb 0x1c9da6b
vmovss %xmm8, 0x88(%rsp)
movq %r15, 0x80(%rsp)
vshufps $0x55, %xmm0, %xmm0, %xmm8 # xmm8 = xmm0[1,1,1,1]
vsubps %xmm8, %xmm25, %xmm9
vmulps 0x280(%rsp), %xmm8, %xmm10
vmulps 0x230(%rsp), %xmm8, %xmm11
vmulps 0x220(%rsp), %xmm8, %xmm12
vmulps 0x260(%rsp), %xmm8, %xmm8
vfmadd231ps 0x290(%rsp), %xmm9, %xmm10 # xmm10 = (xmm9 * mem) + xmm10
vfmadd231ps 0x250(%rsp), %xmm9, %xmm11 # xmm11 = (xmm9 * mem) + xmm11
vfmadd231ps 0x240(%rsp), %xmm9, %xmm12 # xmm12 = (xmm9 * mem) + xmm12
vfmadd231ps 0x270(%rsp), %xmm9, %xmm8 # xmm8 = (xmm9 * mem) + xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm12, %xmm10
vsubps %xmm12, %xmm8, %xmm8
vbroadcastss %xmm0, %xmm11
vmulps %xmm10, %xmm11, %xmm12
vbroadcastss %xmm7, %xmm7
vfmadd231ps %xmm9, %xmm7, %xmm12 # xmm12 = (xmm7 * xmm9) + xmm12
vmulps %xmm8, %xmm11, %xmm8
vfmadd231ps %xmm10, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm10) + xmm8
vmulps %xmm8, %xmm11, %xmm8
vfmadd231ps %xmm12, %xmm7, %xmm8 # xmm8 = (xmm7 * xmm12) + xmm8
vmulps %xmm17, %xmm8, %xmm7
movq (%r10), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r9,8), %r15
movl 0x120(%rdi,%r11,4), %eax
testl %eax, 0x34(%r15)
je 0x1c9da63
vbroadcastss %xmm6, %xmm6
vmulps 0x1e0(%rsp), %xmm6, %xmm6
vbroadcastss %xmm5, %xmm5
vfmadd132ps 0x1f0(%rsp), %xmm6, %xmm5 # xmm5 = (xmm5 * mem) + xmm6
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x200(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x210(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vshufps $0xc9, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,2,0,3]
vshufps $0xc9, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[1,2,0,3]
vmulps %xmm5, %xmm3, %xmm3
vfmsub231ps %xmm4, %xmm7, %xmm3 # xmm3 = (xmm7 * xmm4) - xmm3
movq 0x10(%r10), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
jne 0x1c9db31
cmpq $0x0, 0x40(%r15)
jne 0x1c9db31
vmovss %xmm2, 0x100(%rdi,%r11,4)
vextractps $0x1, %xmm3, 0x180(%rdi,%r11,4)
vextractps $0x2, %xmm3, 0x1a0(%rdi,%r11,4)
vmovss %xmm3, 0x1c0(%rdi,%r11,4)
vmovss %xmm0, 0x1e0(%rdi,%r11,4)
vmovss %xmm1, 0x200(%rdi,%r11,4)
movq 0x128(%rsp), %rax
movl %eax, 0x220(%rdi,%r11,4)
movl %r9d, 0x240(%rdi,%r11,4)
movq 0x8(%r10), %rax
movl (%rax), %eax
movl %eax, 0x260(%rdi,%r11,4)
movq 0x8(%r10), %rax
movl 0x4(%rax), %eax
movl %eax, 0x280(%rdi,%r11,4)
movq 0x80(%rsp), %r15
jmp 0x1c9daf2
movq 0x80(%rsp), %r15
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
jmp 0x1c9daf2
vxorps %xmm31, %xmm31, %xmm31
vmovaps 0x70(%rsp), %xmm16
jmp 0x1c9dace
vmovaps 0x70(%rsp), %xmm16
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps %xmm26, %xmm15
vmovaps 0x90(%rsp), %xmm26
jmp 0x1c9daf2
xorl %r12d, %r12d
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x60(%rsp), %xmm14
testb %r12b, %r12b
jne 0x1c9cdc1
jmp 0x1c9df45
movb $0x1, %r12b
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x50(%rsp), %xmm13
vmovaps %xmm26, %xmm15
vmovaps 0x90(%rsp), %xmm26
jmp 0x1c9daec
movq 0x8(%r10), %rax
vbroadcastss %xmm0, %ymm1
vbroadcastss 0x274bc1(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm0
vpermps %ymm3, %ymm4, %ymm4
vpermps %ymm3, %ymm27, %ymm5
vbroadcastss %xmm3, %ymm3
vmovaps %ymm4, 0x420(%rsp)
vmovaps %ymm5, 0x440(%rsp)
vmovaps %ymm3, 0x460(%rsp)
vmovaps %ymm1, 0x480(%rsp)
vmovaps %ymm0, 0x4a0(%rsp)
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm0, 0x4c0(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x4e0(%rsp)
leaq 0x500(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x500(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x520(%rsp)
vmovss %xmm2, 0x100(%rdi,%r11,4)
vmovaps 0x340(%rsp), %ymm0
vmovaps %ymm0, 0x1c0(%rsp)
leaq 0x1c0(%rsp), %rax
movq %rax, 0x130(%rsp)
movq 0x18(%r15), %rax
movq %rax, 0x138(%rsp)
movq 0x8(%r10), %rax
movq %rax, 0x140(%rsp)
movq %rdi, 0x148(%rsp)
leaq 0x420(%rsp), %rax
movq %rax, 0x150(%rsp)
movl $0x8, 0x158(%rsp)
movq 0x40(%r15), %rax
testq %rax, %rax
movq %r8, 0xd8(%rsp)
movq %r11, 0xd0(%rsp)
movq %rdi, 0xc8(%rsp)
movq %r9, 0xc0(%rsp)
vmovaps %xmm15, 0x160(%rsp)
je 0x1c9dd60
leaq 0x130(%rsp), %rdi
movq %r10, 0x120(%rsp)
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm14
vmovaps 0x90(%rsp), %xmm26
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm30
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm16
movq 0xc0(%rsp), %r9
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x253d52(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2532fc(%rip), %xmm23 # 0x1ef1000
vmovss 0x24ea06(%rip), %xmm22 # 0x1eec714
vmovss 0x2831c8(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2532ca(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x283194(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x28318e(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x28319c(%rip), %ymm27 # 0x1f20edc
movq 0xc8(%rsp), %rdi
movq 0xd0(%rsp), %r11
movq 0x120(%rsp), %r10
movq 0xd8(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c9df19
movq 0x10(%r10), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c9de73
testb $0x2, (%rcx)
jne 0x1c9dd95
testb $0x40, 0x3e(%r15)
je 0x1c9de73
leaq 0x130(%rsp), %rdi
movq %r10, %r15
vzeroupper
callq *%rax
vmovaps 0x60(%rsp), %xmm14
vmovaps 0x90(%rsp), %xmm26
vmovaps 0x160(%rsp), %xmm15
vmovaps 0xe0(%rsp), %xmm30
vmovaps 0xf0(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm28
vmovaps 0x50(%rsp), %xmm13
vmovaps 0x10(%rsp), %xmm12
vmovaps 0x20(%rsp), %xmm11
vmovaps 0x30(%rsp), %xmm10
vmovaps 0x40(%rsp), %xmm6
vmovaps 0x70(%rsp), %xmm16
movq 0xc0(%rsp), %r9
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x253c3a(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x2531e4(%rip), %xmm23 # 0x1ef1000
vmovss 0x24e8ee(%rip), %xmm22 # 0x1eec714
vmovss 0x2830b0(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x2531b2(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x28307c(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x283076(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x283084(%rip), %ymm27 # 0x1f20edc
movq 0xc8(%rsp), %rdi
movq 0xd0(%rsp), %r11
movq %r15, %r10
movq 0xd8(%rsp), %r8
vmovdqa 0x1c0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1c9df19
vptestmd %ymm0, %ymm0, %k1
movq 0x148(%rsp), %rax
movq 0x150(%rsp), %rcx
vmovaps (%rcx), %ymm0
vmovups %ymm0, 0x180(%rax) {%k1}
vmovaps 0x20(%rcx), %ymm0
vmovups %ymm0, 0x1a0(%rax) {%k1}
vmovaps 0x40(%rcx), %ymm0
vmovups %ymm0, 0x1c0(%rax) {%k1}
vmovaps 0x60(%rcx), %ymm0
vmovups %ymm0, 0x1e0(%rax) {%k1}
vmovaps 0x80(%rcx), %ymm0
vmovups %ymm0, 0x200(%rax) {%k1}
vmovdqa 0xa0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x220(%rax) {%k1}
vmovdqa 0xc0(%rcx), %ymm0
vmovdqu32 %ymm0, 0x240(%rax) {%k1}
vmovdqa 0xe0(%rcx), %ymm0
vmovdqa32 %ymm0, 0x260(%rax) {%k1}
vmovdqa 0x100(%rcx), %ymm0
vmovdqa32 %ymm0, 0x280(%rax) {%k1}
jmp 0x1c9df2c
vmovd 0x88(%rsp), %xmm0
vmovd %xmm0, 0x100(%rdi,%r11,4)
vbroadcastss 0x2530b6(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x24e7d4(%rip), %xmm25 # 0x1eec714
jmp 0x1c9da56
vinsertps $0x10, %xmm14, %xmm26, %xmm0 # xmm0 = xmm26[0],xmm14[0],xmm26[2,3]
vmovaps 0x1a0(%rsp), %xmm19
vmovaps 0x190(%rsp), %xmm25
vmovaps 0x3e0(%rsp), %ymm29
vmovaps 0x3c0(%rsp), %ymm28
jmp 0x1c9c886
vmovaps 0x3a0(%rsp), %ymm0
vcmpleps 0x100(%rdi,%r11,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %r15d, %r13d
andl %eax, %r13d
movq %rdi, %rsi
jne 0x1c9c186
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
bool embree::avx512::CurveNiIntersectorK<8, 8>::occluded_hn<embree::avx512::OrientedCurve1IntersectorK<embree::HermiteCurveT, 8>, embree::avx512::Occluded1KEpilog1<8, true>>(embree::avx512::CurvePrecalculationsK<8>&, embree::RayK<8>&, unsigned long, embree::RayQueryContext*, embree::CurveNi<8> const&)
|
static __forceinline bool occluded_hn(Precalculations& pre, RayK<K>& ray, const size_t k, RayQueryContext* context, const Primitive& prim)
{
vfloat<M> tNear;
vbool<M> valid = intersect(ray,k,prim,tNear);
const size_t N = prim.N;
size_t mask = movemask(valid);
while (mask)
{
const size_t i = bscf(mask);
STAT3(shadow.trav_prims,1,1,1);
const unsigned int geomID = prim.geomID(N);
const unsigned int primID = prim.primID(N)[i];
const CurveGeometry* geom = context->scene->get<CurveGeometry>(geomID);
Vec3ff p0,t0,p1,t1; Vec3fa n0,dn0,n1,dn1; geom->gather_hermite(p0,t0,n0,dn0,p1,t1,n1,dn1,geom->curve(primID));
if (Intersector().intersect(pre,ray,k,context,geom,primID,p0,t0,p1,t1,n0,dn0,n1,dn1,Epilog(ray,k,context,geomID,primID)))
return true;
mask &= movemask(tNear <= vfloat<M>(ray.tfar[k]));
}
return false;
}
|
pushq %rbp
movq %rsp, %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
andq $-0x20, %rsp
subq $0x580, %rsp # imm = 0x580
movq %r8, %r10
movq %rdx, %r11
movq %rsi, %r9
movzbl 0x1(%r8), %eax
leaq (%rax,%rax,4), %rdx
leaq (%rdx,%rdx,4), %rsi
vbroadcastss 0x12(%r8,%rsi), %xmm0
vmovss (%r9,%r11,4), %xmm1
vmovss 0x80(%r9,%r11,4), %xmm2
vinsertps $0x10, 0x20(%r9,%r11,4), %xmm1, %xmm1 # xmm1 = xmm1[0],mem[0],xmm1[2,3]
vinsertps $0x20, 0x40(%r9,%r11,4), %xmm1, %xmm1 # xmm1 = xmm1[0,1],mem[0],xmm1[3]
vinsertps $0x10, 0xa0(%r9,%r11,4), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],xmm2[2,3]
movq %rcx, 0x98(%rsp)
vinsertps $0x20, 0xc0(%r9,%r11,4), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],xmm2[3]
vsubps 0x6(%r8,%rsi), %xmm1, %xmm1
vmulps %xmm1, %xmm0, %xmm3
vmulps %xmm2, %xmm0, %xmm0
vpmovsxbd 0x6(%r8,%rax,4), %ymm1
vcvtdq2ps %ymm1, %ymm5
vpmovsxbd 0x6(%r8,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm6
leaq (%rax,%rax,2), %rcx
vpmovsxbd 0x6(%r8,%rcx,2), %ymm1
vcvtdq2ps %ymm1, %ymm2
leaq (%rax,%rdx,2), %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm7
leal (,%rcx,4), %esi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm8
addq %rax, %rsi
vpmovsxbd 0x6(%r8,%rsi), %ymm1
vcvtdq2ps %ymm1, %ymm9
leaq (%rax,%rax,8), %rsi
leal (%rsi,%rsi), %r8d
vpmovsxbd 0x6(%r10,%r8), %ymm1
addq %rax, %r8
vpmovsxbd 0x6(%r10,%r8), %ymm4
vcvtdq2ps %ymm1, %ymm10
vcvtdq2ps %ymm4, %ymm11
shll $0x2, %edx
vpmovsxbd 0x6(%r10,%rdx), %ymm1
vcvtdq2ps %ymm1, %ymm12
vbroadcastss %xmm0, %ymm13
vbroadcastss 0x274649(%rip), %ymm15 # 0x1f12704
vpermps %ymm0, %ymm15, %ymm14
vbroadcastss 0x282e12(%rip), %ymm27 # 0x1f20edc
vpermps %ymm0, %ymm27, %ymm0
vmulps %ymm2, %ymm0, %ymm4
vmulps %ymm0, %ymm9, %ymm1
vmulps %ymm0, %ymm12, %ymm0
vfmadd231ps %ymm6, %ymm14, %ymm4 # ymm4 = (ymm14 * ymm6) + ymm4
vfmadd231ps %ymm8, %ymm14, %ymm1 # ymm1 = (ymm14 * ymm8) + ymm1
vfmadd231ps %ymm14, %ymm11, %ymm0 # ymm0 = (ymm11 * ymm14) + ymm0
vfmadd231ps %ymm5, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm5) + ymm4
vfmadd231ps %ymm7, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm7) + ymm1
vfmadd231ps %ymm13, %ymm10, %ymm0 # ymm0 = (ymm10 * ymm13) + ymm0
vbroadcastss %xmm3, %ymm13
vpermps %ymm3, %ymm15, %ymm14
vpermps %ymm3, %ymm27, %ymm15
vmulps %ymm2, %ymm15, %ymm16
vmulps %ymm9, %ymm15, %ymm3
vmulps %ymm12, %ymm15, %ymm2
vfmadd231ps %ymm6, %ymm14, %ymm16 # ymm16 = (ymm14 * ymm6) + ymm16
vfmadd231ps %ymm8, %ymm14, %ymm3 # ymm3 = (ymm14 * ymm8) + ymm3
vfmadd231ps %ymm11, %ymm14, %ymm2 # ymm2 = (ymm14 * ymm11) + ymm2
vfmadd231ps %ymm5, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm5) + ymm16
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm10, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm10) + ymm2
vbroadcastss 0x282d81(%rip), %ymm5 # 0x1f20ec4
vandps %ymm5, %ymm4, %ymm6
vbroadcastss 0x252e98(%rip), %ymm7 # 0x1ef0fe8
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm4 {%k1}
vandps %ymm5, %ymm1, %ymm6
vcmpltps %ymm7, %ymm6, %k1
vmovaps %ymm7, %ymm1 {%k1}
vandps %ymm5, %ymm0, %ymm5
vcmpltps %ymm7, %ymm5, %k1
vmovaps %ymm7, %ymm0 {%k1}
vrcp14ps %ymm4, %ymm5
vbroadcastss 0x24e586(%rip), %ymm6 # 0x1eec714
vfnmadd213ps %ymm6, %ymm5, %ymm4 # ymm4 = -(ymm5 * ymm4) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm4 # ymm4 = (ymm4 * ymm5) + ymm5
vrcp14ps %ymm1, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm1 # ymm1 = -(ymm5 * ymm1) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm1 # ymm1 = (ymm1 * ymm5) + ymm5
vrcp14ps %ymm0, %ymm5
vfnmadd213ps %ymm6, %ymm5, %ymm0 # ymm0 = -(ymm5 * ymm0) + ymm6
vfmadd132ps %ymm5, %ymm5, %ymm0 # ymm0 = (ymm0 * ymm5) + ymm5
leaq (,%rax,8), %r8
subq %rax, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm5
vcvtdq2ps %ymm5, %ymm5
vsubps %ymm16, %ymm5, %ymm5
vpmovsxwd 0x6(%r10,%rsi), %ymm6
vmulps %ymm5, %ymm4, %ymm5
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm16, %ymm6, %ymm6
vmulps %ymm6, %ymm4, %ymm4
leaq (%rax,%rax), %rsi
addq %rax, %rdx
shlq $0x3, %rcx
subq %rax, %rcx
movl %eax, %r8d
shll $0x4, %r8d
vpmovsxwd 0x6(%r10,%r8), %ymm6
subq %rsi, %r8
vpmovsxwd 0x6(%r10,%r8), %ymm7
vcvtdq2ps %ymm7, %ymm7
vsubps %ymm3, %ymm7, %ymm7
vmulps %ymm7, %ymm1, %ymm7
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm3, %ymm6, %ymm3
vmulps %ymm3, %ymm1, %ymm1
vpmovsxwd 0x6(%r10,%rdx), %ymm3
vcvtdq2ps %ymm3, %ymm3
vsubps %ymm2, %ymm3, %ymm3
vmulps %ymm0, %ymm3, %ymm3
vpmovsxwd 0x6(%r10,%rcx), %ymm6
vcvtdq2ps %ymm6, %ymm6
vsubps %ymm2, %ymm6, %ymm2
vmulps %ymm2, %ymm0, %ymm0
vpminsd %ymm4, %ymm5, %ymm2
vpminsd %ymm1, %ymm7, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vpminsd %ymm0, %ymm3, %ymm6
vmaxps 0x60(%r9,%r11,4){1to8}, %ymm6, %ymm6
vmaxps %ymm6, %ymm2, %ymm2
vmulps 0x281c96(%rip){1to8}, %ymm2, %ymm6 # 0x1f1ff10
vpmaxsd %ymm4, %ymm5, %ymm2
vpmaxsd %ymm1, %ymm7, %ymm1
vminps %ymm1, %ymm2, %ymm1
vpmaxsd %ymm0, %ymm3, %ymm0
vminps 0x100(%r9,%r11,4){1to8}, %ymm0, %ymm0
vminps %ymm0, %ymm1, %ymm0
vmulps 0x281c71(%rip){1to8}, %ymm0, %ymm0 # 0x1f1ff14
vpbroadcastd %eax, %ymm1
vpcmpgtd 0x2bc66d(%rip), %ymm1, %k0 # 0x1f5a920
vmovaps %ymm6, 0x3c0(%rsp)
vcmpleps %ymm0, %ymm6, %k1
ktestb %k0, %k1
setne 0x1f(%rsp)
je 0x1ca00b3
kandb %k0, %k1, %k0
kmovd %k0, %eax
movzbl %al, %r12d
leaq (%r11,%r11,2), %rax
shlq $0x4, %rax
leaq (%rdi,%rax), %r13
addq $0x20, %r13
movl $0x1, %eax
shlxl %r11d, %eax, %eax
kmovd %eax, %k0
vpmovm2d %k0, %ymm0
vmovdqa %ymm0, 0x360(%rsp)
vbroadcastss 0x252cd7(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x282ba5(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x282b97(%rip), %xmm20 # 0x1f20ec0
vxorps %xmm31, %xmm31, %xmm31
movq 0x98(%rsp), %rdi
movq %r9, 0xb0(%rsp)
movq %r13, 0x148(%rsp)
tzcntq %r12, %rax
movl 0x2(%r10), %ebx
movl 0x6(%r10,%rax,4), %eax
movq (%rdi), %rcx
movq 0x1e8(%rcx), %rcx
movq (%rcx,%rbx,8), %rdx
movq %rax, %rcx
imulq 0x68(%rdx), %rcx
movq 0x58(%rdx), %rsi
movq 0x90(%rdx), %rdi
movl (%rsi,%rcx), %ecx
movq 0xa0(%rdx), %r8
movq %r8, %rsi
imulq %rcx, %rsi
vmovaps (%rdi,%rsi), %xmm0
leaq 0x1(%rcx), %rsi
imulq %rsi, %r8
vmovaps (%rdi,%r8), %xmm1
movq 0xc8(%rdx), %rdi
movq 0xd8(%rdx), %r8
movq %r8, %r9
imulq %rcx, %r9
vmovups (%rdi,%r9), %xmm2
movq 0x100(%rdx), %r9
imulq %rsi, %r8
vmovups (%rdi,%r8), %xmm3
movq 0x110(%rdx), %rdi
movq %rdi, %r8
imulq %rcx, %r8
vmovaps (%r9,%r8), %xmm4
movq %rbx, %r8
imulq %rsi, %rdi
vmovaps (%r9,%rdi), %xmm5
movq 0x98(%rsp), %rdi
movq 0xb0(%rsp), %r9
vpbroadcastd %eax, %ymm6
vmovdqa %ymm6, 0x3a0(%rsp)
movq 0x148(%rdx), %rax
imulq %rax, %rcx
imulq %rsi, %rax
movq 0x138(%rdx), %rdx
vmovss (%r9,%r11,4), %xmm6
vinsertps $0x1c, 0x20(%r9,%r11,4), %xmm6, %xmm6 # xmm6 = xmm6[0],mem[0],zero,zero
vinsertps $0x28, 0x40(%r9,%r11,4), %xmm6, %xmm6 # xmm6 = xmm6[0,1],mem[0],zero
vbroadcastss 0x253a80(%rip), %xmm22 # 0x1ef1ebc
vfmadd132ps %xmm22, %xmm0, %xmm4 # xmm4 = (xmm4 * xmm22) + xmm0
vfnmadd132ps %xmm22, %xmm1, %xmm5 # xmm5 = -(xmm5 * xmm22) + xmm1
vmovups (%rdx,%rcx), %xmm10
vfmadd132ps %xmm22, %xmm2, %xmm10 # xmm10 = (xmm10 * xmm22) + xmm2
vmovups (%rdx,%rax), %xmm11
vfnmadd132ps %xmm22, %xmm3, %xmm11 # xmm11 = -(xmm11 * xmm22) + xmm3
vxorps %xmm15, %xmm15, %xmm15
vmulps %xmm1, %xmm15, %xmm7
vfmadd231ps %xmm15, %xmm5, %xmm7 # xmm7 = (xmm5 * xmm15) + xmm7
vxorps %xmm8, %xmm8, %xmm8
vfmadd213ps %xmm7, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm8) + xmm7
vaddps %xmm0, %xmm8, %xmm8
vfmadd231ps %xmm17, %xmm4, %xmm7 # xmm7 = (xmm4 * xmm17) + xmm7
vfnmadd231ps %xmm17, %xmm0, %xmm7 # xmm7 = -(xmm0 * xmm17) + xmm7
vmulps %xmm3, %xmm15, %xmm12
vfmadd231ps %xmm15, %xmm11, %xmm12 # xmm12 = (xmm11 * xmm15) + xmm12
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm12, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm9) + xmm12
vaddps %xmm2, %xmm9, %xmm13
vfmadd231ps %xmm17, %xmm10, %xmm12 # xmm12 = (xmm10 * xmm17) + xmm12
vfnmadd231ps %xmm17, %xmm2, %xmm12 # xmm12 = -(xmm2 * xmm17) + xmm12
vxorps %xmm9, %xmm9, %xmm9
vfmadd213ps %xmm1, %xmm5, %xmm9 # xmm9 = (xmm5 * xmm9) + xmm1
vfmadd231ps %xmm15, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm15) + xmm9
vfmadd231ps %xmm15, %xmm0, %xmm9 # xmm9 = (xmm0 * xmm15) + xmm9
vmulps %xmm17, %xmm1, %xmm1
vfnmadd231ps %xmm5, %xmm17, %xmm1 # xmm1 = -(xmm17 * xmm5) + xmm1
vfmadd231ps %xmm4, %xmm15, %xmm1 # xmm1 = (xmm15 * xmm4) + xmm1
vfnmadd231ps %xmm0, %xmm15, %xmm1 # xmm1 = -(xmm15 * xmm0) + xmm1
vxorps %xmm0, %xmm0, %xmm0
vfmadd213ps %xmm3, %xmm11, %xmm0 # xmm0 = (xmm11 * xmm0) + xmm3
vfmadd231ps %xmm15, %xmm10, %xmm0 # xmm0 = (xmm10 * xmm15) + xmm0
vfmadd231ps %xmm15, %xmm2, %xmm0 # xmm0 = (xmm2 * xmm15) + xmm0
vmulps %xmm17, %xmm3, %xmm3
vfnmadd231ps %xmm11, %xmm17, %xmm3 # xmm3 = -(xmm17 * xmm11) + xmm3
vfmadd231ps %xmm10, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm10) + xmm3
vfnmadd231ps %xmm2, %xmm15, %xmm3 # xmm3 = -(xmm15 * xmm2) + xmm3
vshufps $0xc9, %xmm7, %xmm7, %xmm2 # xmm2 = xmm7[1,2,0,3]
vshufps $0xc9, %xmm13, %xmm13, %xmm4 # xmm4 = xmm13[1,2,0,3]
vmulps %xmm4, %xmm7, %xmm4
vfmsub231ps %xmm13, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm13) - xmm4
vshufps $0xc9, %xmm4, %xmm4, %xmm4 # xmm4 = xmm4[1,2,0,3]
vshufps $0xc9, %xmm12, %xmm12, %xmm5 # xmm5 = xmm12[1,2,0,3]
vmulps %xmm5, %xmm7, %xmm5
vfmsub231ps %xmm12, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm12) - xmm5
vshufps $0xc9, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,2,0,3]
vshufps $0xc9, %xmm1, %xmm1, %xmm2 # xmm2 = xmm1[1,2,0,3]
vshufps $0xc9, %xmm0, %xmm0, %xmm10 # xmm10 = xmm0[1,2,0,3]
vmulps %xmm1, %xmm10, %xmm10
vfmsub231ps %xmm0, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm0) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm0 # xmm0 = xmm10[1,2,0,3]
vshufps $0xc9, %xmm3, %xmm3, %xmm10 # xmm10 = xmm3[1,2,0,3]
vmulps %xmm1, %xmm10, %xmm10
vfmsub231ps %xmm3, %xmm2, %xmm10 # xmm10 = (xmm2 * xmm3) - xmm10
vshufps $0xc9, %xmm10, %xmm10, %xmm2 # xmm2 = xmm10[1,2,0,3]
vdpps $0x7f, %xmm4, %xmm4, %xmm3
vmovss %xmm3, %xmm15, %xmm10 # xmm10 = xmm3[0],xmm15[1,2,3]
vrsqrt14ss %xmm10, %xmm15, %xmm11
vmovss 0x24e1a8(%rip), %xmm16 # 0x1eec718
vmulss %xmm16, %xmm11, %xmm12
vmovss 0x24e600(%rip), %xmm17 # 0x1eecb80
vmulss %xmm17, %xmm3, %xmm13
vmulss %xmm11, %xmm13, %xmm13
vmulss %xmm11, %xmm11, %xmm11
vmulss %xmm11, %xmm13, %xmm11
vsubss %xmm11, %xmm12, %xmm11
vbroadcastss %xmm11, %xmm11
vmulps %xmm4, %xmm11, %xmm12
vdpps $0x7f, %xmm5, %xmm4, %xmm13
vbroadcastss %xmm3, %xmm14
vmulps %xmm5, %xmm14, %xmm5
vbroadcastss %xmm13, %xmm13
vmulps %xmm4, %xmm13, %xmm4
vsubps %xmm4, %xmm5, %xmm4
vrcp14ss %xmm10, %xmm15, %xmm5
vmovss 0x252a29(%rip), %xmm18 # 0x1ef0ff8
vfnmadd213ss %xmm18, %xmm5, %xmm3 # xmm3 = -(xmm5 * xmm3) + xmm18
vmulss %xmm3, %xmm5, %xmm3
vdpps $0x7f, %xmm0, %xmm0, %xmm5
vbroadcastss %xmm3, %xmm3
vmulps %xmm3, %xmm4, %xmm3
vmulps %xmm3, %xmm11, %xmm3
vmovss %xmm5, %xmm15, %xmm4 # xmm4 = xmm5[0],xmm15[1,2,3]
vrsqrt14ss %xmm4, %xmm15, %xmm10
vmulss %xmm16, %xmm10, %xmm11
vmulss %xmm17, %xmm5, %xmm13
vmulss %xmm10, %xmm13, %xmm13
vmulss %xmm10, %xmm10, %xmm10
vmulss %xmm10, %xmm13, %xmm10
vsubss %xmm10, %xmm11, %xmm10
vbroadcastss %xmm10, %xmm10
vmulps %xmm0, %xmm10, %xmm11
vdpps $0x7f, %xmm2, %xmm0, %xmm13
vbroadcastss %xmm5, %xmm14
vmulps %xmm2, %xmm14, %xmm2
vbroadcastss %xmm13, %xmm13
vmulps %xmm0, %xmm13, %xmm0
vsubps %xmm0, %xmm2, %xmm0
vrcp14ss %xmm4, %xmm15, %xmm2
vfnmadd213ss %xmm18, %xmm2, %xmm5 # xmm5 = -(xmm2 * xmm5) + xmm18
vmulss %xmm5, %xmm2, %xmm2
vbroadcastss %xmm2, %xmm2
vmulps %xmm2, %xmm0, %xmm0
vmulps %xmm0, %xmm10, %xmm0
vshufps $0xff, %xmm8, %xmm8, %xmm2 # xmm2 = xmm8[3,3,3,3]
vmulps %xmm2, %xmm12, %xmm4
vsubps %xmm4, %xmm8, %xmm13
vshufps $0xff, %xmm7, %xmm7, %xmm5 # xmm5 = xmm7[3,3,3,3]
vmulps %xmm5, %xmm12, %xmm5
vmulps %xmm3, %xmm2, %xmm2
vaddps %xmm2, %xmm5, %xmm2
vsubps %xmm2, %xmm7, %xmm3
vaddps %xmm4, %xmm8, %xmm14
vaddps %xmm2, %xmm7, %xmm2
vshufps $0xff, %xmm9, %xmm9, %xmm4 # xmm4 = xmm9[3,3,3,3]
vmulps %xmm4, %xmm11, %xmm5
vsubps %xmm5, %xmm9, %xmm15
vshufps $0xff, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[3,3,3,3]
vmulps %xmm7, %xmm11, %xmm7
vmulps %xmm0, %xmm4, %xmm0
vaddps %xmm0, %xmm7, %xmm0
vsubps %xmm0, %xmm1, %xmm4
vaddps %xmm5, %xmm9, %xmm16
vaddps %xmm0, %xmm1, %xmm0
vmulps %xmm22, %xmm3, %xmm1
vaddps %xmm1, %xmm13, %xmm17
vmulps %xmm22, %xmm4, %xmm1
vsubps %xmm1, %xmm15, %xmm18
vmulps %xmm22, %xmm2, %xmm1
vaddps %xmm1, %xmm14, %xmm21
vmulps %xmm22, %xmm0, %xmm0
vsubps %xmm0, %xmm16, %xmm22
vsubps %xmm6, %xmm13, %xmm0
vbroadcastss %xmm0, %xmm1
vshufps $0x55, %xmm0, %xmm0, %xmm2 # xmm2 = xmm0[1,1,1,1]
vmovaps %xmm0, 0x330(%rsp)
vshufps $0xaa, %xmm0, %xmm0, %xmm0 # xmm0 = xmm0[2,2,2,2]
vmovaps (%r13), %xmm3
vmovaps 0x10(%r13), %xmm4
vmovaps 0x20(%r13), %xmm5
vmulps %xmm0, %xmm5, %xmm0
vfmadd231ps %xmm2, %xmm4, %xmm0 # xmm0 = (xmm4 * xmm2) + xmm0
vfmadd231ps %xmm1, %xmm3, %xmm0 # xmm0 = (xmm3 * xmm1) + xmm0
vsubps %xmm6, %xmm17, %xmm1
vbroadcastss %xmm1, %xmm2
vshufps $0x55, %xmm1, %xmm1, %xmm7 # xmm7 = xmm1[1,1,1,1]
vmovaps %xmm1, 0x320(%rsp)
vshufps $0xaa, %xmm1, %xmm1, %xmm1 # xmm1 = xmm1[2,2,2,2]
vmulps %xmm1, %xmm5, %xmm1
vfmadd231ps %xmm7, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm7) + xmm1
vfmadd231ps %xmm2, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm2) + xmm1
vsubps %xmm6, %xmm18, %xmm8
vbroadcastss %xmm8, %xmm2
vshufps $0x55, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1,1,1]
vmovaps %xmm8, 0x310(%rsp)
vshufps $0xaa, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[2,2,2,2]
vmulps %xmm5, %xmm8, %xmm8
vfmadd231ps %xmm7, %xmm4, %xmm8 # xmm8 = (xmm4 * xmm7) + xmm8
vfmadd231ps %xmm2, %xmm3, %xmm8 # xmm8 = (xmm3 * xmm2) + xmm8
vsubps %xmm6, %xmm15, %xmm9
vbroadcastss %xmm9, %xmm2
vshufps $0x55, %xmm9, %xmm9, %xmm7 # xmm7 = xmm9[1,1,1,1]
vmovaps %xmm9, 0x300(%rsp)
vshufps $0xaa, %xmm9, %xmm9, %xmm9 # xmm9 = xmm9[2,2,2,2]
vmulps %xmm5, %xmm9, %xmm9
vfmadd231ps %xmm7, %xmm4, %xmm9 # xmm9 = (xmm4 * xmm7) + xmm9
vfmadd231ps %xmm2, %xmm3, %xmm9 # xmm9 = (xmm3 * xmm2) + xmm9
vsubps %xmm6, %xmm14, %xmm10
vbroadcastss %xmm10, %xmm2
vshufps $0x55, %xmm10, %xmm10, %xmm7 # xmm7 = xmm10[1,1,1,1]
vmovaps %xmm10, 0x2f0(%rsp)
vshufps $0xaa, %xmm10, %xmm10, %xmm10 # xmm10 = xmm10[2,2,2,2]
vmulps %xmm5, %xmm10, %xmm10
vfmadd231ps %xmm7, %xmm4, %xmm10 # xmm10 = (xmm4 * xmm7) + xmm10
vfmadd231ps %xmm2, %xmm3, %xmm10 # xmm10 = (xmm3 * xmm2) + xmm10
vsubps %xmm6, %xmm21, %xmm11
vbroadcastss %xmm11, %xmm2
vshufps $0x55, %xmm11, %xmm11, %xmm7 # xmm7 = xmm11[1,1,1,1]
vmovaps %xmm11, 0x2e0(%rsp)
vshufps $0xaa, %xmm11, %xmm11, %xmm11 # xmm11 = xmm11[2,2,2,2]
vmulps %xmm5, %xmm11, %xmm11
vfmadd231ps %xmm7, %xmm4, %xmm11 # xmm11 = (xmm4 * xmm7) + xmm11
vfmadd231ps %xmm2, %xmm3, %xmm11 # xmm11 = (xmm3 * xmm2) + xmm11
vsubps %xmm6, %xmm22, %xmm12
vbroadcastss %xmm12, %xmm2
vshufps $0x55, %xmm12, %xmm12, %xmm7 # xmm7 = xmm12[1,1,1,1]
vmovaps %xmm12, 0x2d0(%rsp)
vshufps $0xaa, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[2,2,2,2]
vmulps %xmm5, %xmm12, %xmm12
vfmadd231ps %xmm7, %xmm4, %xmm12 # xmm12 = (xmm4 * xmm7) + xmm12
vfmadd231ps %xmm2, %xmm3, %xmm12 # xmm12 = (xmm3 * xmm2) + xmm12
vsubps %xmm6, %xmm16, %xmm7
vbroadcastss %xmm7, %xmm2
vshufps $0x55, %xmm7, %xmm7, %xmm6 # xmm6 = xmm7[1,1,1,1]
vmovaps %xmm7, 0x2c0(%rsp)
vshufps $0xaa, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[2,2,2,2]
vmulps %xmm7, %xmm5, %xmm5
vfmadd231ps %xmm6, %xmm4, %xmm5 # xmm5 = (xmm4 * xmm6) + xmm5
vfmadd231ps %xmm2, %xmm3, %xmm5 # xmm5 = (xmm3 * xmm2) + xmm5
vmovlhps %xmm10, %xmm0, %xmm6 # xmm6 = xmm0[0],xmm10[0]
vmovlhps %xmm11, %xmm1, %xmm7 # xmm7 = xmm1[0],xmm11[0]
vmovlhps %xmm12, %xmm8, %xmm23 # xmm23 = xmm8[0],xmm12[0]
vmovlhps %xmm5, %xmm9, %xmm24 # xmm24 = xmm9[0],xmm5[0]
vminps %xmm7, %xmm6, %xmm2
vmaxps %xmm7, %xmm6, %xmm3
vminps %xmm24, %xmm23, %xmm4
vminps %xmm4, %xmm2, %xmm2
vmaxps %xmm24, %xmm23, %xmm4
vmaxps %xmm4, %xmm3, %xmm3
vshufpd $0x3, %xmm2, %xmm2, %xmm4 # xmm4 = xmm2[1,1]
vminps %xmm4, %xmm2, %xmm2
vshufpd $0x3, %xmm3, %xmm3, %xmm4 # xmm4 = xmm3[1,1]
vmaxps %xmm4, %xmm3, %xmm3
vandps %xmm19, %xmm2, %xmm2
vandps %xmm19, %xmm3, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vmovshdup %xmm2, %xmm3 # xmm3 = xmm2[1,1,3,3]
vmaxss %xmm2, %xmm3, %xmm2
leaq 0xff(%r12), %r14
vmulss 0x2535f6(%rip), %xmm2, %xmm2 # 0x1ef1eb8
vmovddup %xmm0, %xmm19 # xmm19 = xmm0[0,0]
vmovddup %xmm1, %xmm25 # xmm25 = xmm1[0,0]
vmovddup %xmm8, %xmm1 # xmm1 = xmm8[0,0]
vmovddup %xmm9, %xmm3 # xmm3 = xmm9[0,0]
vmovddup %xmm10, %xmm4 # xmm4 = xmm10[0,0]
vmovddup %xmm11, %xmm8 # xmm8 = xmm11[0,0]
vmovddup %xmm12, %xmm9 # xmm9 = xmm12[0,0]
vmovddup %xmm5, %xmm10 # xmm10 = xmm5[0,0]
vmovaps %xmm2, 0x160(%rsp)
vbroadcastss %xmm2, %ymm29
vxorps %xmm20, %xmm29, %xmm0
vbroadcastss %xmm0, %ymm28
movl $0x0, 0x5c(%rsp)
xorl %ebx, %ebx
vmovss 0x60(%r9,%r11,4), %xmm0
vmovss %xmm0, 0xbc(%rsp)
vmovaps %xmm6, 0xe0(%rsp)
vsubps %xmm6, %xmm7, %xmm0
vmovaps %xmm0, 0x120(%rsp)
vmovaps %xmm7, 0xd0(%rsp)
vsubps %xmm7, %xmm23, %xmm0
vmovaps %xmm0, 0x110(%rsp)
vmovaps %xmm23, 0x130(%rsp)
vmovaps %xmm24, 0x170(%rsp)
vsubps %xmm23, %xmm24, %xmm0
vmovaps %xmm0, 0x100(%rsp)
vmovaps %xmm13, 0x2b0(%rsp)
vmovaps %xmm14, 0x2a0(%rsp)
vsubps %xmm13, %xmm14, %xmm0
vmovaps %xmm0, 0x230(%rsp)
vmovaps %xmm4, %xmm13
vmovaps %xmm3, %xmm12
vmovaps %xmm17, 0x270(%rsp)
vmovaps %xmm21, 0x250(%rsp)
vsubps %xmm17, %xmm21, %xmm0
vmovaps %xmm0, 0x220(%rsp)
vmovaps %xmm18, 0x260(%rsp)
vmovaps %xmm22, 0x240(%rsp)
vsubps %xmm18, %xmm22, %xmm0
vmovaps %xmm0, 0x210(%rsp)
vmovaps %xmm15, 0x290(%rsp)
vmovaps %xmm16, 0x280(%rsp)
vsubps %xmm15, %xmm16, %xmm0
vmovaps %xmm0, 0x200(%rsp)
vmovaps %xmm1, %xmm16
vpbroadcastd %r8d, %ymm0
vmovdqa %ymm0, 0x380(%rsp)
vmovsd 0x24dce0(%rip), %xmm0 # 0x1eec6f0
vmovaps %xmm0, %xmm15
vmovaps %xmm19, 0x190(%rsp)
vmovaps %xmm25, 0x180(%rsp)
vmovaps %xmm1, 0xa0(%rsp)
vmovaps %xmm3, 0x70(%rsp)
vmovaps %xmm4, 0x60(%rsp)
vmovaps %xmm8, 0x40(%rsp)
vmovaps %xmm9, 0x30(%rsp)
vmovaps %xmm10, 0x20(%rsp)
vmovaps %ymm29, 0x400(%rsp)
vmovaps %ymm28, 0x3e0(%rsp)
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vbroadcastss 0x24dca3(%rip), %ymm30 # 0x1eec714
vsubps %xmm1, %xmm30, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm8, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps %xmm19, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm19) + xmm3
vfmadd231ps %xmm25, %xmm2, %xmm4 # xmm4 = (xmm2 * xmm25) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vmovshdup %xmm0, %xmm2 # xmm2 = xmm0[1,1,3,3]
vsubss %xmm0, %xmm2, %xmm6
vmulss 0x282422(%rip), %xmm6, %xmm6 # 0x1f20ed0
vbroadcastss %xmm0, %ymm7
vbroadcastsd %xmm2, %ymm2
vsubps %ymm7, %ymm2, %ymm10
vbroadcastss %xmm3, %ymm2
vbroadcastss 0x273c3a(%rip), %ymm9 # 0x1f12704
vpermps %ymm3, %ymm9, %ymm8
vbroadcastss %xmm4, %ymm20
vpermps %ymm4, %ymm9, %ymm21
vbroadcastss %xmm5, %ymm22
vpermps %ymm5, %ymm9, %ymm23
vbroadcastss %xmm1, %ymm24
vpermps %ymm1, %ymm9, %ymm25
vbroadcastss %xmm6, %ymm9
vpermps %ymm3, %ymm27, %ymm19
vbroadcastss 0x2823d1(%rip), %ymm6 # 0x1f20ed8
vpermps %ymm3, %ymm6, %ymm18
vpermps %ymm4, %ymm27, %ymm15
vpermps %ymm4, %ymm6, %ymm14
vpermps %ymm5, %ymm27, %ymm12
vpermps %ymm5, %ymm6, %ymm11
vpermps %ymm1, %ymm27, %ymm16
vpermps %ymm1, %ymm6, %ymm17
vfmadd132ps 0x2823e8(%rip), %ymm7, %ymm10 # ymm10 = (ymm10 * mem) + ymm7
vsubps %ymm10, %ymm30, %ymm13
vmulps %ymm10, %ymm20, %ymm1
vmulps %ymm10, %ymm21, %ymm3
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm8, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm8) + ymm3
vmulps %ymm10, %ymm22, %ymm2
vmulps %ymm10, %ymm23, %ymm4
vfmadd231ps %ymm20, %ymm13, %ymm2 # ymm2 = (ymm13 * ymm20) + ymm2
vfmadd231ps %ymm21, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm21) + ymm4
vmulps %ymm10, %ymm24, %ymm5
vmulps %ymm10, %ymm25, %ymm6
vfmadd231ps %ymm22, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm22) + ymm5
vfmadd231ps %ymm23, %ymm13, %ymm6 # ymm6 = (ymm13 * ymm23) + ymm6
vmulps %ymm2, %ymm10, %ymm7
vmulps %ymm4, %ymm10, %ymm8
vfmadd231ps %ymm1, %ymm13, %ymm7 # ymm7 = (ymm13 * ymm1) + ymm7
vfmadd231ps %ymm3, %ymm13, %ymm8 # ymm8 = (ymm13 * ymm3) + ymm8
vmulps %ymm5, %ymm10, %ymm1
vmulps %ymm6, %ymm10, %ymm5
vfmadd231ps %ymm2, %ymm13, %ymm1 # ymm1 = (ymm13 * ymm2) + ymm1
vfmadd231ps %ymm4, %ymm13, %ymm5 # ymm5 = (ymm13 * ymm4) + ymm5
vmulps %ymm1, %ymm10, %ymm3
vmulps %ymm5, %ymm10, %ymm4
vfmadd231ps %ymm7, %ymm13, %ymm3 # ymm3 = (ymm13 * ymm7) + ymm3
vfmadd231ps %ymm8, %ymm13, %ymm4 # ymm4 = (ymm13 * ymm8) + ymm4
vsubps %ymm7, %ymm1, %ymm1
vsubps %ymm8, %ymm5, %ymm2
vbroadcastss 0x25241f(%rip), %ymm22 # 0x1ef0fec
vmulps %ymm22, %ymm1, %ymm1
vmulps %ymm22, %ymm2, %ymm2
vmulps %ymm1, %ymm9, %ymm8
vmulps %ymm2, %ymm9, %ymm20
vmovaps %ymm3, %ymm5
vmovaps 0x2c112f(%rip), %ymm23 # 0x1f5fd20
vxorps %xmm24, %xmm24, %xmm24
vpermt2ps %ymm24, %ymm23, %ymm5
vmovaps %ymm4, %ymm6
vpermt2ps %ymm24, %ymm23, %ymm6
vaddps %ymm3, %ymm8, %ymm1
vpermt2ps %ymm24, %ymm23, %ymm8
vaddps %ymm20, %ymm4, %ymm7
vpermt2ps %ymm24, %ymm23, %ymm20
vsubps %ymm8, %ymm5, %ymm2
vsubps %ymm20, %ymm6, %ymm8
vmulps %ymm10, %ymm15, %ymm20
vmulps %ymm10, %ymm14, %ymm21
vfmadd231ps %ymm19, %ymm13, %ymm20 # ymm20 = (ymm13 * ymm19) + ymm20
vfmadd231ps %ymm18, %ymm13, %ymm21 # ymm21 = (ymm13 * ymm18) + ymm21
vmulps %ymm10, %ymm12, %ymm18
vmulps %ymm10, %ymm11, %ymm19
vfmadd231ps %ymm15, %ymm13, %ymm18 # ymm18 = (ymm13 * ymm15) + ymm18
vfmadd231ps %ymm14, %ymm13, %ymm19 # ymm19 = (ymm13 * ymm14) + ymm19
vmulps %ymm10, %ymm16, %ymm14
vmulps %ymm10, %ymm17, %ymm15
vfmadd231ps %ymm12, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm12) + ymm14
vfmadd231ps %ymm11, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm11) + ymm15
vmulps %ymm18, %ymm10, %ymm16
vmulps %ymm19, %ymm10, %ymm17
vfmadd231ps %ymm20, %ymm13, %ymm16 # ymm16 = (ymm13 * ymm20) + ymm16
vfmadd231ps %ymm21, %ymm13, %ymm17 # ymm17 = (ymm13 * ymm21) + ymm17
vmulps %ymm14, %ymm10, %ymm14
vmulps %ymm15, %ymm10, %ymm15
vfmadd231ps %ymm18, %ymm13, %ymm14 # ymm14 = (ymm13 * ymm18) + ymm14
vfmadd231ps %ymm19, %ymm13, %ymm15 # ymm15 = (ymm13 * ymm19) + ymm15
vmulps %ymm14, %ymm10, %ymm11
vmulps %ymm15, %ymm10, %ymm12
vfmadd231ps %ymm16, %ymm13, %ymm11 # ymm11 = (ymm13 * ymm16) + ymm11
vfmadd231ps %ymm13, %ymm17, %ymm12 # ymm12 = (ymm17 * ymm13) + ymm12
vsubps %ymm16, %ymm14, %ymm10
vsubps %ymm17, %ymm15, %ymm13
vmulps %ymm22, %ymm10, %ymm10
vmulps %ymm22, %ymm13, %ymm13
vmulps %ymm10, %ymm9, %ymm10
vmulps %ymm13, %ymm9, %ymm16
vmovaps %ymm11, %ymm13
vpermt2ps %ymm24, %ymm23, %ymm13
vmovaps %ymm12, %ymm14
vpermt2ps %ymm24, %ymm23, %ymm14
vaddps %ymm10, %ymm11, %ymm9
vpermt2ps %ymm24, %ymm23, %ymm10
vaddps %ymm16, %ymm12, %ymm15
vpermt2ps %ymm24, %ymm23, %ymm16
vsubps %ymm10, %ymm13, %ymm10
vsubps %ymm16, %ymm14, %ymm16
vsubps %ymm3, %ymm11, %ymm17
vsubps %ymm4, %ymm12, %ymm18
vsubps %ymm5, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm6, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm19
vfnmadd231ps %ymm18, %ymm3, %ymm19 # ymm19 = -(ymm3 * ymm18) + ymm19
vmulps %ymm17, %ymm7, %ymm20
vfnmadd231ps %ymm18, %ymm1, %ymm20 # ymm20 = -(ymm1 * ymm18) + ymm20
vmulps %ymm17, %ymm8, %ymm21
vfnmadd231ps %ymm18, %ymm2, %ymm21 # ymm21 = -(ymm2 * ymm18) + ymm21
vmulps %ymm17, %ymm6, %ymm22
vfnmadd231ps %ymm18, %ymm5, %ymm22 # ymm22 = -(ymm5 * ymm18) + ymm22
vmulps %ymm17, %ymm12, %ymm23
vfnmadd231ps %ymm18, %ymm11, %ymm23 # ymm23 = -(ymm11 * ymm18) + ymm23
vmulps %ymm17, %ymm15, %ymm24
vfnmadd231ps %ymm18, %ymm9, %ymm24 # ymm24 = -(ymm9 * ymm18) + ymm24
vmulps %ymm17, %ymm16, %ymm25
vfnmadd231ps %ymm18, %ymm10, %ymm25 # ymm25 = -(ymm10 * ymm18) + ymm25
vmulps %ymm17, %ymm14, %ymm17
vfnmadd231ps %ymm18, %ymm13, %ymm17 # ymm17 = -(ymm13 * ymm18) + ymm17
vminps %ymm20, %ymm19, %ymm18
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm22, %ymm21, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm22, %ymm21, %ymm20
vmaxps %ymm20, %ymm19, %ymm19
vminps %ymm24, %ymm23, %ymm20
vmaxps %ymm24, %ymm23, %ymm21
vminps %ymm17, %ymm25, %ymm22
vminps %ymm22, %ymm20, %ymm20
vminps %ymm20, %ymm18, %ymm18
vmaxps %ymm17, %ymm25, %ymm17
vmaxps %ymm17, %ymm21, %ymm17
vmaxps %ymm17, %ymm19, %ymm17
vcmpleps %ymm29, %ymm18, %k1
vcmpnltps %ymm28, %ymm17, %k0 {%k1}
kmovd %k0, %eax
movl $0x0, %ecx
andb $0x7f, %al
je 0x1c9eed9
vsubps %ymm3, %ymm5, %ymm17
vsubps %ymm4, %ymm6, %ymm18
vsubps %ymm11, %ymm13, %ymm19
vaddps %ymm19, %ymm17, %ymm17
vsubps %ymm12, %ymm14, %ymm19
vaddps %ymm19, %ymm18, %ymm18
vmulps %ymm17, %ymm4, %ymm4
vfnmadd231ps %ymm3, %ymm18, %ymm4 # ymm4 = -(ymm18 * ymm3) + ymm4
vmulps %ymm17, %ymm7, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm1 # ymm1 = -(ymm18 * ymm1) + ymm3
vmulps %ymm17, %ymm8, %ymm3
vfnmadd213ps %ymm3, %ymm18, %ymm2 # ymm2 = -(ymm18 * ymm2) + ymm3
vmulps %ymm17, %ymm6, %ymm3
vfnmadd231ps %ymm5, %ymm18, %ymm3 # ymm3 = -(ymm18 * ymm5) + ymm3
vmulps %ymm17, %ymm12, %ymm5
vfnmadd231ps %ymm11, %ymm18, %ymm5 # ymm5 = -(ymm18 * ymm11) + ymm5
vmulps %ymm17, %ymm15, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm9 # ymm9 = -(ymm18 * ymm9) + ymm6
vmulps %ymm17, %ymm16, %ymm6
vfnmadd213ps %ymm6, %ymm18, %ymm10 # ymm10 = -(ymm18 * ymm10) + ymm6
vmulps %ymm17, %ymm14, %ymm6
vfnmadd231ps %ymm18, %ymm13, %ymm6 # ymm6 = -(ymm13 * ymm18) + ymm6
vminps %ymm1, %ymm4, %ymm7
vmaxps %ymm1, %ymm4, %ymm1
vminps %ymm3, %ymm2, %ymm4
vminps %ymm4, %ymm7, %ymm4
vmaxps %ymm3, %ymm2, %ymm2
vmaxps %ymm2, %ymm1, %ymm1
vminps %ymm9, %ymm5, %ymm2
vmaxps %ymm9, %ymm5, %ymm3
vminps %ymm6, %ymm10, %ymm5
vminps %ymm5, %ymm2, %ymm2
vminps %ymm2, %ymm4, %ymm2
vmaxps %ymm6, %ymm10, %ymm4
vmaxps %ymm4, %ymm3, %ymm3
vmaxps %ymm3, %ymm1, %ymm1
vcmpnltps %ymm28, %ymm1, %k1
vcmpleps %ymm29, %ymm2, %k0 {%k1}
kmovd %k0, %ecx
andb %cl, %al
movzbl %al, %ecx
testl %ecx, %ecx
je 0x1c9eefc
movl %ebx, %eax
movl %ecx, 0x1a0(%rsp,%rax,4)
vmovlps %xmm0, 0x340(%rsp,%rax,8)
vmovlps %xmm26, 0x420(%rsp,%rax,8)
incl %ebx
vbroadcastss 0x2520e6(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x281fb4(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x281fa6(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x2520c8(%rip), %ymm18 # 0x1ef0fec
vmovss 0x281fb2(%rip), %xmm21 # 0x1f20ee0
vmovss 0x24d7dc(%rip), %xmm22 # 0x1eec714
vmovss 0x2520be(%rip), %xmm23 # 0x1ef1000
vmovss 0x252b00(%rip), %xmm24 # 0x1ef1a4c
vbroadcastss 0x24d7be(%rip), %xmm25 # 0x1eec714
vmovaps 0xa0(%rsp), %xmm16
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x120(%rsp), %xmm28
vmovaps 0x110(%rsp), %xmm29
vmovaps 0x100(%rsp), %xmm30
testl %ebx, %ebx
je 0x1ca0085
leal -0x1(%rbx), %eax
vmovss 0x340(%rsp,%rax,8), %xmm0
vmovss 0x344(%rsp,%rax,8), %xmm1
movl 0x1a0(%rsp,%rax,4), %ecx
vmovsd 0x420(%rsp,%rax,8), %xmm15
tzcntq %rcx, %rdx
blsrl %ecx, %ecx
movl %ecx, 0x1a0(%rsp,%rax,4)
cmovel %eax, %ebx
vcvtsi2ss %rdx, %xmm27, %xmm2
vmulss %xmm21, %xmm2, %xmm2
incq %rdx
vcvtsi2ss %rdx, %xmm27, %xmm3
vmulss %xmm21, %xmm3, %xmm3
vsubss %xmm2, %xmm22, %xmm4
vmulss %xmm2, %xmm1, %xmm26
vfmadd231ss %xmm4, %xmm0, %xmm26 # xmm26 = (xmm0 * xmm4) + xmm26
vsubss %xmm3, %xmm22, %xmm2
vmulss %xmm3, %xmm1, %xmm14
vfmadd231ss %xmm2, %xmm0, %xmm14 # xmm14 = (xmm0 * xmm2) + xmm14
vsubss %xmm26, %xmm14, %xmm0
vucomiss %xmm0, %xmm23
jbe 0x1ca0059
vmovaps %xmm26, %xmm6
vmovaps %xmm15, %xmm26
vshufps $0x50, %xmm15, %xmm15, %xmm1 # xmm1 = xmm15[0,0,1,1]
vucomiss %xmm0, %xmm24
seta %cl
cmpl $0x4, %ebx
setae %al
vsubps %xmm1, %xmm25, %xmm2
vmulps %xmm1, %xmm13, %xmm3
vmulps %xmm1, %xmm8, %xmm4
vmulps %xmm1, %xmm9, %xmm5
vmulps %xmm1, %xmm10, %xmm1
vfmadd231ps 0x190(%rsp), %xmm2, %xmm3 # xmm3 = (xmm2 * mem) + xmm3
vfmadd231ps 0x180(%rsp), %xmm2, %xmm4 # xmm4 = (xmm2 * mem) + xmm4
vfmadd231ps %xmm16, %xmm2, %xmm5 # xmm5 = (xmm2 * xmm16) + xmm5
vfmadd231ps %xmm2, %xmm12, %xmm1 # xmm1 = (xmm12 * xmm2) + xmm1
vinsertf128 $0x1, %xmm3, %ymm3, %ymm2
vinsertf128 $0x1, %xmm4, %ymm4, %ymm3
vinsertf128 $0x1, %xmm5, %ymm5, %ymm4
vmovaps %xmm6, 0xc0(%rsp)
vbroadcastss %xmm6, %xmm6
vmovaps %xmm14, 0x80(%rsp)
vbroadcastss %xmm14, %xmm7
vinsertf128 $0x1, %xmm7, %ymm6, %ymm6
vsubps %ymm2, %ymm3, %ymm7
vfmadd213ps %ymm2, %ymm6, %ymm7 # ymm7 = (ymm6 * ymm7) + ymm2
vsubps %ymm3, %ymm4, %ymm2
vfmadd213ps %ymm3, %ymm6, %ymm2 # ymm2 = (ymm6 * ymm2) + ymm3
vsubps %xmm5, %xmm1, %xmm1
vinsertf128 $0x1, %xmm1, %ymm1, %ymm3
vfmadd213ps %ymm4, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm4
vsubps %ymm7, %ymm2, %ymm1
vfmadd213ps %ymm7, %ymm6, %ymm1 # ymm1 = (ymm6 * ymm1) + ymm7
vsubps %ymm2, %ymm3, %ymm3
vfmadd213ps %ymm2, %ymm6, %ymm3 # ymm3 = (ymm6 * ymm3) + ymm2
vsubps %ymm1, %ymm3, %ymm2
vfmadd231ps %ymm6, %ymm2, %ymm1 # ymm1 = (ymm2 * ymm6) + ymm1
vmulps %ymm18, %ymm2, %ymm3
vextractf128 $0x1, %ymm1, %xmm2
vextractf128 $0x1, %ymm3, %xmm4
vmulss 0x252db9(%rip), %xmm0, %xmm5 # 0x1ef1ebc
vbroadcastss %xmm5, %xmm6
vmulps %xmm3, %xmm6, %xmm3
vaddps %xmm3, %xmm1, %xmm5
vmulps %xmm4, %xmm6, %xmm3
vsubps %xmm3, %xmm2, %xmm6
vshufpd $0x3, %xmm1, %xmm1, %xmm4 # xmm4 = xmm1[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,1]
vsubps %xmm1, %xmm4, %xmm7
vsubps %xmm2, %xmm3, %xmm8
vaddps %xmm7, %xmm8, %xmm7
vshufps $0xb1, %xmm1, %xmm1, %xmm8 # xmm8 = xmm1[1,0,3,2]
vshufps $0xb1, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,0,3,2]
vshufps $0xb1, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,0,3,2]
vshufps $0xb1, %xmm2, %xmm2, %xmm11 # xmm11 = xmm2[1,0,3,2]
vbroadcastss %xmm7, %xmm12
vshufps $0x55, %xmm7, %xmm7, %xmm7 # xmm7 = xmm7[1,1,1,1]
vmulps %xmm7, %xmm8, %xmm8
vmulps %xmm7, %xmm9, %xmm9
vmulps %xmm7, %xmm10, %xmm10
vmulps %xmm7, %xmm11, %xmm7
vfmadd231ps %xmm1, %xmm12, %xmm8 # xmm8 = (xmm12 * xmm1) + xmm8
vfmadd231ps %xmm5, %xmm12, %xmm9 # xmm9 = (xmm12 * xmm5) + xmm9
vfmadd231ps %xmm6, %xmm12, %xmm10 # xmm10 = (xmm12 * xmm6) + xmm10
vfmadd231ps %xmm12, %xmm2, %xmm7 # xmm7 = (xmm2 * xmm12) + xmm7
vshufpd $0x1, %xmm8, %xmm8, %xmm11 # xmm11 = xmm8[1,0]
vshufpd $0x1, %xmm9, %xmm9, %xmm12 # xmm12 = xmm9[1,0]
vshufpd $0x1, %xmm10, %xmm10, %xmm14 # xmm14 = xmm10[1,0]
vshufpd $0x1, %xmm7, %xmm7, %xmm15 # xmm15 = xmm7[1,0]
vminss %xmm9, %xmm8, %xmm13
vmaxss %xmm8, %xmm9, %xmm8
vminss %xmm7, %xmm10, %xmm9
vmaxss %xmm10, %xmm7, %xmm7
vminss %xmm9, %xmm13, %xmm13
vmaxss %xmm8, %xmm7, %xmm9
vminss %xmm12, %xmm11, %xmm7
vmaxss %xmm11, %xmm12, %xmm8
vminss %xmm15, %xmm14, %xmm10
vmaxss %xmm14, %xmm15, %xmm11
vminss %xmm10, %xmm7, %xmm14
vmaxss %xmm8, %xmm11, %xmm10
vmovss 0x25180e(%rip), %xmm7 # 0x1ef09d8
vucomiss %xmm13, %xmm7
jbe 0x1c9f1df
vmovss 0x252ce7(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm10
ja 0x1c9f240
vmovss 0x252cd9(%rip), %xmm7 # 0x1ef1ec0
vucomiss %xmm7, %xmm9
seta %dl
vmovss 0x2517e2(%rip), %xmm7 # 0x1ef09d8
vcmpltps %xmm7, %xmm14, %k0
vcmpltps %xmm7, %xmm13, %k1
korw %k0, %k1, %k0
kmovd %k0, %esi
testb %sil, %dl
jne 0x1c9f240
vmovss 0x252ca7(%rip), %xmm7 # 0x1ef1ec0
vcmpnltps %xmm10, %xmm7, %k0
vmovss 0x2517b0(%rip), %xmm7 # 0x1ef09d8
vcmpnltps %xmm7, %xmm14, %k1
korw %k0, %k1, %k0
kmovd %k0, %edx
testb $0x1, %dl
jne 0x1ca0006
vcmpltss %xmm31, %xmm13, %k1
vmovaps %xmm22, %xmm15
vmovss 0x251775(%rip), %xmm16 # 0x1ef09cc
vmovss %xmm16, %xmm15, %xmm15 {%k1}
vcmpltss %xmm31, %xmm9, %k1
vmovaps %xmm22, %xmm12
vmovss %xmm16, %xmm12, %xmm12 {%k1}
vucomiss %xmm12, %xmm15
setp %dl
setne %sil
orb %dl, %sil
kmovd %esi, %k1
vmovss 0x24c795(%rip), %xmm7 # 0x1eeba20
vmovss %xmm31, %xmm7, %xmm7 {%k1}
vmovss 0x24d8eb(%rip), %xmm8 # 0x1eecb84
vmovss %xmm31, %xmm8, %xmm8 {%k1}
vcmpltss %xmm31, %xmm14, %k1
vmovaps %xmm22, %xmm11
vmovss %xmm16, %xmm11, %xmm11 {%k1}
vucomiss %xmm11, %xmm15
jne 0x1c9f2bb
jnp 0x1c9f2fa
vucomiss %xmm13, %xmm14
jne 0x1c9f30d
jp 0x1c9f30d
vucomiss %xmm31, %xmm13
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x24c740(%rip), %xmm13 # 0x1eeba20
vmovss %xmm31, %xmm13, %xmm13 {%k1}
vmovss 0x24d896(%rip), %xmm14 # 0x1eecb84
vmovss 0x24d41c(%rip), %xmm14 {%k1} # 0x1eec714
jmp 0x1c9f32e
vmovaps 0xe0(%rsp), %xmm15
vmovaps 0xd0(%rsp), %xmm16
jmp 0x1c9f349
vxorps %xmm20, %xmm13, %xmm15
vsubss %xmm13, %xmm14, %xmm13
vdivss %xmm13, %xmm15, %xmm14
vsubss %xmm14, %xmm22, %xmm13
vfmadd213ss %xmm14, %xmm31, %xmm13 # xmm13 = (xmm31 * xmm13) + xmm14
vmovaps %xmm13, %xmm14
vmovaps 0xe0(%rsp), %xmm15
vmovaps 0xd0(%rsp), %xmm16
vminss %xmm13, %xmm7, %xmm7
vmaxss %xmm8, %xmm14, %xmm8
vcmpltss %xmm31, %xmm10, %k1
vmovaps %xmm22, %xmm13
vmovss 0x25166c(%rip), %xmm13 {%k1} # 0x1ef09cc
vucomiss %xmm13, %xmm12
vmovaps 0x80(%rsp), %xmm14
jne 0x1c9f372
jnp 0x1c9f3dc
vucomiss %xmm9, %xmm10
jne 0x1c9f3b1
jp 0x1c9f3b1
vucomiss %xmm31, %xmm9
setnp %dl
sete %sil
andb %dl, %sil
kmovd %esi, %k1
vmovss 0x24c689(%rip), %xmm9 # 0x1eeba20
vmovss %xmm31, %xmm9, %xmm9 {%k1}
vmovss 0x24d7df(%rip), %xmm10 # 0x1eecb84
vmovss 0x24d365(%rip), %xmm10 {%k1} # 0x1eec714
jmp 0x1c9f3d2
vxorps %xmm20, %xmm9, %xmm12
vsubss %xmm9, %xmm10, %xmm9
vdivss %xmm9, %xmm12, %xmm10
vsubss %xmm10, %xmm22, %xmm9
vfmadd213ss %xmm10, %xmm31, %xmm9 # xmm9 = (xmm31 * xmm9) + xmm10
vmovaps %xmm9, %xmm10
vminss %xmm9, %xmm7, %xmm7
vmaxss %xmm8, %xmm10, %xmm8
vucomiss %xmm13, %xmm11
setp %dl
setne %sil
orb %dl, %sil
vminss %xmm22, %xmm7, %xmm9
kmovd %esi, %k1
vmovss %xmm9, %xmm7, %xmm7 {%k1}
vmaxss %xmm8, %xmm22, %xmm9
vmovss %xmm9, %xmm8, %xmm8 {%k1}
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
movb $0x1, %r15b
vucomiss %xmm8, %xmm7
ja 0x1c9ff9b
vaddss 0x2be03f(%rip), %xmm7, %xmm7 # 0x1f5d468
vaddss 0x24d76b(%rip), %xmm8, %xmm8 # 0x1eecb9c
vmaxss %xmm7, %xmm31, %xmm7
vminss %xmm22, %xmm8, %xmm8
vmovddup %xmm1, %xmm1 # xmm1 = xmm1[0,0]
vmovddup %xmm5, %xmm9 # xmm9 = xmm5[0,0]
vmovddup %xmm6, %xmm10 # xmm10 = xmm6[0,0]
vmovddup %xmm2, %xmm2 # xmm2 = xmm2[0,0]
vshufpd $0x3, %xmm5, %xmm5, %xmm5 # xmm5 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm6 # xmm6 = xmm6[1,1]
vshufps $0x0, %xmm8, %xmm7, %xmm11 # xmm11 = xmm7[0,0],xmm8[0,0]
vsubps %xmm11, %xmm25, %xmm12
vmulps %xmm4, %xmm11, %xmm13
vmulps %xmm5, %xmm11, %xmm5
vmulps %xmm6, %xmm11, %xmm6
vmulps %xmm3, %xmm11, %xmm3
vfmadd231ps %xmm1, %xmm12, %xmm13 # xmm13 = (xmm12 * xmm1) + xmm13
vfmadd231ps %xmm9, %xmm12, %xmm5 # xmm5 = (xmm12 * xmm9) + xmm5
vfmadd231ps %xmm10, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm10) + xmm6
vfmadd231ps %xmm2, %xmm12, %xmm3 # xmm3 = (xmm12 * xmm2) + xmm3
vsubss %xmm7, %xmm22, %xmm2
vmovshdup %xmm26, %xmm4 # xmm4 = xmm26[1,1,3,3]
vmulss %xmm7, %xmm4, %xmm1
vfmadd231ss %xmm2, %xmm26, %xmm1 # xmm1 = (xmm26 * xmm2) + xmm1
vsubss %xmm8, %xmm22, %xmm2
vmulss %xmm4, %xmm8, %xmm4
vfmadd231ss %xmm2, %xmm26, %xmm4 # xmm4 = (xmm26 * xmm2) + xmm4
vdivss %xmm0, %xmm22, %xmm0
vsubps %xmm13, %xmm5, %xmm2
vmulps %xmm17, %xmm2, %xmm2
vsubps %xmm5, %xmm6, %xmm7
vmulps %xmm17, %xmm7, %xmm7
vsubps %xmm6, %xmm3, %xmm8
vmulps %xmm17, %xmm8, %xmm8
vminps %xmm8, %xmm7, %xmm9
vmaxps %xmm8, %xmm7, %xmm7
vminps %xmm9, %xmm2, %xmm8
vmaxps %xmm7, %xmm2, %xmm2
vshufpd $0x3, %xmm8, %xmm8, %xmm7 # xmm7 = xmm8[1,1]
vshufpd $0x3, %xmm2, %xmm2, %xmm9 # xmm9 = xmm2[1,1]
vminps %xmm7, %xmm8, %xmm7
vmaxps %xmm9, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm7, %xmm0, %xmm8
vmulps %xmm2, %xmm0, %xmm7
vsubss %xmm1, %xmm4, %xmm0
vdivss %xmm0, %xmm22, %xmm0
vshufpd $0x3, %xmm13, %xmm13, %xmm2 # xmm2 = xmm13[1,1]
vshufpd $0x3, %xmm5, %xmm5, %xmm9 # xmm9 = xmm5[1,1]
vshufpd $0x3, %xmm6, %xmm6, %xmm10 # xmm10 = xmm6[1,1]
vshufpd $0x3, %xmm3, %xmm3, %xmm11 # xmm11 = xmm3[1,1]
vsubps %xmm13, %xmm2, %xmm2
vsubps %xmm5, %xmm9, %xmm5
vsubps %xmm6, %xmm10, %xmm6
vsubps %xmm3, %xmm11, %xmm3
vminps %xmm5, %xmm2, %xmm9
vmaxps %xmm5, %xmm2, %xmm2
vminps %xmm3, %xmm6, %xmm5
vminps %xmm5, %xmm9, %xmm5
vmaxps %xmm3, %xmm6, %xmm3
vmaxps %xmm3, %xmm2, %xmm2
vbroadcastss %xmm0, %xmm0
vmulps %xmm5, %xmm0, %xmm10
vmulps %xmm2, %xmm0, %xmm11
vmovaps 0xc0(%rsp), %xmm26
vinsertps $0x10, %xmm1, %xmm26, %xmm6 # xmm6 = xmm26[0],xmm1[0],xmm26[2,3]
vinsertps $0x10, %xmm4, %xmm14, %xmm5 # xmm5 = xmm14[0],xmm4[0],xmm14[2,3]
vaddps %xmm5, %xmm6, %xmm0
vmulps 0x24d602(%rip){1to4}, %xmm0, %xmm9 # 0x1eecb80
vshufps $0x54, %xmm9, %xmm9, %xmm0 # xmm0 = xmm9[0,1,1,1]
vbroadcastss %xmm9, %xmm2
vmovaps %xmm28, %xmm3
vfmadd213ps %xmm15, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm15
vmovaps %xmm29, %xmm12
vfmadd213ps %xmm16, %xmm2, %xmm12 # xmm12 = (xmm2 * xmm12) + xmm16
vmovaps %xmm30, %xmm13
vfmadd213ps 0x130(%rsp), %xmm2, %xmm13 # xmm13 = (xmm2 * xmm13) + mem
vsubps %xmm3, %xmm12, %xmm14
vfmadd213ps %xmm3, %xmm2, %xmm14 # xmm14 = (xmm2 * xmm14) + xmm3
vsubps %xmm12, %xmm13, %xmm3
vfmadd213ps %xmm12, %xmm2, %xmm3 # xmm3 = (xmm2 * xmm3) + xmm12
vsubps %xmm14, %xmm3, %xmm3
vfmadd231ps %xmm2, %xmm3, %xmm14 # xmm14 = (xmm3 * xmm2) + xmm14
vmulps %xmm17, %xmm3, %xmm2
vmovddup %xmm14, %xmm12 # xmm12 = xmm14[0,0]
vshufpd $0x3, %xmm14, %xmm14, %xmm3 # xmm3 = xmm14[1,1]
vshufps $0x55, %xmm9, %xmm9, %xmm13 # xmm13 = xmm9[1,1,1,1]
vsubps %xmm12, %xmm3, %xmm3
vfmadd231ps %xmm3, %xmm13, %xmm12 # xmm12 = (xmm13 * xmm3) + xmm12
vmovddup %xmm2, %xmm14 # xmm14 = xmm2[0,0]
vshufpd $0x3, %xmm2, %xmm2, %xmm2 # xmm2 = xmm2[1,1]
vsubps %xmm14, %xmm2, %xmm15
vfmadd213ps %xmm14, %xmm13, %xmm15 # xmm15 = (xmm13 * xmm15) + xmm14
vbroadcastss 0x2818b6(%rip), %xmm14 # 0x1f20ec0
vxorps %xmm3, %xmm14, %xmm2
vmovshdup %xmm15, %xmm13 # xmm13 = xmm15[1,1,3,3]
vxorps %xmm14, %xmm13, %xmm14
vmovshdup %xmm3, %xmm16 # xmm16 = xmm3[1,1,3,3]
vmovss 0x2c06d4(%rip), %xmm31 # 0x1f5fcfc
vpermt2ps %xmm3, %xmm31, %xmm14
vmulss %xmm3, %xmm13, %xmm3
vfmsub231ss %xmm16, %xmm15, %xmm3 # xmm3 = (xmm15 * xmm16) - xmm3
vmovss 0x2bb334(%rip), %xmm13 # 0x1f5a974
vpermt2ps %xmm2, %xmm13, %xmm15
vbroadcastss %xmm3, %xmm3
vdivps %xmm3, %xmm14, %xmm2
vdivps %xmm3, %xmm15, %xmm3
vbroadcastss %xmm12, %xmm13
vmulps %xmm2, %xmm13, %xmm13
vshufps $0x55, %xmm12, %xmm12, %xmm12 # xmm12 = xmm12[1,1,1,1]
vmulps %xmm3, %xmm12, %xmm12
vaddps %xmm12, %xmm13, %xmm12
vsubps %xmm12, %xmm0, %xmm0
vmovshdup %xmm2, %xmm12 # xmm12 = xmm2[1,1,3,3]
vinsertps $0x1c, %xmm10, %xmm8, %xmm13 # xmm13 = xmm8[0],xmm10[0],zero,zero
vmulps %xmm13, %xmm12, %xmm14
vinsertps $0x1c, %xmm11, %xmm7, %xmm15 # xmm15 = xmm7[0],xmm11[0],zero,zero
vmulps %xmm15, %xmm12, %xmm12
vminps %xmm12, %xmm14, %xmm16
vmaxps %xmm14, %xmm12, %xmm12
vmovshdup %xmm3, %xmm14 # xmm14 = xmm3[1,1,3,3]
vinsertps $0x4c, %xmm8, %xmm10, %xmm8 # xmm8 = xmm8[1],xmm10[1],zero,zero
vmulps %xmm8, %xmm14, %xmm10
vinsertps $0x4c, %xmm7, %xmm11, %xmm7 # xmm7 = xmm7[1],xmm11[1],zero,zero
vmulps %xmm7, %xmm14, %xmm11
vminps %xmm11, %xmm10, %xmm14
vaddps %xmm14, %xmm16, %xmm14
vmaxps %xmm10, %xmm11, %xmm10
vaddps %xmm10, %xmm12, %xmm10
vmovddup 0x281825(%rip), %xmm11 # xmm11 = mem[0,0]
vsubps %xmm10, %xmm11, %xmm10
vsubps %xmm14, %xmm11, %xmm11
vsubps %xmm9, %xmm6, %xmm12
vsubps %xmm9, %xmm5, %xmm9
vmulps %xmm10, %xmm12, %xmm14
vbroadcastss %xmm2, %xmm16
vmulps %xmm13, %xmm16, %xmm13
vmulps %xmm15, %xmm16, %xmm15
vminps %xmm15, %xmm13, %xmm16
vmaxps %xmm13, %xmm15, %xmm13
vbroadcastss %xmm3, %xmm15
vmulps %xmm8, %xmm15, %xmm8
vmulps %xmm7, %xmm15, %xmm7
vminps %xmm7, %xmm8, %xmm15
vaddps %xmm15, %xmm16, %xmm15
vmulps %xmm11, %xmm12, %xmm16
vmulps %xmm10, %xmm9, %xmm10
vmulps %xmm11, %xmm9, %xmm11
vmaxps %xmm8, %xmm7, %xmm7
vaddps %xmm7, %xmm13, %xmm7
vmovddup 0x2817be(%rip), %xmm8 # xmm8 = mem[0,0]
vsubps %xmm7, %xmm8, %xmm7
vsubps %xmm15, %xmm8, %xmm8
vmulps %xmm7, %xmm12, %xmm13
vmulps %xmm8, %xmm12, %xmm12
vmulps %xmm7, %xmm9, %xmm7
vmulps %xmm8, %xmm9, %xmm8
vminps %xmm12, %xmm13, %xmm9
vminps %xmm8, %xmm7, %xmm15
vminps %xmm15, %xmm9, %xmm9
vmaxps %xmm13, %xmm12, %xmm12
vmaxps %xmm7, %xmm8, %xmm7
vmaxps %xmm12, %xmm7, %xmm7
vminps %xmm16, %xmm14, %xmm8
vminps %xmm11, %xmm10, %xmm12
vminps %xmm12, %xmm8, %xmm8
vhaddps %xmm8, %xmm9, %xmm8
vmaxps %xmm14, %xmm16, %xmm9
vmaxps %xmm10, %xmm11, %xmm10
vmaxps %xmm9, %xmm10, %xmm9
vhaddps %xmm9, %xmm7, %xmm7
vshufps $0xe8, %xmm8, %xmm8, %xmm8 # xmm8 = xmm8[0,2,2,3]
vshufps $0xe8, %xmm7, %xmm7, %xmm9 # xmm9 = xmm7[0,2,2,3]
vaddps %xmm0, %xmm8, %xmm7
vaddps %xmm0, %xmm9, %xmm8
vmaxps %xmm7, %xmm6, %xmm6
vminps %xmm5, %xmm8, %xmm9
vcmpltps %xmm6, %xmm9, %k0
vinsertps $0x10, %xmm4, %xmm1, %xmm15 # xmm15 = xmm1[0],xmm4[0],xmm1[2,3]
kmovd %k0, %edx
testb $0x3, %dl
jne 0x1c9ff8b
vucomiss %xmm8, %xmm5
seta %sil
xorl %edx, %edx
vucomiss %xmm26, %xmm7
vmovaps 0xa0(%rsp), %xmm16
jbe 0x1c9f84d
testb %sil, %sil
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x2517ef(%rip), %xmm11 # 0x1ef0fec
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0xd0(%rsp), %xmm10
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
je 0x1c9f888
vcmpltps %xmm5, %xmm8, %k0
kshiftrb $0x1, %k0, %k0
kmovd %k0, %esi
vmovshdup %xmm7, %xmm4 # xmm4 = xmm7[1,1,3,3]
vucomiss %xmm1, %xmm4
seta %dl
andb %sil, %dl
jmp 0x1c9f888
vxorps %xmm31, %xmm31, %xmm31
vmovss 0x251791(%rip), %xmm11 # 0x1ef0fec
vmovaps 0xe0(%rsp), %xmm9
vmovaps 0xd0(%rsp), %xmm10
vmovaps 0x130(%rsp), %xmm12
vmovaps 0x170(%rsp), %xmm13
vmovaps 0x160(%rsp), %xmm14
orb %cl, %al
orb %dl, %al
cmpb $0x1, %al
jne 0x1c9ffd1
movl $0xc8, %eax
vsubss %xmm0, %xmm22, %xmm1
vmulss %xmm1, %xmm1, %xmm4
vmulss %xmm4, %xmm1, %xmm5
vmulss %xmm0, %xmm11, %xmm6
vmulss %xmm4, %xmm6, %xmm4
vmulss %xmm0, %xmm0, %xmm6
vmulss %xmm6, %xmm11, %xmm7
vmulss %xmm7, %xmm1, %xmm1
vbroadcastss %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vbroadcastss %xmm1, %xmm1
vmulss %xmm6, %xmm0, %xmm6
vbroadcastss %xmm6, %xmm6
vmulps %xmm6, %xmm13, %xmm6
vfmadd231ps %xmm1, %xmm12, %xmm6 # xmm6 = (xmm12 * xmm1) + xmm6
vfmadd231ps %xmm4, %xmm10, %xmm6 # xmm6 = (xmm10 * xmm4) + xmm6
vfmadd231ps %xmm5, %xmm9, %xmm6 # xmm6 = (xmm9 * xmm5) + xmm6
vmovddup %xmm6, %xmm1 # xmm1 = xmm6[0,0]
vshufpd $0x3, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,1]
vshufps $0x55, %xmm0, %xmm0, %xmm5 # xmm5 = xmm0[1,1,1,1]
vsubps %xmm1, %xmm4, %xmm4
vfmadd213ps %xmm1, %xmm5, %xmm4 # xmm4 = (xmm5 * xmm4) + xmm1
vbroadcastss %xmm4, %xmm1
vmulps %xmm1, %xmm2, %xmm1
vshufps $0x55, %xmm4, %xmm4, %xmm5 # xmm5 = xmm4[1,1,1,1]
vmulps %xmm5, %xmm3, %xmm5
vaddps %xmm5, %xmm1, %xmm1
vsubps %xmm1, %xmm0, %xmm0
vandps %xmm19, %xmm4, %xmm1
vprolq $0x20, %xmm1, %xmm4
vmaxss %xmm1, %xmm4, %xmm1
vucomiss %xmm1, %xmm14
ja 0x1c9f93c
decq %rax
jne 0x1c9f899
jmp 0x1c9ffd4
vucomiss %xmm31, %xmm0
jb 0x1c9ffd4
vucomiss %xmm0, %xmm22
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x80(%rsp), %xmm14
jb 0x1c9fa6f
vmovshdup %xmm0, %xmm1 # xmm1 = xmm0[1,1,3,3]
vucomiss %xmm31, %xmm1
jb 0x1c9fa6f
vucomiss %xmm1, %xmm22
jb 0x1c9fa6f
vmovss 0x8(%r13), %xmm2
vinsertps $0x1c, 0x18(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0],mem[0],zero,zero
vinsertps $0x28, 0x28(%r13), %xmm2, %xmm2 # xmm2 = xmm2[0,1],mem[0],zero
vdpps $0x7f, 0x330(%rsp), %xmm2, %xmm3
vdpps $0x7f, 0x320(%rsp), %xmm2, %xmm4
vdpps $0x7f, 0x310(%rsp), %xmm2, %xmm5
vdpps $0x7f, 0x300(%rsp), %xmm2, %xmm6
vdpps $0x7f, 0x2f0(%rsp), %xmm2, %xmm7
vdpps $0x7f, 0x2e0(%rsp), %xmm2, %xmm8
vdpps $0x7f, 0x2d0(%rsp), %xmm2, %xmm9
vdpps $0x7f, 0x2c0(%rsp), %xmm2, %xmm2
vsubss %xmm1, %xmm22, %xmm10
vmulss %xmm7, %xmm1, %xmm7
vmulss %xmm1, %xmm8, %xmm8
vmulss %xmm1, %xmm9, %xmm9
vmulss %xmm2, %xmm1, %xmm1
vfmadd231ss %xmm3, %xmm10, %xmm7 # xmm7 = (xmm10 * xmm3) + xmm7
vfmadd231ss %xmm4, %xmm10, %xmm8 # xmm8 = (xmm10 * xmm4) + xmm8
vfmadd231ss %xmm5, %xmm10, %xmm9 # xmm9 = (xmm10 * xmm5) + xmm9
vfmadd231ss %xmm6, %xmm10, %xmm1 # xmm1 = (xmm10 * xmm6) + xmm1
vsubss %xmm0, %xmm22, %xmm6
vmulss %xmm6, %xmm6, %xmm3
vmulss %xmm3, %xmm6, %xmm2
vmulss %xmm0, %xmm11, %xmm4
vmulss %xmm3, %xmm4, %xmm3
vmulps %xmm0, %xmm0, %xmm5
vmulss %xmm5, %xmm11, %xmm4
vmulss %xmm4, %xmm6, %xmm4
vmulps %xmm5, %xmm0, %xmm5
vmulss %xmm1, %xmm5, %xmm1
vfmadd231ss %xmm9, %xmm4, %xmm1 # xmm1 = (xmm4 * xmm9) + xmm1
vfmadd231ss %xmm8, %xmm3, %xmm1 # xmm1 = (xmm3 * xmm8) + xmm1
vfmadd231ss %xmm7, %xmm2, %xmm1 # xmm1 = (xmm2 * xmm7) + xmm1
vucomiss 0xbc(%rsp), %xmm1
jb 0x1c9fa6f
vmovss 0x100(%r9,%r11,4), %xmm7
vucomiss %xmm1, %xmm7
jae 0x1c9fa86
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
jmp 0x1c9fffb
vmovss %xmm7, 0xb8(%rsp)
movq %r12, %r13
movq %r14, %r12
vshufps $0x55, %xmm0, %xmm0, %xmm7 # xmm7 = xmm0[1,1,1,1]
vsubps %xmm7, %xmm25, %xmm8
vmulps 0x2a0(%rsp), %xmm7, %xmm9
vmulps 0x250(%rsp), %xmm7, %xmm10
vmulps 0x240(%rsp), %xmm7, %xmm11
vmulps 0x280(%rsp), %xmm7, %xmm7
vfmadd231ps 0x2b0(%rsp), %xmm8, %xmm9 # xmm9 = (xmm8 * mem) + xmm9
vfmadd231ps 0x270(%rsp), %xmm8, %xmm10 # xmm10 = (xmm8 * mem) + xmm10
vfmadd231ps 0x260(%rsp), %xmm8, %xmm11 # xmm11 = (xmm8 * mem) + xmm11
vfmadd231ps 0x290(%rsp), %xmm8, %xmm7 # xmm7 = (xmm8 * mem) + xmm7
vsubps %xmm9, %xmm10, %xmm8
vsubps %xmm10, %xmm11, %xmm9
vsubps %xmm11, %xmm7, %xmm7
vbroadcastss %xmm0, %xmm10
vmulps %xmm9, %xmm10, %xmm11
vbroadcastss %xmm6, %xmm6
vfmadd231ps %xmm8, %xmm6, %xmm11 # xmm11 = (xmm6 * xmm8) + xmm11
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm9, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm9) + xmm7
vmulps %xmm7, %xmm10, %xmm7
vfmadd231ps %xmm11, %xmm6, %xmm7 # xmm7 = (xmm6 * xmm11) + xmm7
vmulps %xmm17, %xmm7, %xmm6
movq (%rdi), %rax
movq 0x1e8(%rax), %rax
movq (%rax,%r8,8), %r14
movl 0x120(%r9,%r11,4), %eax
testl %eax, 0x34(%r14)
je 0x1c9ff5d
movq 0x10(%rdi), %rax
cmpq $0x0, 0x10(%rax)
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
jne 0x1c9fb71
movb $0x1, %al
cmpq $0x0, 0x48(%r14)
je 0x1c9ff71
vbroadcastss %xmm5, %xmm5
vmulps 0x200(%rsp), %xmm5, %xmm5
vbroadcastss %xmm4, %xmm4
vfmadd132ps 0x210(%rsp), %xmm5, %xmm4 # xmm4 = (xmm4 * mem) + xmm5
vbroadcastss %xmm3, %xmm3
vfmadd132ps 0x220(%rsp), %xmm4, %xmm3 # xmm3 = (xmm3 * mem) + xmm4
vbroadcastss %xmm2, %xmm2
vfmadd132ps 0x230(%rsp), %xmm3, %xmm2 # xmm2 = (xmm2 * mem) + xmm3
vshufps $0xc9, %xmm2, %xmm2, %xmm3 # xmm3 = xmm2[1,2,0,3]
vshufps $0xc9, %xmm6, %xmm6, %xmm4 # xmm4 = xmm6[1,2,0,3]
vmulps %xmm4, %xmm2, %xmm2
vfmsub231ps %xmm3, %xmm6, %xmm2 # xmm2 = (xmm6 * xmm3) - xmm2
movq 0x8(%rdi), %rax
vbroadcastss %xmm0, %ymm3
vbroadcastss 0x272b33(%rip), %ymm4 # 0x1f12704
vpermps %ymm0, %ymm4, %ymm0
vpermps %ymm2, %ymm4, %ymm4
vpermps %ymm2, %ymm27, %ymm5
vbroadcastss %xmm2, %ymm2
vmovaps %ymm4, 0x440(%rsp)
vmovaps %ymm5, 0x460(%rsp)
vmovaps %ymm2, 0x480(%rsp)
vmovaps %ymm3, 0x4a0(%rsp)
vmovaps %ymm0, 0x4c0(%rsp)
vmovaps 0x3a0(%rsp), %ymm0
vmovaps %ymm0, 0x4e0(%rsp)
vmovdqa 0x380(%rsp), %ymm0
vmovdqa %ymm0, 0x500(%rsp)
leaq 0x520(%rsp), %rcx
vpcmpeqd %ymm0, %ymm0, %ymm0
vmovdqa %ymm0, 0x20(%rcx)
vmovdqa %ymm0, (%rcx)
vbroadcastss (%rax), %ymm0
vmovaps %ymm0, 0x520(%rsp)
vbroadcastss 0x4(%rax), %ymm0
vmovaps %ymm0, 0x540(%rsp)
vmovss %xmm1, 0x100(%r9,%r11,4)
vmovaps 0x360(%rsp), %ymm0
vmovaps %ymm0, 0x1e0(%rsp)
leaq 0x1e0(%rsp), %rax
movq %rax, 0x1b0(%rsp)
movq 0x18(%r14), %rax
movq %rax, 0x1b8(%rsp)
movq 0x8(%rdi), %rax
movq %rax, 0x1c0(%rsp)
movq %r9, 0x1c8(%rsp)
leaq 0x440(%rsp), %rax
movq %rax, 0x1d0(%rsp)
movl $0x8, 0x1d8(%rsp)
movq 0x48(%r14), %rax
testq %rax, %rax
movq %r10, 0xf8(%rsp)
movq %r8, 0xf0(%rsp)
vmovaps %xmm15, 0x150(%rsp)
je 0x1c9fdf5
leaq 0x1b0(%rsp), %rdi
movq %r11, 0x140(%rsp)
vzeroupper
callq *%rax
vmovaps 0x80(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm26
vmovaps 0x150(%rsp), %xmm15
vmovaps 0x100(%rsp), %xmm30
vmovaps 0x110(%rsp), %xmm29
vmovaps 0x120(%rsp), %xmm28
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm12
vmovaps 0xa0(%rsp), %xmm16
movq 0xf0(%rsp), %r8
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x24c999(%rip), %xmm25 # 0x1eec714
vmovss 0x251cc7(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x251271(%rip), %xmm23 # 0x1ef1000
vmovss 0x24c97b(%rip), %xmm22 # 0x1eec714
vmovss 0x28113d(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x25123f(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x281109(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x281103(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x251221(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x281107(%rip), %ymm27 # 0x1f20edc
movq 0x98(%rsp), %rdi
movq 0xb0(%rsp), %r9
movq 0x140(%rsp), %r11
movq 0xf8(%rsp), %r10
vmovdqa 0x1e0(%rsp), %ymm0
vptest %ymm0, %ymm0
je 0x1ca0037
movq 0x10(%rdi), %rcx
movq 0x10(%rcx), %rax
testq %rax, %rax
je 0x1c9ff1f
testb $0x2, (%rcx)
jne 0x1c9fe2a
testb $0x40, 0x3e(%r14)
je 0x1c9ff1f
leaq 0x1b0(%rsp), %rdi
movq %r11, %r14
vzeroupper
callq *%rax
vmovaps 0x80(%rsp), %xmm14
vmovaps 0xc0(%rsp), %xmm26
vmovaps 0x150(%rsp), %xmm15
vmovaps 0x100(%rsp), %xmm30
vmovaps 0x110(%rsp), %xmm29
vmovaps 0x120(%rsp), %xmm28
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x70(%rsp), %xmm12
vmovaps 0xa0(%rsp), %xmm16
movq 0xf0(%rsp), %r8
vxorps %xmm31, %xmm31, %xmm31
vbroadcastss 0x24c86a(%rip), %xmm25 # 0x1eec714
vmovss 0x251b98(%rip), %xmm24 # 0x1ef1a4c
vmovss 0x251142(%rip), %xmm23 # 0x1ef1000
vmovss 0x24c84c(%rip), %xmm22 # 0x1eec714
vmovss 0x28100e(%rip), %xmm21 # 0x1f20ee0
vbroadcastss 0x251110(%rip), %ymm18 # 0x1ef0fec
vbroadcastss 0x280fda(%rip), %xmm20 # 0x1f20ec0
vbroadcastss 0x280fd4(%rip), %xmm19 # 0x1f20ec4
vbroadcastss 0x2510f2(%rip), %xmm17 # 0x1ef0fec
vbroadcastss 0x280fd8(%rip), %ymm27 # 0x1f20edc
movq 0x98(%rsp), %rdi
movq 0xb0(%rsp), %r9
movq %r14, %r11
movq 0xf8(%rsp), %r10
vmovdqa 0x1e0(%rsp), %ymm0
vptestmd %ymm0, %ymm0, %k1
movq 0x1c8(%rsp), %rax
vmovaps 0x100(%rax), %ymm1
vbroadcastss 0x24cc3c(%rip), %ymm1 {%k1} # 0x1eecb84
vmovaps %ymm1, 0x100(%rax)
vptest %ymm0, %ymm0
setne %al
jmp 0x1ca0039
xorl %eax, %eax
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
movl 0x5c(%rsp), %ecx
orb %al, %cl
movl %ecx, 0x5c(%rsp)
movq %r12, %r14
movq %r13, %r12
movq 0x148(%rsp), %r13
jmp 0x1c9fffb
vxorps %xmm31, %xmm31, %xmm31
vmovaps 0xa0(%rsp), %xmm16
jmp 0x1c9ffd4
vmovaps 0xa0(%rsp), %xmm16
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps %xmm26, %xmm15
vmovaps 0xc0(%rsp), %xmm26
jmp 0x1c9fffb
xorl %r15d, %r15d
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps 0x80(%rsp), %xmm14
testb %r15b, %r15b
jne 0x1c9ef94
jmp 0x1ca0059
movb $0x1, %r15b
vmovaps 0x70(%rsp), %xmm12
vmovaps 0x60(%rsp), %xmm13
vmovaps 0x40(%rsp), %xmm8
vmovaps 0x30(%rsp), %xmm9
vmovaps 0x20(%rsp), %xmm10
vmovaps %xmm26, %xmm15
vmovaps 0xc0(%rsp), %xmm26
jmp 0x1c9fff2
xorl %eax, %eax
testb %al, %al
jne 0x1c9ff71
vmovss 0xb8(%rsp), %xmm0
vmovss %xmm0, 0x100(%r9,%r11,4)
jmp 0x1c9ff71
vinsertps $0x10, %xmm14, %xmm26, %xmm0 # xmm0 = xmm26[0],xmm14[0],xmm26[2,3]
vmovaps 0x190(%rsp), %xmm19
vmovaps 0x180(%rsp), %xmm25
vmovaps 0x400(%rsp), %ymm29
vmovaps 0x3e0(%rsp), %ymm28
jmp 0x1c9ea5b
testb $0x1, 0x5c(%rsp)
jne 0x1ca00b3
vmovaps 0x3c0(%rsp), %ymm0
vcmpleps 0x100(%r9,%r11,4){1to8}, %ymm0, %k0
kmovb %k0, %eax
andl %r14d, %r12d
andl %eax, %r12d
setne 0x1f(%rsp)
jne 0x1c9e347
movb 0x1f(%rsp), %al
andb $0x1, %al
leaq -0x28(%rbp), %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
vzeroupper
retq
nop
|
/embree[P]embree/kernels/geometry/curveNi_intersector.h
|
Subsets and Splits
SQL Console for LLM4Binary/decompile-bench
Filters out entries with file names ending in .cpp, providing a basic subset of the dataset that excludes C++ files.